file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 class values)
---|---|---|---|---|
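The rows below are fill-in-the-middle (FIM) samples: each row carries a file name, the code before the elided span (prefix), the code after it (suffix), the held-out span itself (middle), and the kind of span that was removed (fim_type). As a minimal sketch of how such a row relates to the original source, the following assumes the usual FIM convention that prefix + middle + suffix reconstructs the file; the `FimRow` struct, its field names, and `reconstruct` are illustrative only, not part of the dataset's code.

```rust
// Hypothetical row layout mirroring the columns above; names are illustrative.
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

// Under the usual FIM convention, concatenating prefix, middle and suffix
// yields the original file contents.
fn reconstruct(row: &FimRow) -> String {
    format!("{}{}{}", row.prefix, row.middle, row.suffix)
}

fn main() {
    let row = FimRow {
        file_name: "example.rs".into(),
        prefix: "fn add(a: i32, b: i32) -> i32 {\n".into(),
        middle: "a + b\n".into(),
        suffix: "}\n".into(),
        fim_type: "identifier_body".into(),
    };
    assert_eq!(reconstruct(&row), "fn add(a: i32, b: i32) -> i32 {\na + b\n}\n");
    let _ = (&row.file_name, &row.fim_type); // only used to silence unused-field warnings in this sketch
}
```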
lambda.rs | use dotenv;
use fastspring_keygen_integration::fastspring;
use fastspring_keygen_integration::keygen;
use fastspring_keygen_integration::keygen::{generate_licenses, suspend_license};
use fastspring_keygen_integration::util;
use fastspring_keygen_integration::patreon;
use http::header::CONTENT_TYPE;
use lambda_http::{lambda, Body, Request, RequestExt, Response};
use lambda_runtime::error::HandlerError;
use lambda_runtime::Context;
use log::{debug, info, warn};
use std::collections::HashMap;
use std::error::Error;
use std::env;
use lazy_static::lazy_static;
use lettre::transport::smtp::authentication::Credentials;
use lettre::{Message, SmtpTransport, Transport};
lazy_static! {
static ref MNPRX_COMMUNITY_KEYGEN_POLICY_ID: String = env::var("MNPRX_COMMUNITY_KEYGEN_POLICY_ID").unwrap();
static ref SMTP_SERVER: String = env::var("SMTP_SERVER").unwrap();
static ref SMTP_USERNAME: String = env::var("SMTP_USERNAME").unwrap();
static ref SMTP_PASSWORD: String = env::var("SMTP_PASSWORD").unwrap();
}
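/// Routes an incoming request to the matching handler based on its path and HTTP method.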
fn router(req: Request, c: Context) -> Result<Response<Body>, HandlerError> {
debug!("router request={:?}", req);
debug!("path={:?}", req.uri().path());
debug!("query={:?}", req.query_string_parameters());
let client = reqwest::Client::new();
match req.uri().path() {
"/fastspring-keygen-integration-service/keygen/create" => match *req.method() {
http::Method::POST => handle_keygen_create(req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/webhooks" => match *req.method() {
http::Method::POST => handle_webhook(&client, req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/patreon" => match *req.method() {
http::Method::POST => handle_patreon_webhook(&client, req, c),
_ => not_allowed(req, c),
},
_ => not_found(req, c),
}
}
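/// Returns the key portion of a license code, i.e. its second dot-separated segment.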
fn license_key(code: &str) -> Option<&str> {
code.split('.').nth(1)
}
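/// Handles Patreon webhooks: verifies the request signature, then dispatches on the
/// `X-Patreon-Event` header (pledge creation and deletion).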
fn handle_patreon_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError>
{
if !patreon::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let trigger = req.headers().get("X-Patreon-Event")
.ok_or("invalid format (X-Patreon-Event)")?
.to_str().ok().ok_or("invalid format (X-Patreon-Event)")?;
debug!("X-Patreon-Event: {}", trigger);
let body = util::body_to_json(req.body())?;
if trigger == "pledges:create" {
patreon_handle_pledge_create(client, &body)?;
} else if trigger == "pledges:delete" {
patreon_handle_pledge_delete(client, &body)?;
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Patreon pledge create trigger
fn patreon_handle_pledge_create(
client: &reqwest::Client,
body: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_create {:?}", body);
let user_id = body["data"]["relationships"]["patron"]["data"]["id"].as_str().ok_or("invalid format (.data.relationships.patron.data.id)")?;
let mut user_email = None;
let mut user_first_name = None;
for included in body["included"].as_array().ok_or("invalid format (.included)")?.iter() {
if included["id"].as_str().ok_or("invalid format (.included.#.id)")? == user_id {
user_email = Some(included["attributes"]["email"].as_str().ok_or("invalid format (.included.#.attributes.email)")?);
user_first_name = included["attributes"]["first_name"].as_str();
}
}
let user_email = user_email.ok_or("could not find patron email")?;
debug!("patron email: {}", user_email);
let license = keygen::generate_license(
client,
"PATREON",
MNPRX_COMMUNITY_KEYGEN_POLICY_ID.as_ref(),
None,
Some(user_id),
false)?;
let user_name = body["data"]["relationships"]["patron"]["data"]["id"].as_str().unwrap_or("");
let email_body = format!(r##"Hi,
Thank you for becoming our patron!
You can activate your Flair Community license with the following key:
{}
For more information on how to install and activate your license, please refer to the documentation: https://docs.artineering.io/flair/setup/
If you encounter any issues, please feel free to reach out to us through Discord; we are here to help.
Have fun using Flair and make sure to share your results with the community.
Cheers,
Your team at Artineering."##, license);
// send the license to the patron
let email = Message::builder()
.from("Artineering <[email protected]>".parse().unwrap())
.reply_to("Artineering <[email protected]>".parse().unwrap())
.to(user_email.parse().unwrap())
.bcc("[email protected]".parse().unwrap())
.subject("[Flair] Your Community license key")
.body(email_body)
.unwrap();
let creds = Credentials::new(SMTP_USERNAME.clone(), SMTP_PASSWORD.clone());
let mailer = SmtpTransport::relay(SMTP_SERVER.as_ref())
.unwrap()
.credentials(creds)
.build();
match mailer.send(&email) {
Ok(_) => info!("Email sent successfully"),
Err(e) => panic!("Could not send email: {:?}", e),
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Patreon pledge delete trigger
fn patreon_handle_pledge_delete(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_delete {:?}", data);
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
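/// Handles FastSpring webhook events; currently only `subscription.deactivated`
/// is acted upon, other event types are logged and ignored.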
fn handle_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError> {
if !fastspring::authentify_web_hook(&req) |
let events_json = util::body_to_json(req.body())?;
let events_json = events_json["events"].as_array().ok_or("invalid format")?;
// TODO do not reply OK every time: check each event
for e in events_json {
let ty = e["type"].as_str().ok_or("invalid format")?;
let data = &e["data"];
match ty {
"subscription.deactivated" => {
handle_subscription_deactivated(client, data)?;
}
_ => {
warn!("unhandled webhook: {}", ty);
}
};
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Handles deactivation of subscriptions.
///
/// This will suspend all licenses associated with the order.
fn handle_subscription_deactivated(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError> {
debug!("handle_subscription_deactivated {:?}", data);
let subscription_id = data["id"].as_str().ok_or("invalid format (.id)")?;
info!("subscription deactivated: {}", subscription_id);
let orders = fastspring::get_subscription_entries(client, subscription_id)?;
// find the original order
// according to the API, this is the entry whose ".reference" field does not include
// a "B" (for "billing") at the end. All the others are subscription billing orders.
let original_order = orders.as_array().ok_or("invalid format (orders)")?.iter().find(|&order| {
let order = &order["order"];
if order["reference"].is_null() { return false; }
if let Some(s) = order["reference"].as_str() {
!s.ends_with('B')
} else {
false
}
});
let original_order = original_order.ok_or("could not find original order")?;
let order_items = original_order["order"]["items"]
.as_array()
.ok_or("invalid format (.order.items)")?;
// Collect all licenses to revoke
let mut licenses_to_revoke = Vec::new();
for item in order_items.iter() {
//let product = &item["product"];
for (_k, v) in item["fulfillments"]
.as_object()
.ok_or("invalid format (.fulfillments)")?
.iter()
{
if let Some(licenses) = v.as_array() {
for l in licenses {
let code = if let Some(s) = l["license"].as_str() {
s
} else {
continue;
};
licenses_to_revoke.push(String::from(code));
}
}
}
}
// revoke all licenses
for lic in licenses_to_revoke.iter() {
let key = license_key(lic).ok_or("invalid license key")?;
keygen::revoke_license(key)?;
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Handles license creation requests (coming from FastSpring).
fn handle_keygen_create(req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
if !fastspring::verify_license_gen(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let params: HashMap<_, _> = url::form_urlencoded::parse(match req.body() {
Body::Text(ref s) => s.as_bytes(),
_ => return Err("invalid request body".into()),
})
.collect();
//debug!("params = {:?}", params);
let subscription = params
.get("subscription")
.ok_or("invalid query parameters (no subscription)")?;
let policy_id = params
.get("policy")
.ok_or("invalid query parameters (no policy)")?;
let quantity: u32 = params
.get("quantity")
.ok_or("invalid query parameters (no quantity)")?
.parse()?;
let (codes,errors) = generate_licenses(subscription, policy_id, quantity, None, false);
if !errors.is_empty() {
Err(format!("errors encountered while generating licenses ({} successfully generated)", codes.len()).as_str())?
}
let codes = codes.join("\n");
Ok(Response::builder()
.status(http::StatusCode::OK)
.header(CONTENT_TYPE, "text/plain")
.body(codes.into())
.unwrap())
}
fn not_found(_req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
Ok(Response::builder()
.status(http::StatusCode::NOT_FOUND)
.body(Body::default())
.unwrap())
}
fn not_allowed(_req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
Ok(Response::builder()
.status(http::StatusCode::METHOD_NOT_ALLOWED)
.body(Body::default())
.unwrap())
}
fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
dotenv::dotenv().ok();
lambda!(router);
Ok(())
}
| {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
} | conditional_block |
state.rs | use std::cmp::min;
use std::collections::BTreeMap;
use chrono::Utc;
use tako::gateway::{
CancelTasks, FromGatewayMessage, LostWorkerMessage, NewWorkerMessage, TaskFailedMessage,
TaskState, TaskUpdate, ToGatewayMessage,
};
use tako::ItemId;
use tako::{define_wrapped_type, TaskId};
use crate::server::autoalloc::{AutoAllocService, LostWorkerDetails};
use crate::server::event::events::JobInfo;
use crate::server::event::storage::EventStorage;
use crate::server::job::Job;
use crate::server::rpc::Backend;
use crate::server::worker::Worker;
use crate::transfer::messages::ServerInfo;
use crate::WrappedRcRefCell;
use crate::{JobId, JobTaskCount, Map, TakoTaskId, WorkerId};
pub struct State {
jobs: crate::Map<JobId, Job>,
workers: crate::Map<WorkerId, Worker>,
// Here we store TaskId -> JobId data, but to make it sparse
// we store ONLY the base_task_id there, i.e. each job has here
// only one entry.
// Example:
// Real mapping: TaskId JobId
// 1 -> 1
// 2 -> 1
// 3 -> 2
// 4 -> 2
// 5 -> 2
// The actual base_task_id_to_job will be 1 -> 1, 3 -> 2
// Therefore we need to find the biggest key that is lower than or equal to a given task id
// To make this query efficient, we use BTreeMap and not Map
base_task_id_to_job_id: BTreeMap<TakoTaskId, JobId>,
job_id_counter: <JobId as ItemId>::IdType,
task_id_counter: <TaskId as ItemId>::IdType,
pub(crate) autoalloc_service: Option<AutoAllocService>,
event_storage: EventStorage,
server_info: ServerInfo,
}
define_wrapped_type!(StateRef, State, pub);
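/// Asynchronously asks tako to cancel the given tasks of a job and records the
/// resulting cancel states on the job.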
fn cancel_tasks_from_callback(
state_ref: &StateRef,
tako_ref: &Backend,
job_id: JobId,
tasks: Vec<TakoTaskId>,
) {
if tasks.is_empty() {
return;
}
log::debug!("Canceling {:?} tasks", tasks);
let tako_ref = tako_ref.clone();
let state_ref = state_ref.clone();
tokio::task::spawn_local(async move {
let message = FromGatewayMessage::CancelTasks(CancelTasks { tasks });
let response = tako_ref.send_tako_message(message).await.unwrap();
match response {
ToGatewayMessage::CancelTasksResponse(msg) => {
let mut state = state_ref.get_mut();
if let Some(job) = state.get_job_mut(job_id) {
log::debug!("Tasks {:?} canceled", msg.cancelled_tasks);
log::debug!("Tasks {:?} already finished", msg.already_finished);
for tako_id in msg.cancelled_tasks {
job.set_cancel_state(tako_id, &tako_ref);
}
}
}
ToGatewayMessage::Error(msg) => {
log::debug!("Canceling job {} failed: {}", job_id, msg.message);
}
_ => {
panic!("Invalid message");
}
};
});
}
impl State {
pub fn get_job(&self, job_id: JobId) -> Option<&Job> {
self.jobs.get(&job_id)
}
pub fn get_job_mut(&mut self, job_id: JobId) -> Option<&mut Job> {
self.jobs.get_mut(&job_id)
}
pub fn jobs(&self) -> impl Iterator<Item = &Job> {
self.jobs.values()
}
pub fn add_worker(&mut self, worker: Worker) {
let worker_id = worker.worker_id();
assert!(self.workers.insert(worker_id, worker).is_none())
}
pub fn server_info(&self) -> &ServerInfo {
&self.server_info
}
pub fn set_worker_port(&mut self, port: u16) {
self.server_info.worker_port = port;
}
pub fn add_job(&mut self, job: Job) {
let job_id = job.job_id;
assert!(self
.base_task_id_to_job_id
.insert(job.base_task_id, job_id)
.is_none());
self.event_storage.on_job_submitted(
job_id,
JobInfo {
name: job.name.clone(),
job_desc: job.job_desc.clone(),
base_task_id: job.base_task_id,
task_ids: job.tasks.iter().map(|(id, _)| *id).collect(),
max_fails: job.max_fails,
log: job.log.clone(),
submission_date: job.submission_date,
},
);
assert!(self.jobs.insert(job_id, job).is_none());
if let Some(autoalloc) = &self.autoalloc_service {
autoalloc.on_job_created(job_id);
}
}
/// Completely forgets this job, in order to reduce memory usage.
pub(crate) fn forget_job(&mut self, job_id: JobId) -> Option<Job> {
let job = match self.jobs.remove(&job_id) {
Some(job) => {
assert!(job.is_terminated());
job
}
None => {
log::error!("Trying to forget unknown job {job_id}");
return None;
}
};
self.base_task_id_to_job_id.remove(&job.base_task_id);
Some(job)
}
pub fn get_job_mut_by_tako_task_id(&mut self, task_id: TakoTaskId) -> Option<&mut Job> |
pub fn new_job_id(&mut self) -> JobId {
let id = self.job_id_counter;
self.job_id_counter += 1;
id.into()
}
pub fn revert_to_job_id(&mut self, id: JobId) {
self.job_id_counter = id.as_num();
}
pub fn last_n_ids(&self, n: u32) -> impl Iterator<Item = JobId> {
let n = min(n, self.job_id_counter - 1);
((self.job_id_counter - n)..self.job_id_counter).map(|id| id.into())
}
pub fn new_task_id(&mut self, task_count: JobTaskCount) -> TakoTaskId {
let id = self.task_id_counter;
self.task_id_counter += task_count;
id.into()
}
pub fn get_workers(&self) -> &Map<WorkerId, Worker> {
&self.workers
}
pub fn get_worker(&self, worker_id: WorkerId) -> Option<&Worker> {
self.workers.get(&worker_id)
}
pub fn get_worker_mut(&mut self, worker_id: WorkerId) -> Option<&mut Worker> {
self.workers.get_mut(&worker_id)
}
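/// Handles a task failure reported by tako: marks the task (and its cancelled
/// dependents) accordingly, and cancels the remaining tasks of the job if
/// `max_fails` is exceeded.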
pub fn process_task_failed(
&mut self,
state_ref: &StateRef,
tako_ref: &Backend,
msg: TaskFailedMessage,
) {
log::debug!("Task id={} failed: {:?}", msg.id, msg.info);
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
for task_id in msg.cancelled_tasks {
log::debug!(
"Task id={} canceled because of task dependency fails",
task_id
);
job.set_cancel_state(task_id, tako_ref);
}
job.set_failed_state(msg.id, msg.info.message, tako_ref);
if let Some(max_fails) = job.max_fails {
if job.counters.n_failed_tasks > max_fails {
let task_ids = job.non_finished_task_ids();
cancel_tasks_from_callback(state_ref, tako_ref, job.job_id, task_ids);
}
}
self.event_storage.on_task_failed(msg.id);
}
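/// Applies a task state update reported by tako to the owning job and the event
/// storage; finishing the last task of a job also records the job as completed.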
pub fn process_task_update(&mut self, msg: TaskUpdate, backend: &Backend) {
log::debug!("Task id={} updated {:?}", msg.id, msg.state);
let (mut job_id, mut is_job_terminated): (Option<JobId>, bool) = (None, false);
match msg.state {
TaskState::Running {
worker_ids,
context,
} => {
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
job.set_running_state(msg.id, worker_ids.clone(), context);
// TODO: Prepare it for multi-node tasks
// This (incomplete) version just takes the first worker as "the worker" for task
self.event_storage.on_task_started(msg.id, worker_ids[0]);
}
TaskState::Finished => {
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
job.set_finished_state(msg.id, backend);
(job_id, is_job_terminated) = (Some(job.job_id), job.is_terminated());
self.event_storage.on_task_finished(msg.id);
}
TaskState::Waiting => {
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
job.set_waiting_state(msg.id);
}
TaskState::Invalid => {
unreachable!()
}
};
if is_job_terminated {
self.event_storage
.on_job_completed(job_id.unwrap(), chrono::offset::Utc::now());
}
}
pub fn process_worker_new(&mut self, msg: NewWorkerMessage) {
log::debug!("New worker id={}", msg.worker_id);
self.add_worker(Worker::new(msg.worker_id, msg.configuration.clone()));
// TODO: use observer in event storage instead of sending these messages directly
if let Some(autoalloc) = &self.autoalloc_service {
autoalloc.on_worker_connected(msg.worker_id, &msg.configuration);
}
self.event_storage
.on_worker_added(msg.worker_id, msg.configuration);
}
pub fn process_worker_lost(
&mut self,
_state_ref: &StateRef,
_tako_ref: &Backend,
msg: LostWorkerMessage,
) {
log::debug!("Worker lost id={}", msg.worker_id);
let worker = self.workers.get_mut(&msg.worker_id).unwrap();
worker.set_offline_state(msg.reason.clone());
if let Some(autoalloc) = &self.autoalloc_service {
autoalloc.on_worker_lost(
msg.worker_id,
&worker.configuration,
LostWorkerDetails {
reason: msg.reason.clone(),
lifetime: (Utc::now() - worker.started_at()).to_std().unwrap(),
},
);
}
for task_id in msg.running_tasks {
let job = self.get_job_mut_by_tako_task_id(task_id).unwrap();
job.set_waiting_state(task_id);
}
self.event_storage.on_worker_lost(msg.worker_id, msg.reason);
}
pub fn stop_autoalloc(&mut self) {
// Drop the sender
self.autoalloc_service = None;
}
pub fn event_storage(&self) -> &EventStorage {
&self.event_storage
}
pub fn event_storage_mut(&mut self) -> &mut EventStorage {
&mut self.event_storage
}
pub fn autoalloc(&self) -> &AutoAllocService {
self.autoalloc_service.as_ref().unwrap()
}
}
impl StateRef {
pub fn new(event_storage: EventStorage, server_info: ServerInfo) -> StateRef {
Self(WrappedRcRefCell::wrap(State {
jobs: Default::default(),
workers: Default::default(),
base_task_id_to_job_id: Default::default(),
job_id_counter: 1,
task_id_counter: 1,
autoalloc_service: None,
event_storage,
server_info,
}))
}
}
#[cfg(test)]
mod tests {
use tako::program::{ProgramDefinition, StdioDef};
use crate::common::arraydef::IntArray;
use crate::server::job::Job;
use crate::server::state::State;
use crate::tests::utils::create_hq_state;
use crate::transfer::messages::{JobDescription, PinMode, TaskDescription};
use crate::{JobId, TakoTaskId};
fn dummy_program_definition() -> ProgramDefinition {
ProgramDefinition {
args: vec![],
env: Default::default(),
stdout: StdioDef::Null,
stderr: StdioDef::Null,
stdin: vec![],
cwd: Default::default(),
}
}
fn test_job<J: Into<JobId>, T: Into<TakoTaskId>>(
ids: IntArray,
job_id: J,
base_task_id: T,
) -> Job {
let job_desc = JobDescription::Array {
ids,
entries: None,
task_desc: TaskDescription {
program: dummy_program_definition(),
resources: Default::default(),
pin_mode: PinMode::None,
task_dir: false,
time_limit: None,
priority: 0,
crash_limit: 5,
},
};
Job::new(
job_desc,
job_id.into(),
base_task_id.into(),
"".to_string(),
None,
None,
Default::default(),
)
}
fn check_id<T: Into<TakoTaskId>>(state: &mut State, task_id: T, expected: Option<u32>) {
assert_eq!(
state
.get_job_mut_by_tako_task_id(task_id.into())
.map(|j| j.job_id.as_num()),
expected
);
}
#[test]
fn test_find_job_id_by_task_id() {
let state_ref = create_hq_state();
let mut state = state_ref.get_mut();
state.add_job(test_job(IntArray::from_range(0, 10), 223, 100));
state.add_job(test_job(IntArray::from_range(0, 15), 224, 110));
state.add_job(test_job(IntArray::from_id(0), 225, 125));
state.add_job(test_job(IntArray::from_id(0), 226, 126));
state.add_job(test_job(IntArray::from_id(0), 227, 130));
let state = &mut state;
check_id(state, 99, None);
check_id(state, 100, Some(223));
check_id(state, 101, Some(223));
check_id(state, 109, Some(223));
check_id(state, 110, Some(224));
check_id(state, 124, Some(224));
check_id(state, 125, Some(225));
check_id(state, 126, Some(226));
check_id(state, 127, None);
check_id(state, 129, None);
check_id(state, 130, Some(227));
check_id(state, 131, None);
}
}
| {
let job_id: JobId = *self.base_task_id_to_job_id.range(..=task_id).next_back()?.1;
let job = self.jobs.get_mut(&job_id)?;
if task_id
< TakoTaskId::new(
job.base_task_id.as_num() + job.n_tasks() as <TaskId as ItemId>::IdType,
)
{
Some(job)
} else {
None
}
} | identifier_body |
state.rs | use std::cmp::min;
use std::collections::BTreeMap;
use chrono::Utc;
use tako::gateway::{
CancelTasks, FromGatewayMessage, LostWorkerMessage, NewWorkerMessage, TaskFailedMessage,
TaskState, TaskUpdate, ToGatewayMessage,
};
use tako::ItemId;
use tako::{define_wrapped_type, TaskId};
use crate::server::autoalloc::{AutoAllocService, LostWorkerDetails};
use crate::server::event::events::JobInfo;
use crate::server::event::storage::EventStorage;
use crate::server::job::Job;
use crate::server::rpc::Backend;
use crate::server::worker::Worker;
use crate::transfer::messages::ServerInfo;
use crate::WrappedRcRefCell;
use crate::{JobId, JobTaskCount, Map, TakoTaskId, WorkerId};
pub struct State {
jobs: crate::Map<JobId, Job>,
workers: crate::Map<WorkerId, Worker>,
// Here we store TaskId -> JobId data, but to make it sparse
// we store ONLY the base_task_id there, i.e. each job has here
// only one entry.
// Example:
// Real mapping: TaskId JobId
// 1 -> 1
// 2 -> 1
// 3 -> 2
// 4 -> 2
// 5 -> 2
// The actual base_task_id_to_job will be 1 -> 1, 3 -> 2
// Therefore we need to find the biggest key that is lower than or equal to a given task id
// To make this query efficient, we use BTreeMap and not Map
base_task_id_to_job_id: BTreeMap<TakoTaskId, JobId>,
job_id_counter: <JobId as ItemId>::IdType,
task_id_counter: <TaskId as ItemId>::IdType,
pub(crate) autoalloc_service: Option<AutoAllocService>,
event_storage: EventStorage,
server_info: ServerInfo,
}
define_wrapped_type!(StateRef, State, pub);
fn cancel_tasks_from_callback(
state_ref: &StateRef,
tako_ref: &Backend,
job_id: JobId,
tasks: Vec<TakoTaskId>,
) {
if tasks.is_empty() {
return;
}
log::debug!("Canceling {:?} tasks", tasks);
let tako_ref = tako_ref.clone();
let state_ref = state_ref.clone();
tokio::task::spawn_local(async move {
let message = FromGatewayMessage::CancelTasks(CancelTasks { tasks });
let response = tako_ref.send_tako_message(message).await.unwrap();
match response {
ToGatewayMessage::CancelTasksResponse(msg) => {
let mut state = state_ref.get_mut();
if let Some(job) = state.get_job_mut(job_id) {
log::debug!("Tasks {:?} canceled", msg.cancelled_tasks);
log::debug!("Tasks {:?} already finished", msg.already_finished);
for tako_id in msg.cancelled_tasks {
job.set_cancel_state(tako_id, &tako_ref);
}
}
}
ToGatewayMessage::Error(msg) => {
log::debug!("Canceling job {} failed: {}", job_id, msg.message);
}
_ => {
panic!("Invalid message");
}
};
});
}
impl State {
pub fn get_job(&self, job_id: JobId) -> Option<&Job> {
self.jobs.get(&job_id)
}
pub fn get_job_mut(&mut self, job_id: JobId) -> Option<&mut Job> {
self.jobs.get_mut(&job_id)
}
pub fn jobs(&self) -> impl Iterator<Item = &Job> {
self.jobs.values()
}
pub fn add_worker(&mut self, worker: Worker) {
let worker_id = worker.worker_id();
assert!(self.workers.insert(worker_id, worker).is_none())
}
pub fn server_info(&self) -> &ServerInfo {
&self.server_info
}
pub fn set_worker_port(&mut self, port: u16) {
self.server_info.worker_port = port;
}
pub fn add_job(&mut self, job: Job) {
let job_id = job.job_id;
assert!(self
.base_task_id_to_job_id
.insert(job.base_task_id, job_id)
.is_none());
self.event_storage.on_job_submitted(
job_id,
JobInfo {
name: job.name.clone(),
job_desc: job.job_desc.clone(),
base_task_id: job.base_task_id,
task_ids: job.tasks.iter().map(|(id, _)| *id).collect(),
max_fails: job.max_fails,
log: job.log.clone(),
submission_date: job.submission_date,
},
);
assert!(self.jobs.insert(job_id, job).is_none());
if let Some(autoalloc) = &self.autoalloc_service {
autoalloc.on_job_created(job_id);
}
}
/// Completely forgets this job, in order to reduce memory usage.
pub(crate) fn forget_job(&mut self, job_id: JobId) -> Option<Job> {
let job = match self.jobs.remove(&job_id) {
Some(job) => {
assert!(job.is_terminated());
job
}
None => {
log::error!("Trying to forget unknown job {job_id}");
return None;
}
};
self.base_task_id_to_job_id.remove(&job.base_task_id);
Some(job)
}
pub fn get_job_mut_by_tako_task_id(&mut self, task_id: TakoTaskId) -> Option<&mut Job> {
let job_id: JobId = *self.base_task_id_to_job_id.range(..=task_id).next_back()?.1;
let job = self.jobs.get_mut(&job_id)?;
if task_id
< TakoTaskId::new(
job.base_task_id.as_num() + job.n_tasks() as <TaskId as ItemId>::IdType,
)
{
Some(job)
} else {
None
}
}
pub fn new_job_id(&mut self) -> JobId {
let id = self.job_id_counter;
self.job_id_counter += 1;
id.into()
}
pub fn revert_to_job_id(&mut self, id: JobId) {
self.job_id_counter = id.as_num();
}
pub fn last_n_ids(&self, n: u32) -> impl Iterator<Item = JobId> {
let n = min(n, self.job_id_counter - 1);
((self.job_id_counter - n)..self.job_id_counter).map(|id| id.into())
}
pub fn new_task_id(&mut self, task_count: JobTaskCount) -> TakoTaskId {
let id = self.task_id_counter;
self.task_id_counter += task_count;
id.into()
}
pub fn get_workers(&self) -> &Map<WorkerId, Worker> {
&self.workers
}
pub fn get_worker(&self, worker_id: WorkerId) -> Option<&Worker> {
self.workers.get(&worker_id)
}
pub fn get_worker_mut(&mut self, worker_id: WorkerId) -> Option<&mut Worker> {
self.workers.get_mut(&worker_id)
}
pub fn process_task_failed(
&mut self,
state_ref: &StateRef,
tako_ref: &Backend,
msg: TaskFailedMessage,
) {
log::debug!("Task id={} failed: {:?}", msg.id, msg.info);
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
for task_id in msg.cancelled_tasks {
log::debug!(
"Task id={} canceled because of task dependency fails",
task_id
);
job.set_cancel_state(task_id, tako_ref);
}
job.set_failed_state(msg.id, msg.info.message, tako_ref);
if let Some(max_fails) = job.max_fails {
if job.counters.n_failed_tasks > max_fails {
let task_ids = job.non_finished_task_ids();
cancel_tasks_from_callback(state_ref, tako_ref, job.job_id, task_ids);
}
}
self.event_storage.on_task_failed(msg.id);
}
pub fn | (&mut self, msg: TaskUpdate, backend: &Backend) {
log::debug!("Task id={} updated {:?}", msg.id, msg.state);
let (mut job_id, mut is_job_terminated): (Option<JobId>, bool) = (None, false);
match msg.state {
TaskState::Running {
worker_ids,
context,
} => {
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
job.set_running_state(msg.id, worker_ids.clone(), context);
// TODO: Prepare it for multi-node tasks
// This (incomplete) version just takes the first worker as "the worker" for task
self.event_storage.on_task_started(msg.id, worker_ids[0]);
}
TaskState::Finished => {
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
job.set_finished_state(msg.id, backend);
(job_id, is_job_terminated) = (Some(job.job_id), job.is_terminated());
self.event_storage.on_task_finished(msg.id);
}
TaskState::Waiting => {
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
job.set_waiting_state(msg.id);
}
TaskState::Invalid => {
unreachable!()
}
};
if is_job_terminated {
self.event_storage
.on_job_completed(job_id.unwrap(), chrono::offset::Utc::now());
}
}
pub fn process_worker_new(&mut self, msg: NewWorkerMessage) {
log::debug!("New worker id={}", msg.worker_id);
self.add_worker(Worker::new(msg.worker_id, msg.configuration.clone()));
// TODO: use observer in event storage instead of sending these messages directly
if let Some(autoalloc) = &self.autoalloc_service {
autoalloc.on_worker_connected(msg.worker_id, &msg.configuration);
}
self.event_storage
.on_worker_added(msg.worker_id, msg.configuration);
}
pub fn process_worker_lost(
&mut self,
_state_ref: &StateRef,
_tako_ref: &Backend,
msg: LostWorkerMessage,
) {
log::debug!("Worker lost id={}", msg.worker_id);
let worker = self.workers.get_mut(&msg.worker_id).unwrap();
worker.set_offline_state(msg.reason.clone());
if let Some(autoalloc) = &self.autoalloc_service {
autoalloc.on_worker_lost(
msg.worker_id,
&worker.configuration,
LostWorkerDetails {
reason: msg.reason.clone(),
lifetime: (Utc::now() - worker.started_at()).to_std().unwrap(),
},
);
}
for task_id in msg.running_tasks {
let job = self.get_job_mut_by_tako_task_id(task_id).unwrap();
job.set_waiting_state(task_id);
}
self.event_storage.on_worker_lost(msg.worker_id, msg.reason);
}
pub fn stop_autoalloc(&mut self) {
// Drop the sender
self.autoalloc_service = None;
}
pub fn event_storage(&self) -> &EventStorage {
&self.event_storage
}
pub fn event_storage_mut(&mut self) -> &mut EventStorage {
&mut self.event_storage
}
pub fn autoalloc(&self) -> &AutoAllocService {
self.autoalloc_service.as_ref().unwrap()
}
}
impl StateRef {
pub fn new(event_storage: EventStorage, server_info: ServerInfo) -> StateRef {
Self(WrappedRcRefCell::wrap(State {
jobs: Default::default(),
workers: Default::default(),
base_task_id_to_job_id: Default::default(),
job_id_counter: 1,
task_id_counter: 1,
autoalloc_service: None,
event_storage,
server_info,
}))
}
}
#[cfg(test)]
mod tests {
use tako::program::{ProgramDefinition, StdioDef};
use crate::common::arraydef::IntArray;
use crate::server::job::Job;
use crate::server::state::State;
use crate::tests::utils::create_hq_state;
use crate::transfer::messages::{JobDescription, PinMode, TaskDescription};
use crate::{JobId, TakoTaskId};
fn dummy_program_definition() -> ProgramDefinition {
ProgramDefinition {
args: vec![],
env: Default::default(),
stdout: StdioDef::Null,
stderr: StdioDef::Null,
stdin: vec![],
cwd: Default::default(),
}
}
fn test_job<J: Into<JobId>, T: Into<TakoTaskId>>(
ids: IntArray,
job_id: J,
base_task_id: T,
) -> Job {
let job_desc = JobDescription::Array {
ids,
entries: None,
task_desc: TaskDescription {
program: dummy_program_definition(),
resources: Default::default(),
pin_mode: PinMode::None,
task_dir: false,
time_limit: None,
priority: 0,
crash_limit: 5,
},
};
Job::new(
job_desc,
job_id.into(),
base_task_id.into(),
"".to_string(),
None,
None,
Default::default(),
)
}
fn check_id<T: Into<TakoTaskId>>(state: &mut State, task_id: T, expected: Option<u32>) {
assert_eq!(
state
.get_job_mut_by_tako_task_id(task_id.into())
.map(|j| j.job_id.as_num()),
expected
);
}
#[test]
fn test_find_job_id_by_task_id() {
let state_ref = create_hq_state();
let mut state = state_ref.get_mut();
state.add_job(test_job(IntArray::from_range(0, 10), 223, 100));
state.add_job(test_job(IntArray::from_range(0, 15), 224, 110));
state.add_job(test_job(IntArray::from_id(0), 225, 125));
state.add_job(test_job(IntArray::from_id(0), 226, 126));
state.add_job(test_job(IntArray::from_id(0), 227, 130));
let state = &mut state;
check_id(state, 99, None);
check_id(state, 100, Some(223));
check_id(state, 101, Some(223));
check_id(state, 109, Some(223));
check_id(state, 110, Some(224));
check_id(state, 124, Some(224));
check_id(state, 125, Some(225));
check_id(state, 126, Some(226));
check_id(state, 127, None);
check_id(state, 129, None);
check_id(state, 130, Some(227));
check_id(state, 131, None);
}
}
| process_task_update | identifier_name |
state.rs | use std::cmp::min;
use std::collections::BTreeMap;
use chrono::Utc;
use tako::gateway::{
CancelTasks, FromGatewayMessage, LostWorkerMessage, NewWorkerMessage, TaskFailedMessage,
TaskState, TaskUpdate, ToGatewayMessage,
};
use tako::ItemId;
use tako::{define_wrapped_type, TaskId};
use crate::server::autoalloc::{AutoAllocService, LostWorkerDetails};
use crate::server::event::events::JobInfo;
use crate::server::event::storage::EventStorage;
use crate::server::job::Job;
use crate::server::rpc::Backend;
use crate::server::worker::Worker;
use crate::transfer::messages::ServerInfo;
use crate::WrappedRcRefCell;
use crate::{JobId, JobTaskCount, Map, TakoTaskId, WorkerId};
pub struct State {
jobs: crate::Map<JobId, Job>,
workers: crate::Map<WorkerId, Worker>,
// Here we store TaskId -> JobId data, but to make it sparse
// we store ONLY the base_task_id there, i.e. each job has here
// only one entry.
// Example:
// Real mapping: TaskId JobId
// 1 -> 1
// 2 -> 1
// 3 -> 2
// 4 -> 2
// 5 -> 2
// The actual base_task_id_to_job will be 1 -> 1, 3 -> 2
// Therefore we need to find the biggest key that is lower than or equal to a given task id
// To make this query efficient, we use BTreeMap and not Map
base_task_id_to_job_id: BTreeMap<TakoTaskId, JobId>,
job_id_counter: <JobId as ItemId>::IdType,
task_id_counter: <TaskId as ItemId>::IdType,
pub(crate) autoalloc_service: Option<AutoAllocService>,
event_storage: EventStorage,
server_info: ServerInfo,
}
define_wrapped_type!(StateRef, State, pub);
fn cancel_tasks_from_callback(
state_ref: &StateRef,
tako_ref: &Backend,
job_id: JobId,
tasks: Vec<TakoTaskId>,
) {
if tasks.is_empty() {
return;
}
log::debug!("Canceling {:?} tasks", tasks);
let tako_ref = tako_ref.clone();
let state_ref = state_ref.clone();
tokio::task::spawn_local(async move {
let message = FromGatewayMessage::CancelTasks(CancelTasks { tasks });
let response = tako_ref.send_tako_message(message).await.unwrap();
match response {
ToGatewayMessage::CancelTasksResponse(msg) => {
let mut state = state_ref.get_mut();
if let Some(job) = state.get_job_mut(job_id) {
log::debug!("Tasks {:?} canceled", msg.cancelled_tasks);
log::debug!("Tasks {:?} already finished", msg.already_finished);
for tako_id in msg.cancelled_tasks {
job.set_cancel_state(tako_id, &tako_ref);
}
}
}
ToGatewayMessage::Error(msg) => {
log::debug!("Canceling job {} failed: {}", job_id, msg.message);
}
_ => {
panic!("Invalid message");
}
};
});
}
impl State {
pub fn get_job(&self, job_id: JobId) -> Option<&Job> {
self.jobs.get(&job_id)
}
pub fn get_job_mut(&mut self, job_id: JobId) -> Option<&mut Job> {
self.jobs.get_mut(&job_id)
}
pub fn jobs(&self) -> impl Iterator<Item = &Job> {
self.jobs.values()
}
pub fn add_worker(&mut self, worker: Worker) {
let worker_id = worker.worker_id();
assert!(self.workers.insert(worker_id, worker).is_none())
}
pub fn server_info(&self) -> &ServerInfo {
&self.server_info
}
pub fn set_worker_port(&mut self, port: u16) {
self.server_info.worker_port = port;
}
pub fn add_job(&mut self, job: Job) {
let job_id = job.job_id;
assert!(self
.base_task_id_to_job_id
.insert(job.base_task_id, job_id)
.is_none());
self.event_storage.on_job_submitted(
job_id,
JobInfo {
name: job.name.clone(),
job_desc: job.job_desc.clone(),
base_task_id: job.base_task_id,
task_ids: job.tasks.iter().map(|(id, _)| *id).collect(),
max_fails: job.max_fails,
log: job.log.clone(),
submission_date: job.submission_date,
},
);
assert!(self.jobs.insert(job_id, job).is_none());
if let Some(autoalloc) = &self.autoalloc_service {
autoalloc.on_job_created(job_id);
}
}
/// Completely forgets this job, in order to reduce memory usage.
pub(crate) fn forget_job(&mut self, job_id: JobId) -> Option<Job> {
let job = match self.jobs.remove(&job_id) {
Some(job) => {
assert!(job.is_terminated());
job
}
None => {
log::error!("Trying to forget unknown job {job_id}");
return None;
}
};
self.base_task_id_to_job_id.remove(&job.base_task_id);
Some(job)
}
pub fn get_job_mut_by_tako_task_id(&mut self, task_id: TakoTaskId) -> Option<&mut Job> {
let job_id: JobId = *self.base_task_id_to_job_id.range(..=task_id).next_back()?.1;
let job = self.jobs.get_mut(&job_id)?;
if task_id
< TakoTaskId::new(
job.base_task_id.as_num() + job.n_tasks() as <TaskId as ItemId>::IdType,
)
{
Some(job)
} else {
None
}
}
pub fn new_job_id(&mut self) -> JobId {
let id = self.job_id_counter;
self.job_id_counter += 1;
id.into()
}
pub fn revert_to_job_id(&mut self, id: JobId) {
self.job_id_counter = id.as_num();
}
pub fn last_n_ids(&self, n: u32) -> impl Iterator<Item = JobId> {
let n = min(n, self.job_id_counter - 1);
((self.job_id_counter - n)..self.job_id_counter).map(|id| id.into())
}
pub fn new_task_id(&mut self, task_count: JobTaskCount) -> TakoTaskId {
let id = self.task_id_counter;
self.task_id_counter += task_count;
id.into()
}
pub fn get_workers(&self) -> &Map<WorkerId, Worker> {
&self.workers
}
pub fn get_worker(&self, worker_id: WorkerId) -> Option<&Worker> {
self.workers.get(&worker_id)
}
pub fn get_worker_mut(&mut self, worker_id: WorkerId) -> Option<&mut Worker> {
self.workers.get_mut(&worker_id)
}
pub fn process_task_failed(
&mut self,
state_ref: &StateRef,
tako_ref: &Backend,
msg: TaskFailedMessage,
) {
log::debug!("Task id={} failed: {:?}", msg.id, msg.info);
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
for task_id in msg.cancelled_tasks {
log::debug!(
"Task id={} canceled because of task dependency fails",
task_id
);
job.set_cancel_state(task_id, tako_ref);
}
job.set_failed_state(msg.id, msg.info.message, tako_ref);
if let Some(max_fails) = job.max_fails {
if job.counters.n_failed_tasks > max_fails {
let task_ids = job.non_finished_task_ids();
cancel_tasks_from_callback(state_ref, tako_ref, job.job_id, task_ids);
}
}
self.event_storage.on_task_failed(msg.id);
}
pub fn process_task_update(&mut self, msg: TaskUpdate, backend: &Backend) {
log::debug!("Task id={} updated {:?}", msg.id, msg.state);
let (mut job_id, mut is_job_terminated): (Option<JobId>, bool) = (None, false);
match msg.state {
TaskState::Running {
worker_ids,
context,
} => {
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
job.set_running_state(msg.id, worker_ids.clone(), context);
// TODO: Prepare it for multi-node tasks
// This (incomplete) version just takes the first worker as "the worker" for task
self.event_storage.on_task_started(msg.id, worker_ids[0]);
}
TaskState::Finished => {
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
job.set_finished_state(msg.id, backend);
(job_id, is_job_terminated) = (Some(job.job_id), job.is_terminated());
self.event_storage.on_task_finished(msg.id);
}
TaskState::Waiting => {
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
job.set_waiting_state(msg.id);
}
TaskState::Invalid => {
unreachable!()
}
};
if is_job_terminated {
self.event_storage
.on_job_completed(job_id.unwrap(), chrono::offset::Utc::now());
}
}
pub fn process_worker_new(&mut self, msg: NewWorkerMessage) {
log::debug!("New worker id={}", msg.worker_id);
self.add_worker(Worker::new(msg.worker_id, msg.configuration.clone()));
// TODO: use observer in event storage instead of sending these messages directly
if let Some(autoalloc) = &self.autoalloc_service |
self.event_storage
.on_worker_added(msg.worker_id, msg.configuration);
}
pub fn process_worker_lost(
&mut self,
_state_ref: &StateRef,
_tako_ref: &Backend,
msg: LostWorkerMessage,
) {
log::debug!("Worker lost id={}", msg.worker_id);
let worker = self.workers.get_mut(&msg.worker_id).unwrap();
worker.set_offline_state(msg.reason.clone());
if let Some(autoalloc) = &self.autoalloc_service {
autoalloc.on_worker_lost(
msg.worker_id,
&worker.configuration,
LostWorkerDetails {
reason: msg.reason.clone(),
lifetime: (Utc::now() - worker.started_at()).to_std().unwrap(),
},
);
}
for task_id in msg.running_tasks {
let job = self.get_job_mut_by_tako_task_id(task_id).unwrap();
job.set_waiting_state(task_id);
}
self.event_storage.on_worker_lost(msg.worker_id, msg.reason);
}
pub fn stop_autoalloc(&mut self) {
// Drop the sender
self.autoalloc_service = None;
}
pub fn event_storage(&self) -> &EventStorage {
&self.event_storage
}
pub fn event_storage_mut(&mut self) -> &mut EventStorage {
&mut self.event_storage
}
pub fn autoalloc(&self) -> &AutoAllocService {
self.autoalloc_service.as_ref().unwrap()
}
}
impl StateRef {
pub fn new(event_storage: EventStorage, server_info: ServerInfo) -> StateRef {
Self(WrappedRcRefCell::wrap(State {
jobs: Default::default(),
workers: Default::default(),
base_task_id_to_job_id: Default::default(),
job_id_counter: 1,
task_id_counter: 1,
autoalloc_service: None,
event_storage,
server_info,
}))
}
}
#[cfg(test)]
mod tests {
use tako::program::{ProgramDefinition, StdioDef};
use crate::common::arraydef::IntArray;
use crate::server::job::Job;
use crate::server::state::State;
use crate::tests::utils::create_hq_state;
use crate::transfer::messages::{JobDescription, PinMode, TaskDescription};
use crate::{JobId, TakoTaskId};
fn dummy_program_definition() -> ProgramDefinition {
ProgramDefinition {
args: vec![],
env: Default::default(),
stdout: StdioDef::Null,
stderr: StdioDef::Null,
stdin: vec![],
cwd: Default::default(),
}
}
fn test_job<J: Into<JobId>, T: Into<TakoTaskId>>(
ids: IntArray,
job_id: J,
base_task_id: T,
) -> Job {
let job_desc = JobDescription::Array {
ids,
entries: None,
task_desc: TaskDescription {
program: dummy_program_definition(),
resources: Default::default(),
pin_mode: PinMode::None,
task_dir: false,
time_limit: None,
priority: 0,
crash_limit: 5,
},
};
Job::new(
job_desc,
job_id.into(),
base_task_id.into(),
"".to_string(),
None,
None,
Default::default(),
)
}
fn check_id<T: Into<TakoTaskId>>(state: &mut State, task_id: T, expected: Option<u32>) {
assert_eq!(
state
.get_job_mut_by_tako_task_id(task_id.into())
.map(|j| j.job_id.as_num()),
expected
);
}
#[test]
fn test_find_job_id_by_task_id() {
let state_ref = create_hq_state();
let mut state = state_ref.get_mut();
state.add_job(test_job(IntArray::from_range(0, 10), 223, 100));
state.add_job(test_job(IntArray::from_range(0, 15), 224, 110));
state.add_job(test_job(IntArray::from_id(0), 225, 125));
state.add_job(test_job(IntArray::from_id(0), 226, 126));
state.add_job(test_job(IntArray::from_id(0), 227, 130));
let state = &mut state;
check_id(state, 99, None);
check_id(state, 100, Some(223));
check_id(state, 101, Some(223));
check_id(state, 109, Some(223));
check_id(state, 110, Some(224));
check_id(state, 124, Some(224));
check_id(state, 125, Some(225));
check_id(state, 126, Some(226));
check_id(state, 127, None);
check_id(state, 129, None);
check_id(state, 130, Some(227));
check_id(state, 131, None);
}
}
| {
autoalloc.on_worker_connected(msg.worker_id, &msg.configuration);
} | conditional_block |
state.rs | use std::cmp::min;
use std::collections::BTreeMap;
use chrono::Utc;
use tako::gateway::{
CancelTasks, FromGatewayMessage, LostWorkerMessage, NewWorkerMessage, TaskFailedMessage,
TaskState, TaskUpdate, ToGatewayMessage,
};
use tako::ItemId;
use tako::{define_wrapped_type, TaskId};
use crate::server::autoalloc::{AutoAllocService, LostWorkerDetails};
use crate::server::event::events::JobInfo;
use crate::server::event::storage::EventStorage;
use crate::server::job::Job;
use crate::server::rpc::Backend;
use crate::server::worker::Worker;
use crate::transfer::messages::ServerInfo;
use crate::WrappedRcRefCell;
use crate::{JobId, JobTaskCount, Map, TakoTaskId, WorkerId};
pub struct State {
jobs: crate::Map<JobId, Job>,
workers: crate::Map<WorkerId, Worker>,
// Here we store TaskId -> JobId data, but to make it sparse
// we store ONLY the base_task_id there, i.e. each job has here
// only one entry.
// Example:
// Real mapping: TaskId JobId
// 1 -> 1
// 2 -> 1
// 3 -> 2
// 4 -> 2
// 5 -> 2
// The actual base_task_id_to_job will be 1 -> 1, 3 -> 2
// Therefore we need to find the biggest key that is lower than or equal to a given task id
// To make this query efficient, we use BTreeMap and not Map
base_task_id_to_job_id: BTreeMap<TakoTaskId, JobId>,
job_id_counter: <JobId as ItemId>::IdType,
task_id_counter: <TaskId as ItemId>::IdType,
pub(crate) autoalloc_service: Option<AutoAllocService>,
event_storage: EventStorage,
server_info: ServerInfo,
}
define_wrapped_type!(StateRef, State, pub);
fn cancel_tasks_from_callback(
state_ref: &StateRef,
tako_ref: &Backend,
job_id: JobId,
tasks: Vec<TakoTaskId>,
) {
if tasks.is_empty() {
return;
}
log::debug!("Canceling {:?} tasks", tasks);
let tako_ref = tako_ref.clone();
let state_ref = state_ref.clone();
tokio::task::spawn_local(async move {
let message = FromGatewayMessage::CancelTasks(CancelTasks { tasks });
let response = tako_ref.send_tako_message(message).await.unwrap();
match response {
ToGatewayMessage::CancelTasksResponse(msg) => {
let mut state = state_ref.get_mut();
if let Some(job) = state.get_job_mut(job_id) {
log::debug!("Tasks {:?} canceled", msg.cancelled_tasks);
log::debug!("Tasks {:?} already finished", msg.already_finished);
for tako_id in msg.cancelled_tasks {
job.set_cancel_state(tako_id, &tako_ref);
}
}
}
ToGatewayMessage::Error(msg) => {
log::debug!("Canceling job {} failed: {}", job_id, msg.message);
}
_ => {
panic!("Invalid message");
}
};
});
}
impl State {
pub fn get_job(&self, job_id: JobId) -> Option<&Job> {
self.jobs.get(&job_id)
}
pub fn get_job_mut(&mut self, job_id: JobId) -> Option<&mut Job> {
self.jobs.get_mut(&job_id)
}
pub fn jobs(&self) -> impl Iterator<Item = &Job> {
self.jobs.values()
}
pub fn add_worker(&mut self, worker: Worker) {
let worker_id = worker.worker_id();
assert!(self.workers.insert(worker_id, worker).is_none())
}
pub fn server_info(&self) -> &ServerInfo {
&self.server_info
}
pub fn set_worker_port(&mut self, port: u16) {
self.server_info.worker_port = port;
}
pub fn add_job(&mut self, job: Job) {
let job_id = job.job_id;
assert!(self
.base_task_id_to_job_id
.insert(job.base_task_id, job_id)
.is_none());
self.event_storage.on_job_submitted(
job_id,
JobInfo {
name: job.name.clone(),
job_desc: job.job_desc.clone(),
base_task_id: job.base_task_id,
task_ids: job.tasks.iter().map(|(id, _)| *id).collect(),
max_fails: job.max_fails,
log: job.log.clone(),
submission_date: job.submission_date,
},
);
assert!(self.jobs.insert(job_id, job).is_none());
if let Some(autoalloc) = &self.autoalloc_service {
autoalloc.on_job_created(job_id);
}
}
/// Completely forgets this job, in order to reduce memory usage.
pub(crate) fn forget_job(&mut self, job_id: JobId) -> Option<Job> {
let job = match self.jobs.remove(&job_id) {
Some(job) => {
assert!(job.is_terminated());
job
}
None => {
log::error!("Trying to forget unknown job {job_id}");
return None;
}
};
self.base_task_id_to_job_id.remove(&job.base_task_id);
Some(job)
}
pub fn get_job_mut_by_tako_task_id(&mut self, task_id: TakoTaskId) -> Option<&mut Job> {
let job_id: JobId = *self.base_task_id_to_job_id.range(..=task_id).next_back()?.1;
let job = self.jobs.get_mut(&job_id)?;
if task_id
< TakoTaskId::new(
job.base_task_id.as_num() + job.n_tasks() as <TaskId as ItemId>::IdType,
)
{
Some(job)
} else {
None
}
}
pub fn new_job_id(&mut self) -> JobId {
let id = self.job_id_counter;
self.job_id_counter += 1;
id.into()
}
pub fn revert_to_job_id(&mut self, id: JobId) {
self.job_id_counter = id.as_num();
}
pub fn last_n_ids(&self, n: u32) -> impl Iterator<Item = JobId> {
let n = min(n, self.job_id_counter - 1);
((self.job_id_counter - n)..self.job_id_counter).map(|id| id.into())
}
pub fn new_task_id(&mut self, task_count: JobTaskCount) -> TakoTaskId {
let id = self.task_id_counter;
self.task_id_counter += task_count;
id.into()
}
pub fn get_workers(&self) -> &Map<WorkerId, Worker> {
&self.workers
}
pub fn get_worker(&self, worker_id: WorkerId) -> Option<&Worker> {
self.workers.get(&worker_id)
}
pub fn get_worker_mut(&mut self, worker_id: WorkerId) -> Option<&mut Worker> {
self.workers.get_mut(&worker_id)
}
pub fn process_task_failed(
&mut self,
state_ref: &StateRef,
tako_ref: &Backend,
msg: TaskFailedMessage,
) {
log::debug!("Task id={} failed: {:?}", msg.id, msg.info);
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
for task_id in msg.cancelled_tasks {
log::debug!(
"Task id={} canceled because of task dependency fails",
task_id
);
job.set_cancel_state(task_id, tako_ref);
}
job.set_failed_state(msg.id, msg.info.message, tako_ref);
if let Some(max_fails) = job.max_fails {
if job.counters.n_failed_tasks > max_fails {
let task_ids = job.non_finished_task_ids();
cancel_tasks_from_callback(state_ref, tako_ref, job.job_id, task_ids);
}
}
self.event_storage.on_task_failed(msg.id);
}
pub fn process_task_update(&mut self, msg: TaskUpdate, backend: &Backend) {
log::debug!("Task id={} updated {:?}", msg.id, msg.state);
let (mut job_id, mut is_job_terminated): (Option<JobId>, bool) = (None, false);
match msg.state {
TaskState::Running {
worker_ids,
context,
} => {
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
job.set_running_state(msg.id, worker_ids.clone(), context);
// TODO: Prepare it for multi-node tasks
// This (incomplete) version just takes the first worker as "the worker" for task
self.event_storage.on_task_started(msg.id, worker_ids[0]);
}
TaskState::Finished => {
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
job.set_finished_state(msg.id, backend);
(job_id, is_job_terminated) = (Some(job.job_id), job.is_terminated());
self.event_storage.on_task_finished(msg.id);
}
TaskState::Waiting => {
let job = self.get_job_mut_by_tako_task_id(msg.id).unwrap();
job.set_waiting_state(msg.id);
}
TaskState::Invalid => {
unreachable!()
}
};
if is_job_terminated {
self.event_storage
.on_job_completed(job_id.unwrap(), chrono::offset::Utc::now());
}
}
pub fn process_worker_new(&mut self, msg: NewWorkerMessage) {
log::debug!("New worker id={}", msg.worker_id);
self.add_worker(Worker::new(msg.worker_id, msg.configuration.clone()));
// TODO: use observer in event storage instead of sending these messages directly
if let Some(autoalloc) = &self.autoalloc_service {
autoalloc.on_worker_connected(msg.worker_id, &msg.configuration);
}
self.event_storage
.on_worker_added(msg.worker_id, msg.configuration);
}
pub fn process_worker_lost(
&mut self,
_state_ref: &StateRef,
_tako_ref: &Backend,
msg: LostWorkerMessage,
) {
log::debug!("Worker lost id={}", msg.worker_id);
let worker = self.workers.get_mut(&msg.worker_id).unwrap();
worker.set_offline_state(msg.reason.clone());
if let Some(autoalloc) = &self.autoalloc_service {
autoalloc.on_worker_lost(
msg.worker_id,
&worker.configuration,
LostWorkerDetails {
reason: msg.reason.clone(),
lifetime: (Utc::now() - worker.started_at()).to_std().unwrap(),
},
);
}
for task_id in msg.running_tasks {
let job = self.get_job_mut_by_tako_task_id(task_id).unwrap();
job.set_waiting_state(task_id);
}
self.event_storage.on_worker_lost(msg.worker_id, msg.reason);
}
pub fn stop_autoalloc(&mut self) {
// Drop the sender
self.autoalloc_service = None;
}
pub fn event_storage(&self) -> &EventStorage {
&self.event_storage
}
pub fn event_storage_mut(&mut self) -> &mut EventStorage {
&mut self.event_storage
}
pub fn autoalloc(&self) -> &AutoAllocService {
self.autoalloc_service.as_ref().unwrap()
}
}
impl StateRef {
pub fn new(event_storage: EventStorage, server_info: ServerInfo) -> StateRef {
Self(WrappedRcRefCell::wrap(State {
jobs: Default::default(),
workers: Default::default(),
base_task_id_to_job_id: Default::default(),
job_id_counter: 1,
task_id_counter: 1,
autoalloc_service: None,
event_storage,
server_info,
}))
}
}
#[cfg(test)]
mod tests {
use tako::program::{ProgramDefinition, StdioDef};
use crate::common::arraydef::IntArray;
use crate::server::job::Job;
use crate::server::state::State;
use crate::tests::utils::create_hq_state;
use crate::transfer::messages::{JobDescription, PinMode, TaskDescription};
use crate::{JobId, TakoTaskId};
fn dummy_program_definition() -> ProgramDefinition {
ProgramDefinition {
args: vec![],
env: Default::default(),
stdout: StdioDef::Null,
stderr: StdioDef::Null,
stdin: vec![],
cwd: Default::default(),
}
}
fn test_job<J: Into<JobId>, T: Into<TakoTaskId>>(
ids: IntArray,
job_id: J,
base_task_id: T,
) -> Job {
let job_desc = JobDescription::Array {
ids,
entries: None,
task_desc: TaskDescription {
program: dummy_program_definition(),
resources: Default::default(),
pin_mode: PinMode::None,
task_dir: false,
time_limit: None,
priority: 0,
crash_limit: 5,
},
};
Job::new(
job_desc,
job_id.into(),
base_task_id.into(),
"".to_string(),
None,
None,
Default::default(),
)
}
fn check_id<T: Into<TakoTaskId>>(state: &mut State, task_id: T, expected: Option<u32>) { | assert_eq!(
state
.get_job_mut_by_tako_task_id(task_id.into())
.map(|j| j.job_id.as_num()),
expected
);
}
#[test]
fn test_find_job_id_by_task_id() {
let state_ref = create_hq_state();
let mut state = state_ref.get_mut();
state.add_job(test_job(IntArray::from_range(0, 10), 223, 100));
state.add_job(test_job(IntArray::from_range(0, 15), 224, 110));
state.add_job(test_job(IntArray::from_id(0), 225, 125));
state.add_job(test_job(IntArray::from_id(0), 226, 126));
state.add_job(test_job(IntArray::from_id(0), 227, 130));
let state = &mut state;
check_id(state, 99, None);
check_id(state, 100, Some(223));
check_id(state, 101, Some(223));
check_id(state, 109, Some(223));
check_id(state, 110, Some(224));
check_id(state, 124, Some(224));
check_id(state, 125, Some(225));
check_id(state, 126, Some(226));
check_id(state, 127, None);
check_id(state, 129, None);
check_id(state, 130, Some(227));
check_id(state, 131, None);
}
} | random_line_split |
|
parse.rs | use crate::{
parse_utils::*,
substitute::{substitute, Substitution},
SubstitutionGroup,
};
use proc_macro::{token_stream::IntoIter, Ident, Span, TokenStream, TokenTree};
use std::{collections::HashSet, iter::Peekable};
/// Parses the attribute part of an invocation of duplicate, returning
/// all the substitutions that should be made to the item.
pub(crate) fn parse_attr(
attr: TokenStream,
stream_span: Span,
) -> Result<Vec<SubstitutionGroup>, (Span, String)>
{
if identify_syntax(attr.clone(), stream_span)?
{
validate_verbose_attr(attr)
}
else
{
let substitutions = validate_short_attr(attr)?;
let mut reorder = Vec::new();
for _ in 0..substitutions[0].2.len()
{
reorder.push(SubstitutionGroup::new());
}
for (ident, args, subs) in substitutions
{
for (idx, sub) in subs.into_iter().enumerate()
{
let substitution = Substitution::new(&args, sub.into_iter());
if let Ok(substitution) = substitution
{
reorder[idx].add_substitution(
Ident::new(&ident.clone(), Span::call_site()),
substitution,
)?;
}
else
{
return Err((Span::call_site(), "Failed creating substitution".into()));
}
}
}
Ok(reorder)
}
}
/// Returns `true` if the attribute uses the verbose syntax, `false` if it uses the short syntax.
fn identify_syntax(attr: TokenStream, stream_span: Span) -> Result<bool, (Span, String)>
{
if let Some(token) = next_token(&mut attr.into_iter(), "Could not identify syntax type.")?
{
match token
{
TokenTree::Group(_) => Ok(true),
TokenTree::Ident(_) => Ok(false),
TokenTree::Punct(p) if is_nested_invocation(&p) => Ok(true),
_ =>
{
Err((
token.span(),
"Expected substitution identifier or group. Received neither.".into(),
))
},
}
}
else
{
Err((stream_span, "No substitutions found.".into()))
}
}
/// Validates that the attribute part of a duplicate invocation uses
/// the verbose syntax, and returns all the substitutions that should be made.
fn validate_verbose_attr(attr: TokenStream) -> Result<Vec<SubstitutionGroup>, (Span, String)>
{
if attr.is_empty()
{
return Err((Span::call_site(), "No substitutions found.".into()));
}
let mut sub_groups = Vec::new();
let mut iter = attr.into_iter();
let mut substitution_ids = None;
loop
{
if let Some(tree) = next_token(&mut iter, "Expected substitution group.")?
{
match tree
{
TokenTree::Punct(p) if is_nested_invocation(&p) =>
{
let nested_duplicated = invoke_nested(&mut iter, p.span())?;
let subs = validate_verbose_attr(nested_duplicated)?;
sub_groups.extend(subs.into_iter());
},
_ =>
{
sub_groups.push(extract_verbose_substitutions(tree, &substitution_ids)?);
if None == substitution_ids
{
substitution_ids = Some(sub_groups[0].identifiers().cloned().collect())
}
},
}
}
else
{
break;
}
}
Ok(sub_groups)
}
/// Extracts a substitution group in the verbose syntax.
fn | (
tree: TokenTree,
existing: &Option<HashSet<String>>,
) -> Result<SubstitutionGroup, (Span, String)>
{
// Must get span now, before it's corrupted.
let tree_span = tree.span();
let group = check_group(
tree,
"Hint: When using verbose syntax, a substitutions must be enclosed in a \
group.\nExample:\n..\n[\n\tidentifier1 [ substitution1 ]\n\tidentifier2 [ substitution2 \
]\n]",
)?;
if group.stream().into_iter().count() == 0
{
return Err((group.span(), "No substitution groups found.".into()));
}
let mut substitutions = SubstitutionGroup::new();
let mut stream = group.stream().into_iter();
loop
{
if let Some(ident) = next_token(&mut stream, "Expected substitution identifier.")?
{
if let TokenTree::Ident(ident) = ident
{
let sub = parse_group(
&mut stream,
ident.span(),
"Hint: A substitution identifier should be followed by a group containing the \
code to be inserted instead of any occurrence of the identifier.",
)?;
// Check have found the same as existing
if let Some(idents) = existing
{
if !idents.contains(&ident.to_string())
{
return Err((
ident.span(),
format!(
"Unfamiliar substitution identifier. '{}' is not present in previous \
substitution groups.",
ident
),
));
}
}
substitutions.add_substitution(ident, Substitution::new_simple(sub.stream()))?;
}
else
{
return Err((
ident.span(),
"Expected substitution identifier, got something else.".into(),
));
}
}
else
{
// Check no substitution idents are missing.
if let Some(idents) = existing
{
let sub_idents = substitutions.identifiers().cloned().collect();
let diff: Vec<_> = idents.difference(&sub_idents).collect();
if diff.len() > 0
{
let mut msg: String = "Missing substitutions. Previous substitution groups \
had the following identifiers not present in this \
group: "
.into();
for ident in diff
{
msg.push_str("'");
msg.push_str(&ident.to_string());
msg.push_str("' ");
}
return Err((tree_span, msg));
}
}
break;
}
}
Ok(substitutions)
}
/// Validates that the attribute part of a duplicate invocation uses
/// the short syntax and returns the substitution that should be made.
fn validate_short_attr(
attr: TokenStream,
) -> Result<Vec<(String, Vec<String>, Vec<TokenStream>)>, (Span, String)>
{
if attr.is_empty()
{
return Err((Span::call_site(), "No substitutions found.".into()));
}
let mut iter = attr.into_iter();
let (idents, span) = validate_short_get_identifiers(&mut iter, Span::call_site())?;
let mut result: Vec<_> = idents
.into_iter()
.map(|(ident, args)| (ident, args, Vec::new()))
.collect();
validate_short_get_all_substitution_goups(iter, span, &mut result)?;
Ok(result)
}
/// Assuming use of the short syntax, gets the initial list of substitution
/// identifiers.
fn validate_short_get_identifiers(
iter: &mut IntoIter,
mut span: Span,
) -> Result<(Vec<(String, Vec<String>)>, Span), (Span, String)>
{
let mut iter = iter.peekable();
let mut result = Vec::new();
loop
{
if let Some(next_token) = next_token(&mut iter, "Expected substitution identifier or ';'.")?
{
span = next_token.span();
match next_token
{
TokenTree::Ident(ident) =>
{
result.push((
ident.to_string(),
validate_short_get_identifier_arguments(&mut iter)?, // Vec::new()
))
},
TokenTree::Punct(p) if is_semicolon(&p) => break,
_ => return Err((span, "Expected substitution identifier or ';'.".into())),
}
}
else
{
return Err((span, "Expected substitution identifier or ';'.".into()));
}
}
Ok((result, span))
}
/// Assuming use of the short syntax, gets the list of identifier arguments.
fn validate_short_get_identifier_arguments(
iter: &mut Peekable<impl Iterator<Item = TokenTree>>,
) -> Result<Vec<String>, (Span, String)>
{
let mut result = Vec::new();
if let Some(token) = iter.peek()
{
if let TokenTree::Group(group) = token
{
if check_delimiter(group).is_ok()
{
let mut arg_iter = group.stream().into_iter();
while let Some(token) = arg_iter.next()
{
if let TokenTree::Ident(ident) = token
{
result.push(ident.to_string());
if let Some(token) = arg_iter.next()
{
match token
{
TokenTree::Punct(punct) if punct_is_char(&punct, ',') => (),
_ => return Err((token.span(), "Expected ','.".into())),
}
}
}
else
{
return Err((
token.span(),
"Expected substitution identifier argument as identifier.".into(),
));
}
}
// Make sure to consume the group
let _ = iter.next();
}
}
}
Ok(result)
}
/// Gets all substitution groups in the short syntax and inserts
/// them into the given vec.
fn validate_short_get_all_substitution_goups<'a>(
iter: impl Iterator<Item = TokenTree>,
mut span: Span,
result: &mut Vec<(String, Vec<String>, Vec<TokenStream>)>,
) -> Result<(), (Span, String)>
{
let mut iter = iter.peekable();
loop
{
if let Some(TokenTree::Punct(p)) = iter.peek()
{
if is_nested_invocation(&p)
{
let p_span = p.span();
// consume '#'
iter.next();
let nested_duplicated = invoke_nested(&mut iter, p_span)?;
validate_short_get_all_substitution_goups(
&mut nested_duplicated.into_iter(),
span.clone(),
result,
)?;
}
}
else
{
validate_short_get_substitutions(
&mut iter,
span,
result.iter_mut().map(|(_, _, vec)| {
vec.push(TokenStream::new());
vec.last_mut().unwrap()
}),
)?;
if let Some(token) = iter.next()
{
span = token.span();
if let TokenTree::Punct(p) = token
{
if is_semicolon(&p)
{
continue;
}
}
return Err((span, "Expected ';'.".into()));
}
else
{
break;
}
}
}
Ok(())
}
/// Extracts a substitution group in the short syntax and inserts it into
/// the elements returned by the given groups iterator.
fn validate_short_get_substitutions<'a>(
iter: &mut impl Iterator<Item = TokenTree>,
mut span: Span,
mut groups: impl Iterator<Item = &'a mut TokenStream>,
) -> Result<Span, (Span, String)>
{
if let Some(token) = iter.next()
{
let group = check_group(token, "")?;
span = group.span();
*groups.next().unwrap() = group.stream();
for stream in groups
{
let group = parse_group(iter, span, "")?;
span = group.span();
*stream = group.stream();
}
}
Ok(span)
}
/// Invokes a nested invocation of duplicate, assuming the
/// next group is the attribute part of the invocation and the
/// group after that is the element.
fn invoke_nested(
iter: &mut impl Iterator<Item = TokenTree>,
span: Span,
) -> Result<TokenStream, (Span, String)>
{
let hints = "Hint: '#' is a nested invocation of the macro and must therefore be followed by \
a group containing the invocation.\nExample:\n#[\n\tidentifier [ substitute1 ] [ \
substitute2 ]\n][\n\tCode to be substituted whenever 'identifier' occurs \n]";
let nested_attr = parse_group(iter, span, hints)?;
let nested_subs = parse_attr(nested_attr.stream(), nested_attr.span())?;
let nested_item = parse_group(iter, nested_attr.span(), hints)?;
Ok(substitute(nested_item.stream(), nested_subs))
}
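// Illustrative sketch mirroring the hint string above: a nested invocation supplies its
// own attribute and body, is expanded first, and its output is then parsed as further
// substitution groups for the outer call.
//
// #[
// ident [ substitute1 ] [ substitute2 ]
// ][
// Code to be substituted whenever 'ident' occurs
// ]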
| extract_verbose_substitutions | identifier_name |
parse.rs | use crate::{
parse_utils::*,
substitute::{substitute, Substitution},
SubstitutionGroup,
};
use proc_macro::{token_stream::IntoIter, Ident, Span, TokenStream, TokenTree};
use std::{collections::HashSet, iter::Peekable};
/// Parses the attribute part of an invocation of duplicate, returning
/// all the substitutions that should be made to the item.
pub(crate) fn parse_attr(
attr: TokenStream,
stream_span: Span,
) -> Result<Vec<SubstitutionGroup>, (Span, String)>
{
if identify_syntax(attr.clone(), stream_span)?
{
validate_verbose_attr(attr)
}
else
{
let substitutions = validate_short_attr(attr)?;
let mut reorder = Vec::new();
for _ in 0..substitutions[0].2.len()
{
reorder.push(SubstitutionGroup::new());
}
for (ident, args, subs) in substitutions
{
for (idx, sub) in subs.into_iter().enumerate()
{
let substitution = Substitution::new(&args, sub.into_iter());
if let Ok(substitution) = substitution
{
reorder[idx].add_substitution(
Ident::new(&ident.clone(), Span::call_site()),
substitution,
)?;
}
else
{
return Err((Span::call_site(), "Failed creating substitution".into()));
}
}
}
Ok(reorder)
}
}
/// Returns true if the attribute uses the verbose syntax and false if it uses the short syntax.
fn identify_syntax(attr: TokenStream, stream_span: Span) -> Result<bool, (Span, String)>
{
if let Some(token) = next_token(&mut attr.into_iter(), "Could not identify syntax type.")?
{
match token
{
TokenTree::Group(_) => Ok(true),
TokenTree::Ident(_) => Ok(false),
TokenTree::Punct(p) if is_nested_invocation(&p) => Ok(true),
_ =>
{
Err((
token.span(),
"Expected substitution identifier or group. Received neither.".into(),
))
},
}
}
else
{
Err((stream_span, "No substitutions found.".into()))
}
}
/// Validates that the attribute part of a duplicate invocation uses
/// the verbose syntax, and returns all the substitutions that should be made.
fn validate_verbose_attr(attr: TokenStream) -> Result<Vec<SubstitutionGroup>, (Span, String)>
{
if attr.is_empty()
{
return Err((Span::call_site(), "No substitutions found.".into()));
}
let mut sub_groups = Vec::new();
let mut iter = attr.into_iter();
let mut substitution_ids = None;
loop
{
if let Some(tree) = next_token(&mut iter, "Expected substitution group.")?
{
match tree
{
TokenTree::Punct(p) if is_nested_invocation(&p) =>
{
let nested_duplicated = invoke_nested(&mut iter, p.span())?;
let subs = validate_verbose_attr(nested_duplicated)?;
sub_groups.extend(subs.into_iter());
},
_ =>
{
sub_groups.push(extract_verbose_substitutions(tree, &substitution_ids)?);
if None == substitution_ids
{
substitution_ids = Some(sub_groups[0].identifiers().cloned().collect())
}
},
}
}
else
{
break;
}
}
Ok(sub_groups)
}
/// Extracts a substitution group in the verbose syntax.
fn extract_verbose_substitutions(
tree: TokenTree,
existing: &Option<HashSet<String>>,
) -> Result<SubstitutionGroup, (Span, String)>
|
/// Validates that the attribute part of a duplicate invocation uses
/// the short syntax and returns the substitution that should be made.
fn validate_short_attr(
attr: TokenStream,
) -> Result<Vec<(String, Vec<String>, Vec<TokenStream>)>, (Span, String)>
{
if attr.is_empty()
{
return Err((Span::call_site(), "No substitutions found.".into()));
}
let mut iter = attr.into_iter();
let (idents, span) = validate_short_get_identifiers(&mut iter, Span::call_site())?;
let mut result: Vec<_> = idents
.into_iter()
.map(|(ident, args)| (ident, args, Vec::new()))
.collect();
validate_short_get_all_substitution_goups(iter, span, &mut result)?;
Ok(result)
}
/// Assuming use of the short syntax, gets the initial list of substitution
/// identifiers.
fn validate_short_get_identifiers(
iter: &mut IntoIter,
mut span: Span,
) -> Result<(Vec<(String, Vec<String>)>, Span), (Span, String)>
{
let mut iter = iter.peekable();
let mut result = Vec::new();
loop
{
if let Some(next_token) = next_token(&mut iter, "Expected substitution identifier or ';'.")?
{
span = next_token.span();
match next_token
{
TokenTree::Ident(ident) =>
{
result.push((
ident.to_string(),
validate_short_get_identifier_arguments(&mut iter)?, // Vec::new()
))
},
TokenTree::Punct(p) if is_semicolon(&p) => break,
_ => return Err((span, "Expected substitution identifier or ';'.".into())),
}
}
else
{
return Err((span, "Expected substitution identifier or ';'.".into()));
}
}
Ok((result, span))
}
/// Assuming use of the short syntax, gets the list of identifier arguments.
fn validate_short_get_identifier_arguments(
iter: &mut Peekable<impl Iterator<Item = TokenTree>>,
) -> Result<Vec<String>, (Span, String)>
{
let mut result = Vec::new();
if let Some(token) = iter.peek()
{
if let TokenTree::Group(group) = token
{
if check_delimiter(group).is_ok()
{
let mut arg_iter = group.stream().into_iter();
while let Some(token) = arg_iter.next()
{
if let TokenTree::Ident(ident) = token
{
result.push(ident.to_string());
if let Some(token) = arg_iter.next()
{
match token
{
TokenTree::Punct(punct) if punct_is_char(&punct, ',') => (),
_ => return Err((token.span(), "Expected ','.".into())),
}
}
}
else
{
return Err((
token.span(),
"Expected substitution identifier argument as identifier.".into(),
));
}
}
// Make sure to consume the group
let _ = iter.next();
}
}
}
Ok(result)
}
/// Gets all substitution groups in the short syntax and inserts
/// them into the given vec.
fn validate_short_get_all_substitution_goups<'a>(
iter: impl Iterator<Item = TokenTree>,
mut span: Span,
result: &mut Vec<(String, Vec<String>, Vec<TokenStream>)>,
) -> Result<(), (Span, String)>
{
let mut iter = iter.peekable();
loop
{
if let Some(TokenTree::Punct(p)) = iter.peek()
{
if is_nested_invocation(&p)
{
let p_span = p.span();
// consume '#'
iter.next();
let nested_duplicated = invoke_nested(&mut iter, p_span)?;
validate_short_get_all_substitution_goups(
&mut nested_duplicated.into_iter(),
span.clone(),
result,
)?;
}
}
else
{
validate_short_get_substitutions(
&mut iter,
span,
result.iter_mut().map(|(_, _, vec)| {
vec.push(TokenStream::new());
vec.last_mut().unwrap()
}),
)?;
if let Some(token) = iter.next()
{
span = token.span();
if let TokenTree::Punct(p) = token
{
if is_semicolon(&p)
{
continue;
}
}
return Err((span, "Expected ';'.".into()));
}
else
{
break;
}
}
}
Ok(())
}
/// Extracts a substitution group in the short syntax and inserts it into
/// the elements returned by the given groups iterator.
fn validate_short_get_substitutions<'a>(
iter: &mut impl Iterator<Item = TokenTree>,
mut span: Span,
mut groups: impl Iterator<Item = &'a mut TokenStream>,
) -> Result<Span, (Span, String)>
{
if let Some(token) = iter.next()
{
let group = check_group(token, "")?;
span = group.span();
*groups.next().unwrap() = group.stream();
for stream in groups
{
let group = parse_group(iter, span, "")?;
span = group.span();
*stream = group.stream();
}
}
Ok(span)
}
/// Invokes a nested invocation of duplicate, assuming the
/// next group is the attribute part of the invocation and the
/// group after that is the element.
fn invoke_nested(
iter: &mut impl Iterator<Item = TokenTree>,
span: Span,
) -> Result<TokenStream, (Span, String)>
{
let hints = "Hint: '#' is a nested invocation of the macro and must therefore be followed by \
a group containing the invocation.\nExample:\n#[\n\tidentifier [ substitute1 ] [ \
substitute2 ]\n][\n\tCode to be substituted whenever 'identifier' occurs \n]";
let nested_attr = parse_group(iter, span, hints)?;
let nested_subs = parse_attr(nested_attr.stream(), nested_attr.span())?;
let nested_item = parse_group(iter, nested_attr.span(), hints)?;
Ok(substitute(nested_item.stream(), nested_subs))
}
| {
// Must get span now, before it's corrupted.
let tree_span = tree.span();
let group = check_group(
tree,
"Hint: When using verbose syntax, substitutions must be enclosed in a \
group.\nExample:\n..\n[\n\tidentifier1 [ substitution1 ]\n\tidentifier2 [ substitution2 \
]\n]",
)?;
if group.stream().into_iter().count() == 0
{
return Err((group.span(), "No substitution groups found.".into()));
}
let mut substitutions = SubstitutionGroup::new();
let mut stream = group.stream().into_iter();
loop
{
if let Some(ident) = next_token(&mut stream, "Expected substitution identifier.")?
{
if let TokenTree::Ident(ident) = ident
{
let sub = parse_group(
&mut stream,
ident.span(),
"Hint: A substitution identifier should be followed by a group containing the \
code to be inserted instead of any occurrence of the identifier.",
)?;
// Check that we have found the same identifiers as the existing groups
if let Some(idents) = existing
{
if !idents.contains(&ident.to_string())
{
return Err((
ident.span(),
format!(
"Unfamiliar substitution identifier. '{}' is not present in previous \
substitution groups.",
ident
),
));
}
}
substitutions.add_substitution(ident, Substitution::new_simple(sub.stream()))?;
}
else
{
return Err((
ident.span(),
"Expected substitution identifier, got something else.".into(),
));
}
}
else
{
// Check no substitution idents are missing.
if let Some(idents) = existing
{
let sub_idents = substitutions.identifiers().cloned().collect();
let diff: Vec<_> = idents.difference(&sub_idents).collect();
if diff.len() > 0
{
let mut msg: String = "Missing substitutions. Previous substitution groups \
had the following identifiers not present in this \
group: "
.into();
for ident in diff
{
msg.push_str("'");
msg.push_str(&ident.to_string());
msg.push_str("' ");
}
return Err((tree_span, msg));
}
}
break;
}
}
Ok(substitutions)
} | identifier_body |
parse.rs | use crate::{
parse_utils::*,
substitute::{substitute, Substitution},
SubstitutionGroup,
};
use proc_macro::{token_stream::IntoIter, Ident, Span, TokenStream, TokenTree};
use std::{collections::HashSet, iter::Peekable};
/// Parses the attribute part of an invocation of duplicate, returning
/// all the substitutions that should be made to the item.
pub(crate) fn parse_attr(
attr: TokenStream,
stream_span: Span,
) -> Result<Vec<SubstitutionGroup>, (Span, String)>
{
if identify_syntax(attr.clone(), stream_span)?
{
validate_verbose_attr(attr)
}
else
{
let substitutions = validate_short_attr(attr)?;
let mut reorder = Vec::new();
for _ in 0..substitutions[0].2.len()
{
reorder.push(SubstitutionGroup::new());
}
for (ident, args, subs) in substitutions
{
for (idx, sub) in subs.into_iter().enumerate()
{
let substitution = Substitution::new(&args, sub.into_iter());
if let Ok(substitution) = substitution
{
reorder[idx].add_substitution(
Ident::new(&ident.clone(), Span::call_site()),
substitution,
)?;
}
else
{
return Err((Span::call_site(), "Failed creating substitution".into()));
}
}
}
Ok(reorder)
}
}
/// Returns true if the attribute uses the verbose syntax and false if it uses the short syntax.
fn identify_syntax(attr: TokenStream, stream_span: Span) -> Result<bool, (Span, String)>
{
if let Some(token) = next_token(&mut attr.into_iter(), "Could not identify syntax type.")?
{
match token
{
TokenTree::Group(_) => Ok(true),
TokenTree::Ident(_) => Ok(false),
TokenTree::Punct(p) if is_nested_invocation(&p) => Ok(true),
_ =>
{
Err((
token.span(),
"Expected substitution identifier or group. Received neither.".into(),
))
},
}
}
else
{
Err((stream_span, "No substitutions found.".into()))
}
}
/// Validates that the attribute part of a duplicate invocation uses
/// the verbose syntax, and returns all the substitutions that should be made.
fn validate_verbose_attr(attr: TokenStream) -> Result<Vec<SubstitutionGroup>, (Span, String)>
{
if attr.is_empty()
{
return Err((Span::call_site(), "No substitutions found.".into()));
}
let mut sub_groups = Vec::new();
let mut iter = attr.into_iter();
let mut substitution_ids = None;
loop
{
if let Some(tree) = next_token(&mut iter, "Expected substitution group.")?
{
match tree
{
TokenTree::Punct(p) if is_nested_invocation(&p) =>
{
let nested_duplicated = invoke_nested(&mut iter, p.span())?;
let subs = validate_verbose_attr(nested_duplicated)?;
sub_groups.extend(subs.into_iter());
},
_ =>
{
sub_groups.push(extract_verbose_substitutions(tree, &substitution_ids)?);
if None == substitution_ids
{
substitution_ids = Some(sub_groups[0].identifiers().cloned().collect())
}
},
}
}
else
{
break;
}
}
Ok(sub_groups)
}
/// Extracts a substitution group in the verbose syntax.
fn extract_verbose_substitutions(
tree: TokenTree,
existing: &Option<HashSet<String>>, | let group = check_group(
tree,
"Hint: When using verbose syntax, substitutions must be enclosed in a \
group.\nExample:\n..\n[\n\tidentifier1 [ substitution1 ]\n\tidentifier2 [ substitution2 \
]\n]",
)?;
if group.stream().into_iter().count() == 0
{
return Err((group.span(), "No substitution groups found.".into()));
}
let mut substitutions = SubstitutionGroup::new();
let mut stream = group.stream().into_iter();
loop
{
if let Some(ident) = next_token(&mut stream, "Expected substitution identifier.")?
{
if let TokenTree::Ident(ident) = ident
{
let sub = parse_group(
&mut stream,
ident.span(),
"Hint: A substitution identifier should be followed by a group containing the \
code to be inserted instead of any occurrence of the identifier.",
)?;
// Check that we have found the same identifiers as the existing groups
if let Some(idents) = existing
{
if !idents.contains(&ident.to_string())
{
return Err((
ident.span(),
format!(
"Unfamiliar substitution identifier. '{}' is not present in previous \
substitution groups.",
ident
),
));
}
}
substitutions.add_substitution(ident, Substitution::new_simple(sub.stream()))?;
}
else
{
return Err((
ident.span(),
"Expected substitution identifier, got something else.".into(),
));
}
}
else
{
// Check no substitution idents are missing.
if let Some(idents) = existing
{
let sub_idents = substitutions.identifiers().cloned().collect();
let diff: Vec<_> = idents.difference(&sub_idents).collect();
if diff.len() > 0
{
let mut msg: String = "Missing substitutions. Previous substitution groups \
had the following identifiers not present in this \
group: "
.into();
for ident in diff
{
msg.push_str("'");
msg.push_str(&ident.to_string());
msg.push_str("' ");
}
return Err((tree_span, msg));
}
}
break;
}
}
Ok(substitutions)
}
/// Validates that the attribute part of a duplicate invocation uses
/// the short syntax and returns the substitution that should be made.
fn validate_short_attr(
attr: TokenStream,
) -> Result<Vec<(String, Vec<String>, Vec<TokenStream>)>, (Span, String)>
{
if attr.is_empty()
{
return Err((Span::call_site(), "No substitutions found.".into()));
}
let mut iter = attr.into_iter();
let (idents, span) = validate_short_get_identifiers(&mut iter, Span::call_site())?;
let mut result: Vec<_> = idents
.into_iter()
.map(|(ident, args)| (ident, args, Vec::new()))
.collect();
validate_short_get_all_substitution_goups(iter, span, &mut result)?;
Ok(result)
}
/// Assuming use of the short syntax, gets the initial list of substitution
/// identifiers.
fn validate_short_get_identifiers(
iter: &mut IntoIter,
mut span: Span,
) -> Result<(Vec<(String, Vec<String>)>, Span), (Span, String)>
{
let mut iter = iter.peekable();
let mut result = Vec::new();
loop
{
if let Some(next_token) = next_token(&mut iter, "Expected substitution identifier or ';'.")?
{
span = next_token.span();
match next_token
{
TokenTree::Ident(ident) =>
{
result.push((
ident.to_string(),
validate_short_get_identifier_arguments(&mut iter)?, // Vec::new()
))
},
TokenTree::Punct(p) if is_semicolon(&p) => break,
_ => return Err((span, "Expected substitution identifier or ';'.".into())),
}
}
else
{
return Err((span, "Expected substitution identifier or ';'.".into()));
}
}
Ok((result, span))
}
/// Assuming use of the short syntax, gets the list of identifier arguments.
fn validate_short_get_identifier_arguments(
iter: &mut Peekable<impl Iterator<Item = TokenTree>>,
) -> Result<Vec<String>, (Span, String)>
{
let mut result = Vec::new();
if let Some(token) = iter.peek()
{
if let TokenTree::Group(group) = token
{
if check_delimiter(group).is_ok()
{
let mut arg_iter = group.stream().into_iter();
while let Some(token) = arg_iter.next()
{
if let TokenTree::Ident(ident) = token
{
result.push(ident.to_string());
if let Some(token) = arg_iter.next()
{
match token
{
TokenTree::Punct(punct) if punct_is_char(&punct, ',') => (),
_ => return Err((token.span(), "Expected ','.".into())),
}
}
}
else
{
return Err((
token.span(),
"Expected substitution identifier argument as identifier.".into(),
));
}
}
// Make sure to consume the group
let _ = iter.next();
}
}
}
Ok(result)
}
/// Gets all substitution groups in the short syntax and inserts
/// them into the given vec.
fn validate_short_get_all_substitution_goups<'a>(
iter: impl Iterator<Item = TokenTree>,
mut span: Span,
result: &mut Vec<(String, Vec<String>, Vec<TokenStream>)>,
) -> Result<(), (Span, String)>
{
let mut iter = iter.peekable();
loop
{
if let Some(TokenTree::Punct(p)) = iter.peek()
{
if is_nested_invocation(&p)
{
let p_span = p.span();
// consume '#'
iter.next();
let nested_duplicated = invoke_nested(&mut iter, p_span)?;
validate_short_get_all_substitution_goups(
&mut nested_duplicated.into_iter(),
span.clone(),
result,
)?;
}
}
else
{
validate_short_get_substitutions(
&mut iter,
span,
result.iter_mut().map(|(_, _, vec)| {
vec.push(TokenStream::new());
vec.last_mut().unwrap()
}),
)?;
if let Some(token) = iter.next()
{
span = token.span();
if let TokenTree::Punct(p) = token
{
if is_semicolon(&p)
{
continue;
}
}
return Err((span, "Expected ';'.".into()));
}
else
{
break;
}
}
}
Ok(())
}
/// Extracts a substitution group in the short syntax and inserts it into
/// the elements returned by the given groups iterator.
fn validate_short_get_substitutions<'a>(
iter: &mut impl Iterator<Item = TokenTree>,
mut span: Span,
mut groups: impl Iterator<Item = &'a mut TokenStream>,
) -> Result<Span, (Span, String)>
{
if let Some(token) = iter.next()
{
let group = check_group(token, "")?;
span = group.span();
*groups.next().unwrap() = group.stream();
for stream in groups
{
let group = parse_group(iter, span, "")?;
span = group.span();
*stream = group.stream();
}
}
Ok(span)
}
/// Invokes a nested invocation of duplicate, assuming the
/// next group is the attribute part of the invocation and the
/// group after that is the element.
fn invoke_nested(
iter: &mut impl Iterator<Item = TokenTree>,
span: Span,
) -> Result<TokenStream, (Span, String)>
{
let hints = "Hint: '#' is a nested invocation of the macro and must therefore be followed by \
a group containing the invocation.\nExample:\n#[\n\tidentifier [ substitute1 ] [ \
substitute2 ]\n][\n\tCode to be substituted whenever 'identifier' occurs \n]";
let nested_attr = parse_group(iter, span, hints)?;
let nested_subs = parse_attr(nested_attr.stream(), nested_attr.span())?;
let nested_item = parse_group(iter, nested_attr.span(), hints)?;
Ok(substitute(nested_item.stream(), nested_subs))
} | ) -> Result<SubstitutionGroup, (Span, String)>
{
// Must get span now, before it's corrupted.
let tree_span = tree.span(); | random_line_split |
font.go | // Copyright 2015, Timothy Bogdala <[email protected]>
// See the LICENSE file for more details.
package eweygewey
/*
Based primarily on gltext found at https://github.com/go-gl/gltext
But also based on examples from the freetype-go project:
https://github.com/golang/freetype
This implementation differs in the way the images are rendered and then
copied into an OpenGL texture. In addition to that, this module can
create a renderable 'string' node which is a bunch of polygons with uv's
mapped to the appropriate glyphs.
*/
import (
"fmt"
"image"
"image/color"
"image/draw"
"io/ioutil"
"math"
"os"
mgl "github.com/go-gl/mathgl/mgl32"
ft "github.com/golang/freetype"
"github.com/golang/freetype/truetype"
graphics "github.com/tbogdala/fizzle/graphicsprovider"
imgfont "golang.org/x/image/font"
"golang.org/x/image/math/fixed"
)
// runeData stores information pulled from the freetype parsing of glyphs.
type runeData struct {
imgX, imgY int // offset into the image texture for the top left position of rune
advanceWidth, leftSideBearing float32 // HMetric data from glyph
advanceHeight, topSideBearing float32 // VMetric data from glyph
uvMinX, uvMinY float32
uvMaxX, uvMaxY float32
}
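// Note (added for clarity, derived from newFontBytes and CreateTextAdv below): the UV
// fields describe the glyph's cell in the font atlas; uvMinY is taken from the cell's
// lower edge in image space (image rows grow downward) so that it pairs with the bottom
// vertexes of the screen-space quad, while uvMaxY pairs with the top.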
// Font contains data regarding a font and the texture that was created
// with the specified set of glyphs. It can then be used to create
// renderable string objects.
type Font struct {
Texture graphics.Texture
TextureSize int
Glyphs string
GlyphHeight float32
GlyphWidth float32
Owner *Manager
locations map[rune]runeData
opts truetype.Options
face imgfont.Face
}
// newFont takes a fontFilepath and uses the Go freetype library to parse it
// and render the specified glyphs to a texture that is then buffered into OpenGL.
func newFont(owner *Manager, fontFilepath string, scaleInt int, glyphs string) (f *Font, e error) {
// Load the font used for UI interaction
fontFile, err := os.Open(fontFilepath)
if err != nil {
return f, fmt.Errorf("Failed to open the font file.\n%v", err)
}
defer fontFile.Close()
// load in the font
fontBytes, err := ioutil.ReadAll(fontFile)
if err != nil {
return f, fmt.Errorf("Failed to load font data from stream.\n%v", err)
}
return newFontBytes(owner, fontBytes, scaleInt, glyphs)
}
// newFontBytes takes a byte slice representing the font and uses the Go freetype library to parse it
// and render the specified glyphs to a texture that is then buffered into OpenGL.
func newFontBytes(owner *Manager, fontBytes []byte, scaleInt int, glyphs string) (f *Font, e error) {
f = new(Font)
scale := fixed.I(scaleInt)
// allocate the location map
f.locations = make(map[rune]runeData)
// parse the truetype font data
ttfData, err := ft.ParseFont(fontBytes)
if err != nil {
return f, fmt.Errorf("Failed to parse the truetype font data.\n%v", err)
}
f.opts.Size = float64(scaleInt)
f.face = truetype.NewFace(ttfData, &f.opts)
// this may have negative components, but get the bounds for the font
glyphBounds := ttfData.Bounds(scale)
// compute the pixel dimensions of a cell large enough to hold any glyph in the font
glyphDimensions := glyphBounds.Max.Sub(glyphBounds.Min)
glyphWidth := fixedInt26ToFloat(glyphDimensions.X)
glyphHeight := fixedInt26ToFloat(glyphDimensions.Y)
glyphCeilWidth := int(math.Ceil(float64(glyphWidth)))
glyphCeilHeight := int(math.Ceil(float64(glyphHeight)))
// create the buffer image used to draw the glyphs
glyphRect := image.Rect(0, 0, glyphCeilWidth, glyphCeilHeight)
glyphImg := image.NewRGBA(glyphRect)
// calculate the area needed for the font texture
var fontTexSize = 2
minAreaNeeded := (glyphCeilWidth) * (glyphCeilHeight) * len(glyphs)
for (fontTexSize * fontTexSize) < minAreaNeeded {
fontTexSize *= 2
if fontTexSize > 2048 {
return f, fmt.Errorf("Font texture was going to exceed 2048x2048 and that's currently not supported.")
}
}
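// Worked example with illustrative numbers: 95 printable ASCII glyphs in 20x24 pixel
// cells need 20*24*95 = 45600 pixels, so the loop above settles on a 256x256 texture
// (128*128 = 16384 is too small, 256*256 = 65536 is enough).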
// create the font image
fontImgRect := image.Rect(0, 0, fontTexSize, fontTexSize)
fontImg := image.NewRGBA(fontImgRect)
// the number of glyph cells that fit across one row of the font texture
fontRowSize := fontTexSize / glyphCeilWidth
// create the freetype context
c := ft.NewContext()
c.SetDPI(72)
c.SetFont(ttfData)
c.SetFontSize(float64(scaleInt))
c.SetClip(glyphImg.Bounds())
c.SetDst(glyphImg)
c.SetSrc(image.White)
// NOTE: always disabled for now since it causes a stack overflow error
//c.SetHinting(imgfont.HintingFull)
var fx, fy int
for _, ch := range glyphs {
index := ttfData.Index(ch)
metricH := ttfData.HMetric(scale, index)
metricV := ttfData.VMetric(scale, index)
fxGW := fx * glyphCeilWidth
fyGH := fy * glyphCeilHeight
f.locations[ch] = runeData{
fxGW, fyGH,
fixedInt26ToFloat(metricH.AdvanceWidth), fixedInt26ToFloat(metricH.LeftSideBearing),
fixedInt26ToFloat(metricV.AdvanceHeight), fixedInt26ToFloat(metricV.TopSideBearing),
float32(fxGW) / float32(fontTexSize), (float32(fyGH) + glyphHeight) / float32(fontTexSize),
(float32(fxGW) + glyphWidth) / float32(fontTexSize), float32(fyGH) / float32(fontTexSize),
}
pt := ft.Pt(1, 1+int(c.PointToFixed(float64(scaleInt))>>6))
_, err := c.DrawString(string(ch), pt)
if err != nil {
return f, fmt.Errorf("Freetype returned an error while drawing a glyph: %v.", err)
}
// copy the glyph image into the font image
for subY := 0; subY < glyphCeilHeight; subY++ {
for subX := 0; subX < glyphCeilWidth; subX++ {
glyphRGBA := glyphImg.RGBAAt(subX, subY)
fontImg.SetRGBA((fxGW)+subX, (fyGH)+subY, glyphRGBA)
}
}
// erase the glyph image buffer
draw.Draw(glyphImg, glyphImg.Bounds(), image.Transparent, image.ZP, draw.Src)
// adjust the pointers into the font image
fx++
if fx >= fontRowSize {
fx = 0
fy++
}
}
// set the white point
fontImg.SetRGBA(fontTexSize-1, fontTexSize-1, color.RGBA{R: 255, G: 255, B: 255, A: 255})
// buffer the font image into an OpenGL texture
f.Glyphs = glyphs
f.TextureSize = fontTexSize
f.GlyphWidth = glyphWidth
f.GlyphHeight = glyphHeight
f.Owner = owner
f.Texture = f.loadRGBAToTexture(fontImg.Pix, int32(fontImg.Rect.Max.X))
return
}
// Destroy releases the OpenGL texture for the font.
func (f *Font) Destroy() {
f.Owner.gfx.DeleteTexture(f.Texture)
}
// GetCurrentScale returns the scale value for the font based on the current
// Manager's resolution vs the resolution the UI was designed for.
func (f *Font) GetCurrentScale() float32 {
_, uiHeight := f.Owner.GetResolution()
designHeight := f.Owner.GetDesignHeight()
return float32(uiHeight) / float32(designHeight)
}
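// Example: a UI designed for a 768-pixel-tall screen rendered in a 1536-pixel-tall
// window yields a scale of 1536/768 = 2.0, so glyph metrics are doubled.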
// GetRenderSize returns the width and height necessary in pixels for the
// font to display a string. The third return value is the advance height of the string.
func (f *Font) GetRenderSize(msg string) (float32, float32, float32) {
var w, h float32
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
for _, ch := range msg {
bounds, _, _ := f.face.GlyphBounds(ch)
glyphDimensions := bounds.Max.Sub(bounds.Min)
adv, _ := f.face.GlyphAdvance(ch)
w += fixedInt26ToFloat(adv)
glyphDYf := fixedInt26ToFloat(glyphDimensions.Y)
if h < glyphDYf {
h = glyphDYf
}
}
metrics := f.face.Metrics()
advH := fixedInt26ToFloat(metrics.Ascent)
return w * fontScale, h * fontScale, advH * fontScale
}
// OffsetFloor returns the maximum width offset that will fit between characters that
// is still smaller than the offset passed in.
func (f *Font) OffsetFloor(msg string, offset float32) float32 {
var w float32
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
for _, ch := range msg {
adv, ok := f.face.GlyphAdvance(ch)
if !ok {
fmt.Printf("ERROR on glyphadvance for %c!\n", ch)
}
advf := fixedInt26ToFloat(adv)
// break if we go over the distance
if w+advf > offset {
break
}
w += advf
}
return w * fontScale
}
// OffsetForIndex returns the width offset that will fit just before the `stopIndex`
// number character in the msg.
func (f *Font) OffsetForIndex(msg string, stopIndex int) float32 {
return f.OffsetForIndexAdv(msg, 0, stopIndex)
}
// OffsetForIndexAdv returns the width offset that will fit just before the `stopIndex`
// number character in the msg, starting at charStartIndex.
func (f *Font) OffsetForIndexAdv(msg string, charStartIndex int, stopIndex int) float32 {
var w float32
// sanity test the input
if len(msg) < 1 {
return 0.0
}
if charStartIndex > stopIndex {
return 0.0
}
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
for i, ch := range msg[charStartIndex:] {
// calculate up to the stopIndex but do not include it
if i+charStartIndex >= stopIndex {
break
}
adv, _ := f.face.GlyphAdvance(ch)
w += fixedInt26ToFloat(adv)
}
return w * fontScale
}
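// Example: OffsetForIndexAdv("abc", 0, 2) sums the scaled advance widths of 'a' and 'b'
// only, giving the pen offset of the character at index 2; OffsetForIndex(msg, i) is the
// same call with charStartIndex fixed at 0.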
// fixedInt26ToFloat converts a fixed int 26:6 precision to a float32.
func fixedInt26ToFloat(fixedInt fixed.Int26_6) float32 {
var result float32
i := int32(fixedInt)
result += float32(i >> 6)
result += float32(i&0x003F) / float32(64.0)
return result
}
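// Example: fixed.Int26_6 counts 1/64ths of a pixel, so a raw value of 160 converts to
// 160>>6 + 32/64 = 2.5, and fixedInt26ToFloat(fixed.I(3)) returns 3.0 because
// fixed.I(3) == 3<<6 == 192.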
// TextRenderData is a structure containing the raw OpenGL VBO data needed
// to render a text string for a given texture.
type TextRenderData struct {
ComboBuffer []float32 // the combo VBO data (vert/uv/color)
IndexBuffer []uint32 // the element index VBO data
Faces uint32 // the number of faces in the text string
Width float32 // the width in pixels of the text string
Height float32 // the height in pixels of the text string
AdvanceHeight float32 // the number of pixels to move the pen in the vertical direction
CursorOverflowRight bool // whether or not the cursor was too far to the right for string width
}
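// Layout note (derived from CreateTextAdv below): every glyph contributes 4 vertexes and
// 2 triangles; each vertex is packed into ComboBuffer as 9 floats (position x,y, texture
// u,v, the sampler index, and an RGBA color), and IndexBuffer holds 6 indexes per glyph
// forming the two triangles, so Faces*3 indexes are drawn in total.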
// CreateText makes a new renderable object from the supplied string
// using the data in the font. The data is returned as a TextRenderData object.
func (f *Font) CreateText(pos mgl.Vec3, color mgl.Vec4, msg string) TextRenderData {
return f.CreateTextAdv(pos, color, -1.0, -1, -1, msg)
}
// CreateTextAdv makes a new renderable object from the supplied string
// using the data in the font. The rendered data covers the maximum amount of the msg that fits
// the specified maxWidth (if greater than 0.0) starting at the charOffset specified.
// The data is returned as a TextRenderData object.
func (f *Font) CreateTextAdv(pos mgl.Vec3, color mgl.Vec4, maxWidth float32, charOffset int, cursorPosition int, msg string) TextRenderData {
// this is the texture ID of the font to use in the shader; by default
// the library always binds the font to the first texture sampler.
const floatTexturePosition = 0.0
// sanity checks
originalLen := len(msg)
trimmedMsg := msg
if originalLen == 0 {
return TextRenderData{
ComboBuffer: nil,
IndexBuffer: nil,
Faces: 0,
Width: 0.0,
Height: 0.0,
AdvanceHeight: 0.0,
CursorOverflowRight: false,
}
}
if charOffset > 0 && charOffset < originalLen {
// trim the string based on incoming character offset
trimmedMsg = trimmedMsg[charOffset:]
}
// get the length of our message
msgLength := len(trimmedMsg)
// create the arrays to hold the data to buffer to OpenGL
comboBuffer := make([]float32, 0, msgLength*(2+2+1+4)*4) // pos(2), uv(2), texture index(1), color(4) per vertex, 4 vertexes per glyph
indexBuffer := make([]uint32, 0, msgLength*6) // two faces * three indexes
// do a preliminary test to see how much room the message will take up
dimX, dimY, advH := f.GetRenderSize(trimmedMsg)
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
// loop through the message
var totalChars = 0
var scaledSize float32 = 0.0
var cursorOverflowRight bool
var penX = pos[0]
var penY = pos[1] - float32(advH)
for chi, ch := range trimmedMsg {
// get the rune data
chData := f.locations[ch]
/*
bounds, _, _ := f.face.GlyphBounds(ch)
glyphD := bounds.Max.Sub(bounds.Min)
glyphAdvW, _ := f.face.GlyphAdvance(ch)
metrics := f.face.Metrics()
glyphAdvH := float32(metrics.Ascent.Round())
glyphH := float32(glyphD.Y.Round())
glyphW := float32(glyphD.X.Round())
advHeight := glyphAdvH
advWidth := float32(glyphAdvW.Round())
*/
glyphH := f.GlyphHeight
glyphW := f.GlyphWidth
advHeight := chData.advanceHeight
advWidth := chData.advanceWidth
// possibly stop here if we're going to overflow the max width
if maxWidth > 0.0 && scaledSize+(advWidth*fontScale) > maxWidth {
// we overflowed the size of the string, now check to see if
// the cursor position is covered within this string or if that hasn't
// been reached yet.
if cursorPosition >= 0 && cursorPosition-charOffset > chi {
cursorOverflowRight = true
}
// adjust the dimX here since we shortened the string
dimX = scaledSize
break
}
scaledSize += advWidth * fontScale
// set up the coordinates for the vertexes
x0 := penX
y0 := penY - (glyphH-advHeight)*fontScale
x1 := x0 + glyphW*fontScale
y1 := y0 + glyphH*fontScale
s0 := chData.uvMinX
t0 := chData.uvMinY
s1 := chData.uvMaxX
t1 := chData.uvMaxY
// set the vertex data
comboBuffer = append(comboBuffer, x1)
comboBuffer = append(comboBuffer, y0)
comboBuffer = append(comboBuffer, s1)
comboBuffer = append(comboBuffer, t0)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
comboBuffer = append(comboBuffer, x1)
comboBuffer = append(comboBuffer, y1)
comboBuffer = append(comboBuffer, s1)
comboBuffer = append(comboBuffer, t1)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
comboBuffer = append(comboBuffer, x0)
comboBuffer = append(comboBuffer, y1)
comboBuffer = append(comboBuffer, s0)
comboBuffer = append(comboBuffer, t1)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
comboBuffer = append(comboBuffer, x0)
comboBuffer = append(comboBuffer, y0)
comboBuffer = append(comboBuffer, s0)
comboBuffer = append(comboBuffer, t0)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
startIndex := uint32(chi) * 4
indexBuffer = append(indexBuffer, startIndex)
indexBuffer = append(indexBuffer, startIndex+1)
indexBuffer = append(indexBuffer, startIndex+2)
indexBuffer = append(indexBuffer, startIndex+2)
indexBuffer = append(indexBuffer, startIndex+3)
indexBuffer = append(indexBuffer, startIndex)
// advance the pen
penX += advWidth * fontScale
totalChars++
}
return TextRenderData{
ComboBuffer: comboBuffer,
IndexBuffer: indexBuffer,
Faces: uint32(totalChars * 2),
Width: float32(dimX),
Height: float32(dimY),
AdvanceHeight: float32(advH),
CursorOverflowRight: cursorOverflowRight,
}
}
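// Minimal usage sketch (assumed caller code, not part of this file):
//
// trd := font.CreateTextAdv(mgl.Vec3{10, 100, 0}, mgl.Vec4{1, 1, 1, 1}, 200.0, 0, -1, "Hello")
// // upload trd.ComboBuffer as the vertex VBO and trd.IndexBuffer as the element VBO,
// // then draw trd.Faces*3 indexes with the font texture bound to sampler 0.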
// loadRGBAToTexture takes a byte slice and throws it into an OpenGL texture.
func (f *Font) loadRGBAToTexture(rgba []byte, imageSize int32) graphics.Texture |
// loadRGBAToTextureExt takes a byte slice and throws it into an OpenGL texture.
func (f *Font) loadRGBAToTextureExt(rgba []byte, imageSize, magFilter, minFilter, wrapS, wrapT int32) graphics.Texture {
tex := f.Owner.gfx.GenTexture()
f.Owner.gfx.ActiveTexture(graphics.TEXTURE0)
f.Owner.gfx.BindTexture(graphics.TEXTURE_2D, tex)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_MAG_FILTER, magFilter)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_MIN_FILTER, minFilter)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_WRAP_S, wrapS)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_WRAP_T, wrapT)
f.Owner.gfx.TexImage2D(graphics.TEXTURE_2D, 0, graphics.RGBA, imageSize, imageSize, 0, graphics.RGBA, graphics.UNSIGNED_BYTE, f.Owner.gfx.Ptr(rgba), len(rgba))
return tex
}
| {
return f.loadRGBAToTextureExt(rgba, imageSize, graphics.LINEAR, graphics.LINEAR, graphics.CLAMP_TO_EDGE, graphics.CLAMP_TO_EDGE)
} | identifier_body |
font.go | // Copyright 2015, Timothy Bogdala <[email protected]>
// See the LICENSE file for more details.
package eweygewey
/*
Based primarily on gltext found at https://github.com/go-gl/gltext
But also based on examples from the freetype-go project:
https://github.com/golang/freetype
This implementation differs in the way the images are rendered and then
copied into an OpenGL texture. In addition to that, this module can
create a renderable 'string' node which is a bunch of polygons with uv's
mapped to the appropriate glyphs.
*/
import (
"fmt"
"image"
"image/color"
"image/draw"
"io/ioutil"
"math"
"os"
mgl "github.com/go-gl/mathgl/mgl32"
ft "github.com/golang/freetype"
"github.com/golang/freetype/truetype"
graphics "github.com/tbogdala/fizzle/graphicsprovider"
imgfont "golang.org/x/image/font"
"golang.org/x/image/math/fixed"
)
// runeData stores information pulled from the freetype parsing of glyphs.
type runeData struct {
imgX, imgY int // offset into the image texture for the top left position of rune
advanceWidth, leftSideBearing float32 // HMetric data from glyph
advanceHeight, topSideBearing float32 // VMetric data from glyph
uvMinX, uvMinY float32
uvMaxX, uvMaxY float32
}
// Font contains data regarding a font and the texture that was created
// with the specified set of glyphs. It can then be used to create
// renderable string objects.
type Font struct {
Texture graphics.Texture
TextureSize int
Glyphs string
GlyphHeight float32
GlyphWidth float32
Owner *Manager
locations map[rune]runeData
opts truetype.Options
face imgfont.Face
}
// newFont takes a fontFilepath and uses the Go freetype library to parse it
// and render the specified glyphs to a texture that is then buffered into OpenGL.
func newFont(owner *Manager, fontFilepath string, scaleInt int, glyphs string) (f *Font, e error) {
// Load the font used for UI interaction
fontFile, err := os.Open(fontFilepath)
if err != nil {
return f, fmt.Errorf("Failed to open the font file.\n%v", err)
}
defer fontFile.Close()
// load in the font
fontBytes, err := ioutil.ReadAll(fontFile)
if err != nil {
return f, fmt.Errorf("Failed to load font data from stream.\n%v", err)
}
return newFontBytes(owner, fontBytes, scaleInt, glyphs)
}
// newFontBytes takes a byte slice representing the font and uses the Go freetype library to parse it
// and render the specified glyphs to a texture that is then buffered into OpenGL.
func newFontBytes(owner *Manager, fontBytes []byte, scaleInt int, glyphs string) (f *Font, e error) {
f = new(Font)
scale := fixed.I(scaleInt)
// allocate the location map
f.locations = make(map[rune]runeData)
// parse the truetype font data
ttfData, err := ft.ParseFont(fontBytes)
if err != nil {
return f, fmt.Errorf("Failed to parse the truetype font data.\n%v", err)
}
f.opts.Size = float64(scaleInt)
f.face = truetype.NewFace(ttfData, &f.opts)
// this may have negative components, but get the bounds for the font
glyphBounds := ttfData.Bounds(scale)
// compute the pixel dimensions of a cell large enough to hold any glyph in the font
glyphDimensions := glyphBounds.Max.Sub(glyphBounds.Min)
glyphWidth := fixedInt26ToFloat(glyphDimensions.X)
glyphHeight := fixedInt26ToFloat(glyphDimensions.Y)
glyphCeilWidth := int(math.Ceil(float64(glyphWidth)))
glyphCeilHeight := int(math.Ceil(float64(glyphHeight)))
// create the buffer image used to draw the glyphs
glyphRect := image.Rect(0, 0, glyphCeilWidth, glyphCeilHeight)
glyphImg := image.NewRGBA(glyphRect)
// calculate the area needed for the font texture
var fontTexSize = 2
minAreaNeeded := (glyphCeilWidth) * (glyphCeilHeight) * len(glyphs)
for (fontTexSize * fontTexSize) < minAreaNeeded {
fontTexSize *= 2
if fontTexSize > 2048 {
return f, fmt.Errorf("Font texture was going to exceed 2048x2048 and that's currently not supported.")
}
}
// create the font image
fontImgRect := image.Rect(0, 0, fontTexSize, fontTexSize)
fontImg := image.NewRGBA(fontImgRect)
// the number of glyph cells that fit across one row of the font texture
fontRowSize := fontTexSize / glyphCeilWidth
// create the freetype context
c := ft.NewContext()
c.SetDPI(72)
c.SetFont(ttfData)
c.SetFontSize(float64(scaleInt))
c.SetClip(glyphImg.Bounds())
c.SetDst(glyphImg)
c.SetSrc(image.White)
// NOTE: always disabled for now since it causes a stack overflow error
//c.SetHinting(imgfont.HintingFull)
var fx, fy int
for _, ch := range glyphs {
index := ttfData.Index(ch)
metricH := ttfData.HMetric(scale, index)
metricV := ttfData.VMetric(scale, index)
fxGW := fx * glyphCeilWidth
fyGH := fy * glyphCeilHeight
f.locations[ch] = runeData{
fxGW, fyGH,
fixedInt26ToFloat(metricH.AdvanceWidth), fixedInt26ToFloat(metricH.LeftSideBearing),
fixedInt26ToFloat(metricV.AdvanceHeight), fixedInt26ToFloat(metricV.TopSideBearing),
float32(fxGW) / float32(fontTexSize), (float32(fyGH) + glyphHeight) / float32(fontTexSize),
(float32(fxGW) + glyphWidth) / float32(fontTexSize), float32(fyGH) / float32(fontTexSize),
}
pt := ft.Pt(1, 1+int(c.PointToFixed(float64(scaleInt))>>6))
_, err := c.DrawString(string(ch), pt)
if err != nil {
return f, fmt.Errorf("Freetype returned an error while drawing a glyph: %v.", err)
}
// copy the glyph image into the font image
for subY := 0; subY < glyphCeilHeight; subY++ {
for subX := 0; subX < glyphCeilWidth; subX++ {
glyphRGBA := glyphImg.RGBAAt(subX, subY)
fontImg.SetRGBA((fxGW)+subX, (fyGH)+subY, glyphRGBA)
}
}
// erase the glyph image buffer
draw.Draw(glyphImg, glyphImg.Bounds(), image.Transparent, image.ZP, draw.Src)
// adjust the pointers into the font image
fx++
if fx >= fontRowSize {
fx = 0
fy++
}
}
// set the white point
fontImg.SetRGBA(fontTexSize-1, fontTexSize-1, color.RGBA{R: 255, G: 255, B: 255, A: 255})
// buffer the font image into an OpenGL texture
f.Glyphs = glyphs
f.TextureSize = fontTexSize
f.GlyphWidth = glyphWidth
f.GlyphHeight = glyphHeight
f.Owner = owner
f.Texture = f.loadRGBAToTexture(fontImg.Pix, int32(fontImg.Rect.Max.X))
return
}
// Destroy releases the OpenGL texture for the font.
func (f *Font) Destroy() {
f.Owner.gfx.DeleteTexture(f.Texture)
}
// GetCurrentScale returns the scale value for the font based on the current
// Manager's resolution vs the resolution the UI was designed for.
func (f *Font) GetCurrentScale() float32 {
_, uiHeight := f.Owner.GetResolution()
designHeight := f.Owner.GetDesignHeight()
return float32(uiHeight) / float32(designHeight)
}
// GetRenderSize returns the width and height necessary in pixels for the
// font to display a string. The third return value is the advance height of the string.
func (f *Font) GetRenderSize(msg string) (float32, float32, float32) {
var w, h float32
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
for _, ch := range msg {
bounds, _, _ := f.face.GlyphBounds(ch)
glyphDimensions := bounds.Max.Sub(bounds.Min)
adv, _ := f.face.GlyphAdvance(ch)
w += fixedInt26ToFloat(adv)
glyphDYf := fixedInt26ToFloat(glyphDimensions.Y)
if h < glyphDYf {
h = glyphDYf
}
}
metrics := f.face.Metrics()
advH := fixedInt26ToFloat(metrics.Ascent)
return w * fontScale, h * fontScale, advH * fontScale
}
// OffsetFloor returns the maximum width offset that will fit between characters that
// is still smaller than the offset passed in.
func (f *Font) OffsetFloor(msg string, offset float32) float32 {
var w float32
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
for _, ch := range msg {
adv, ok := f.face.GlyphAdvance(ch)
if !ok {
fmt.Printf("ERROR on glyphadvance for %c!\n", ch)
}
advf := fixedInt26ToFloat(adv)
// break if we go over the distance
if w+advf > offset {
break
}
w += advf
}
return w * fontScale
}
// OffsetForIndex returns the width offset that will fit just before the `stopIndex`
// number character in the msg.
func (f *Font) OffsetForIndex(msg string, stopIndex int) float32 {
return f.OffsetForIndexAdv(msg, 0, stopIndex)
}
// OffsetForIndexAdv returns the width offset that will fit just before the `stopIndex`
// number character in the msg, starting at charStartIndex.
func (f *Font) OffsetForIndexAdv(msg string, charStartIndex int, stopIndex int) float32 {
var w float32
// sanity test the input
if len(msg) < 1 {
return 0.0
}
if charStartIndex > stopIndex {
return 0.0
}
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
for i, ch := range msg[charStartIndex:] {
// calculate up to the stopIndex but do not include it
if i+charStartIndex >= stopIndex {
break
}
adv, _ := f.face.GlyphAdvance(ch)
w += fixedInt26ToFloat(adv)
}
return w * fontScale
}
// fixedInt26ToFloat converts a fixed int 26:6 precision to a float32.
func | (fixedInt fixed.Int26_6) float32 {
var result float32
i := int32(fixedInt)
result += float32(i >> 6)
result += float32(i&0x003F) / float32(64.0)
return result
}
// TextRenderData is a structure containing the raw OpenGL VBO data needed
// to render a text string for a given texture.
type TextRenderData struct {
ComboBuffer []float32 // the combo VBO data (vert/uv/color)
IndexBuffer []uint32 // the element index VBO data
Faces uint32 // the number of faces in the text string
Width float32 // the width in pixels of the text string
Height float32 // the height in pixels of the text string
AdvanceHeight float32 // the number of pixels to move the pen in the vertical direction
CursorOverflowRight bool // whether or not the cursor was too far to the right for string width
}
// CreateText makes a new renderable object from the supplied string
// using the data in the font. The data is returned as a TextRenderData object.
func (f *Font) CreateText(pos mgl.Vec3, color mgl.Vec4, msg string) TextRenderData {
return f.CreateTextAdv(pos, color, -1.0, -1, -1, msg)
}
// CreateTextAdv makes a new renderable object from the supplied string
// using the data in the font. The rendered data covers the maximum amount of the msg that fits
// the specified maxWidth (if greater than 0.0) starting at the charOffset specified.
// The data is returned as a TextRenderData object.
func (f *Font) CreateTextAdv(pos mgl.Vec3, color mgl.Vec4, maxWidth float32, charOffset int, cursorPosition int, msg string) TextRenderData {
// this is the texture ID of the font to use in the shader; by default
// the library always binds the font to the first texture sampler.
const floatTexturePosition = 0.0
// sanity checks
originalLen := len(msg)
trimmedMsg := msg
if originalLen == 0 {
return TextRenderData{
ComboBuffer: nil,
IndexBuffer: nil,
Faces: 0,
Width: 0.0,
Height: 0.0,
AdvanceHeight: 0.0,
CursorOverflowRight: false,
}
}
if charOffset > 0 && charOffset < originalLen {
// trim the string based on incoming character offset
trimmedMsg = trimmedMsg[charOffset:]
}
// get the length of our message
msgLength := len(trimmedMsg)
// create the arrays to hold the data to buffer to OpenGL
comboBuffer := make([]float32, 0, msgLength*(2+2+1+4)*4) // pos(2), uv(2), texture index(1), color(4) per vertex, 4 vertexes per glyph
indexBuffer := make([]uint32, 0, msgLength*6) // two faces * three indexes
// do a preliminary test to see how much room the message will take up
dimX, dimY, advH := f.GetRenderSize(trimmedMsg)
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
// loop through the message
var totalChars = 0
var scaledSize float32 = 0.0
var cursorOverflowRight bool
var penX = pos[0]
var penY = pos[1] - float32(advH)
for chi, ch := range trimmedMsg {
// get the rune data
chData := f.locations[ch]
/*
bounds, _, _ := f.face.GlyphBounds(ch)
glyphD := bounds.Max.Sub(bounds.Min)
glyphAdvW, _ := f.face.GlyphAdvance(ch)
metrics := f.face.Metrics()
glyphAdvH := float32(metrics.Ascent.Round())
glyphH := float32(glyphD.Y.Round())
glyphW := float32(glyphD.X.Round())
advHeight := glyphAdvH
advWidth := float32(glyphAdvW.Round())
*/
glyphH := f.GlyphHeight
glyphW := f.GlyphWidth
advHeight := chData.advanceHeight
advWidth := chData.advanceWidth
// possibly stop here if we're going to overflow the max width
if maxWidth > 0.0 && scaledSize+(advWidth*fontScale) > maxWidth {
// we overflowed the size of the string, now check to see if
// the cursor position is covered within this string or if that hasn't
// been reached yet.
if cursorPosition >= 0 && cursorPosition-charOffset > chi {
cursorOverflowRight = true
}
// adjust the dimX here since we shortened the string
dimX = scaledSize
break
}
scaledSize += advWidth * fontScale
// set up the coordinates for the vertexes
x0 := penX
y0 := penY - (glyphH-advHeight)*fontScale
x1 := x0 + glyphW*fontScale
y1 := y0 + glyphH*fontScale
s0 := chData.uvMinX
t0 := chData.uvMinY
s1 := chData.uvMaxX
t1 := chData.uvMaxY
// set the vertex data
comboBuffer = append(comboBuffer, x1)
comboBuffer = append(comboBuffer, y0)
comboBuffer = append(comboBuffer, s1)
comboBuffer = append(comboBuffer, t0)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
comboBuffer = append(comboBuffer, x1)
comboBuffer = append(comboBuffer, y1)
comboBuffer = append(comboBuffer, s1)
comboBuffer = append(comboBuffer, t1)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
comboBuffer = append(comboBuffer, x0)
comboBuffer = append(comboBuffer, y1)
comboBuffer = append(comboBuffer, s0)
comboBuffer = append(comboBuffer, t1)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
comboBuffer = append(comboBuffer, x0)
comboBuffer = append(comboBuffer, y0)
comboBuffer = append(comboBuffer, s0)
comboBuffer = append(comboBuffer, t0)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
startIndex := uint32(chi) * 4
indexBuffer = append(indexBuffer, startIndex)
indexBuffer = append(indexBuffer, startIndex+1)
indexBuffer = append(indexBuffer, startIndex+2)
indexBuffer = append(indexBuffer, startIndex+2)
indexBuffer = append(indexBuffer, startIndex+3)
indexBuffer = append(indexBuffer, startIndex)
// advance the pen
penX += advWidth * fontScale
totalChars++
}
return TextRenderData{
ComboBuffer: comboBuffer,
IndexBuffer: indexBuffer,
Faces: uint32(totalChars * 2),
Width: float32(dimX),
Height: float32(dimY),
AdvanceHeight: float32(advH),
CursorOverflowRight: cursorOverflowRight,
}
}
// loadRGBAToTexture takes a byte slice and throws it into an OpenGL texture.
func (f *Font) loadRGBAToTexture(rgba []byte, imageSize int32) graphics.Texture {
return f.loadRGBAToTextureExt(rgba, imageSize, graphics.LINEAR, graphics.LINEAR, graphics.CLAMP_TO_EDGE, graphics.CLAMP_TO_EDGE)
}
// loadRGBAToTextureExt takes a byte slice and throws it into an OpenGL texture.
func (f *Font) loadRGBAToTextureExt(rgba []byte, imageSize, magFilter, minFilter, wrapS, wrapT int32) graphics.Texture {
tex := f.Owner.gfx.GenTexture()
f.Owner.gfx.ActiveTexture(graphics.TEXTURE0)
f.Owner.gfx.BindTexture(graphics.TEXTURE_2D, tex)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_MAG_FILTER, magFilter)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_MIN_FILTER, minFilter)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_WRAP_S, wrapS)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_WRAP_T, wrapT)
f.Owner.gfx.TexImage2D(graphics.TEXTURE_2D, 0, graphics.RGBA, imageSize, imageSize, 0, graphics.RGBA, graphics.UNSIGNED_BYTE, f.Owner.gfx.Ptr(rgba), len(rgba))
return tex
}
| fixedInt26ToFloat | identifier_name |
font.go | // Copyright 2015, Timothy Bogdala <[email protected]>
// See the LICENSE file for more details.
package eweygewey
/*
Based primarily on gltext found at https://github.com/go-gl/gltext
But also based on examples from the freetype-go project:
https://github.com/golang/freetype
This implementation differs in the way the images are rendered and then
copied into an OpenGL texture. In addition to that, this module can
create a renderable 'string' node which is a bunch of polygons with uv's
mapped to the appropriate glyphs.
*/
import (
"fmt"
"image"
"image/color"
"image/draw"
"io/ioutil"
"math"
"os"
mgl "github.com/go-gl/mathgl/mgl32"
ft "github.com/golang/freetype"
"github.com/golang/freetype/truetype"
graphics "github.com/tbogdala/fizzle/graphicsprovider"
imgfont "golang.org/x/image/font"
"golang.org/x/image/math/fixed"
)
// runeData stores information pulled from the freetype parsing of glyphs.
type runeData struct {
imgX, imgY int // offset into the image texture for the top left position of rune
advanceWidth, leftSideBearing float32 // HMetric data from glyph
advanceHeight, topSideBearing float32 // VMetric data from glyph
uvMinX, uvMinY float32
uvMaxX, uvMaxY float32
}
// Font contains data regarding a font and the texture that was created
// with the specified set of glyphs. It can then be used to create
// renderable string objects.
type Font struct {
Texture graphics.Texture
TextureSize int
Glyphs string
GlyphHeight float32
GlyphWidth float32
Owner *Manager
locations map[rune]runeData
opts truetype.Options
face imgfont.Face
}
// newFont takes a fontFilepath and uses the Go freetype library to parse it
// and render the specified glyphs to a texture that is then buffered into OpenGL.
func newFont(owner *Manager, fontFilepath string, scaleInt int, glyphs string) (f *Font, e error) {
// Load the font used for UI interaction
fontFile, err := os.Open(fontFilepath)
if err != nil {
return f, fmt.Errorf("Failed to open the font file.\n%v", err)
}
defer fontFile.Close()
// load in the font
fontBytes, err := ioutil.ReadAll(fontFile)
if err != nil {
return f, fmt.Errorf("Failed to load font data from stream.\n%v", err)
}
return newFontBytes(owner, fontBytes, scaleInt, glyphs)
}
// newFontBytes takes a byte slice representing the font and uses the Go freetype library to parse it
// and render the specified glyphs to a texture that is then buffered into OpenGL.
func newFontBytes(owner *Manager, fontBytes []byte, scaleInt int, glyphs string) (f *Font, e error) {
f = new(Font)
scale := fixed.I(scaleInt)
// allocate the location map
f.locations = make(map[rune]runeData)
// parse the truetype font data
ttfData, err := ft.ParseFont(fontBytes)
if err != nil {
return f, fmt.Errorf("Failed to prase the truetype font data.\n%v", err)
}
f.opts.Size = float64(scaleInt)
f.face = truetype.NewFace(ttfData, &f.opts)
// this may have negative components, but get the bounds for the font
glyphBounds := ttfData.Bounds(scale)
// determine the glyph cell dimensions from the overall font bounds; each glyph
// is drawn into a cell of this size in the texture
glyphDimensions := glyphBounds.Max.Sub(glyphBounds.Min)
glyphWidth := fixedInt26ToFloat(glyphDimensions.X)
glyphHeight := fixedInt26ToFloat(glyphDimensions.Y)
glyphCeilWidth := int(math.Ceil(float64(glyphWidth)))
glyphCeilHeight := int(math.Ceil(float64(glyphHeight)))
// create the buffer image used to draw the glyphs
glyphRect := image.Rect(0, 0, glyphCeilWidth, glyphCeilHeight)
glyphImg := image.NewRGBA(glyphRect)
// calculate the area needed for the font texture
var fontTexSize = 2
minAreaNeeded := (glyphCeilWidth) * (glyphCeilHeight) * len(glyphs)
for (fontTexSize * fontTexSize) < minAreaNeeded {
fontTexSize *= 2
if fontTexSize > 2048 {
return f, fmt.Errorf("Font texture was going to exceed 2048x2048 and that's currently not supported.")
}
}
// create the font image
fontImgRect := image.Rect(0, 0, fontTexSize, fontTexSize)
fontImg := image.NewRGBA(fontImgRect)
// the number of glyph cells that fit in one row of the font texture
fontRowSize := fontTexSize / glyphCeilWidth
// create the freetype context
c := ft.NewContext()
c.SetDPI(72)
c.SetFont(ttfData)
c.SetFontSize(float64(scaleInt))
c.SetClip(glyphImg.Bounds())
c.SetDst(glyphImg)
c.SetSrc(image.White)
// NOTE: always disabled for now since it causes a stack overflow error
//c.SetHinting(imgfont.HintingFull)
var fx, fy int
for _, ch := range glyphs {
index := ttfData.Index(ch)
metricH := ttfData.HMetric(scale, index)
metricV := ttfData.VMetric(scale, index)
fxGW := fx * glyphCeilWidth
fyGH := fy * glyphCeilHeight
f.locations[ch] = runeData{
fxGW, fyGH,
fixedInt26ToFloat(metricH.AdvanceWidth), fixedInt26ToFloat(metricH.LeftSideBearing),
fixedInt26ToFloat(metricV.AdvanceHeight), fixedInt26ToFloat(metricV.TopSideBearing),
float32(fxGW) / float32(fontTexSize), (float32(fyGH) + glyphHeight) / float32(fontTexSize),
(float32(fxGW) + glyphWidth) / float32(fontTexSize), float32(fyGH) / float32(fontTexSize),
}
pt := ft.Pt(1, 1+int(c.PointToFixed(float64(scaleInt))>>6))
_, err := c.DrawString(string(ch), pt)
if err != nil {
return f, fmt.Errorf("Freetype returned an error while drawing a glyph: %v.", err)
}
// copy the glyph image into the font image
for subY := 0; subY < glyphCeilHeight; subY++ {
for subX := 0; subX < glyphCeilWidth; subX++ {
glyphRGBA := glyphImg.RGBAAt(subX, subY)
fontImg.SetRGBA((fxGW)+subX, (fyGH)+subY, glyphRGBA)
}
}
// erase the glyph image buffer
draw.Draw(glyphImg, glyphImg.Bounds(), image.Transparent, image.ZP, draw.Src)
// adjust the pointers into the font image
fx++
if fx > fontRowSize {
fx = 0
fy++
}
}
// set the white point
fontImg.SetRGBA(fontTexSize-1, fontTexSize-1, color.RGBA{R: 255, G: 255, B: 255, A: 255})
// buffer the font image into an OpenGL texture
f.Glyphs = glyphs
f.TextureSize = fontTexSize
f.GlyphWidth = glyphWidth
f.GlyphHeight = glyphHeight
f.Owner = owner
f.Texture = f.loadRGBAToTexture(fontImg.Pix, int32(fontImg.Rect.Max.X))
return
}
// Destroy releases the OpenGL texture for the font.
func (f *Font) Destroy() {
f.Owner.gfx.DeleteTexture(f.Texture)
}
// GetCurrentScale returns the scale value for the font based on the current
// Manager's resolution vs the resolution the UI was designed for.
func (f *Font) GetCurrentScale() float32 {
_, uiHeight := f.Owner.GetResolution()
designHeight := f.Owner.GetDesignHeight()
return float32(uiHeight) / float32(designHeight)
}
// GetRenderSize returns the width and height necessary in pixels for the
// font to display a string. The third return value is the advance height of the string.
func (f *Font) GetRenderSize(msg string) (float32, float32, float32) {
var w, h float32
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
for _, ch := range msg {
bounds, _, _ := f.face.GlyphBounds(ch)
glyphDimensions := bounds.Max.Sub(bounds.Min)
adv, _ := f.face.GlyphAdvance(ch)
w += fixedInt26ToFloat(adv)
glyphDYf := fixedInt26ToFloat(glyphDimensions.Y)
if h < glyphDYf {
h = glyphDYf
}
}
metrics := f.face.Metrics()
advH := fixedInt26ToFloat(metrics.Ascent)
return w * fontScale, h * fontScale, advH * fontScale
}
// OffsetFloor returns the maximum width offset that will fit between characters that
// is still smaller than the offset passed in.
func (f *Font) OffsetFloor(msg string, offset float32) float32 {
var w float32
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
for _, ch := range msg {
adv, ok := f.face.GlyphAdvance(ch)
if !ok {
fmt.Printf("ERROR on glyphadvance for %c!\n", ch)
}
advf := fixedInt26ToFloat(adv)
// break if we go over the distance
if w+advf > offset {
break
}
w += advf
}
return w * fontScale
}
// OffsetForIndex returns the width offset that will fit just before the `stopIndex`
// number character in the msg.
func (f *Font) OffsetForIndex(msg string, stopIndex int) float32 {
return f.OffsetForIndexAdv(msg, 0, stopIndex)
}
// OffsetForIndexAdv returns the width offset that will fit just before the `stopIndex`
// number character in the msg, starting at charStartIndex.
func (f *Font) OffsetForIndexAdv(msg string, charStartIndex int, stopIndex int) float32 {
var w float32
// sanity test the input
if len(msg) < 1 {
return 0.0
}
if charStartIndex > stopIndex {
return 0.0
}
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
for i, ch := range msg[charStartIndex:] |
return w * fontScale
}
// fixedInt26ToFloat converts a fixed int 26:6 precision to a float32.
func fixedInt26ToFloat(fixedInt fixed.Int26_6) float32 {
var result float32
i := int32(fixedInt)
result += float32(i >> 6)
result += float32(i&0x003F) / float32(64.0)
return result
}
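// A quick worked example of the 26:6 decoding above (illustrative only): for a raw
// fixed-point value of 100, the integer part is 100>>6 = 1 and the fractional part
// is (100&0x3F)/64 = 36/64 = 0.5625, so:
//
//	v := fixed.Int26_6(100)
//	_ = fixedInt26ToFloat(v) // 1.5625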
// TextRenderData is a structure containing the raw OpenGL VBO data needed
// to render a text string for a given texture.
type TextRenderData struct {
ComboBuffer []float32 // the combo VBO data (vert/uv/color)
IndexBuffer []uint32 // the element index VBO data
Faces uint32 // the number of faces in the text string
Width float32 // the width in pixels of the text string
Height float32 // the height in pixels of the text string
AdvanceHeight float32 // the number of pixels to move the pen in the vertical direction
CursorOverflowRight bool // whether or not the cursor was too far to the right for string width
}
// CreateText makes a new renderable object from the supplied string
// using the data in the font. The data is returned as a TextRenderData object.
func (f *Font) CreateText(pos mgl.Vec3, color mgl.Vec4, msg string) TextRenderData {
return f.CreateTextAdv(pos, color, -1.0, -1, -1, msg)
}
// CreateTextAdv makes a new renderable object from the supplied string
// using the data in the font. The string returned will be the maximum amount of the msg that fits
// the specified maxWidth (if greater than 0.0) starting at the charOffset specified.
// The data is returned as a TextRenderData object.
func (f *Font) CreateTextAdv(pos mgl.Vec3, color mgl.Vec4, maxWidth float32, charOffset int, cursorPosition int, msg string) TextRenderData {
// this is the texture ID of the font to use in the shader; by default
// the library always binds the font to the first texture sampler.
const floatTexturePosition = 0.0
// sanity checks
originalLen := len(msg)
trimmedMsg := msg
if originalLen == 0 {
return TextRenderData{
ComboBuffer: nil,
IndexBuffer: nil,
Faces: 0,
Width: 0.0,
Height: 0.0,
AdvanceHeight: 0.0,
CursorOverflowRight: false,
}
}
if charOffset > 0 && charOffset < originalLen {
// trim the string based on incoming character offset
trimmedMsg = trimmedMsg[charOffset:]
}
// get the length of our message
msgLength := len(trimmedMsg)
// create the arrays to hold the data to buffer to OpenGL
comboBuffer := make([]float32, 0, msgLength*(2+2+4)*4) // pos, uv, color4
indexBuffer := make([]uint32, 0, msgLength*6) // two faces * three indexes
// do a preliminary test to see how much room the message will take up
dimX, dimY, advH := f.GetRenderSize(trimmedMsg)
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
// loop through the message
var totalChars = 0
var scaledSize float32 = 0.0
var cursorOverflowRight bool
var penX = pos[0]
var penY = pos[1] - float32(advH)
for chi, ch := range trimmedMsg {
// get the rune data
chData := f.locations[ch]
/*
bounds, _, _ := f.face.GlyphBounds(ch)
glyphD := bounds.Max.Sub(bounds.Min)
glyphAdvW, _ := f.face.GlyphAdvance(ch)
metrics := f.face.Metrics()
glyphAdvH := float32(metrics.Ascent.Round())
glyphH := float32(glyphD.Y.Round())
glyphW := float32(glyphD.X.Round())
advHeight := glyphAdvH
advWidth := float32(glyphAdvW.Round())
*/
glyphH := f.GlyphHeight
glyphW := f.GlyphWidth
advHeight := chData.advanceHeight
advWidth := chData.advanceWidth
// possibly stop here if we're going to overflow the max width
if maxWidth > 0.0 && scaledSize+(advWidth*fontScale) > maxWidth {
// we overflowed the size of the string, now check to see if
// the cursor position is covered within this string or if that hasn't
// been reached yet.
if cursorPosition >= 0 && cursorPosition-charOffset > chi {
cursorOverflowRight = true
}
// adjust the dimX here since we shortened the string
dimX = scaledSize
break
}
scaledSize += advWidth * fontScale
// set up the coordinates for the vertexes
x0 := penX
y0 := penY - (glyphH-advHeight)*fontScale
x1 := x0 + glyphW*fontScale
y1 := y0 + glyphH*fontScale
s0 := chData.uvMinX
t0 := chData.uvMinY
s1 := chData.uvMaxX
t1 := chData.uvMaxY
// set the vertex data
comboBuffer = append(comboBuffer, x1)
comboBuffer = append(comboBuffer, y0)
comboBuffer = append(comboBuffer, s1)
comboBuffer = append(comboBuffer, t0)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
comboBuffer = append(comboBuffer, x1)
comboBuffer = append(comboBuffer, y1)
comboBuffer = append(comboBuffer, s1)
comboBuffer = append(comboBuffer, t1)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
comboBuffer = append(comboBuffer, x0)
comboBuffer = append(comboBuffer, y1)
comboBuffer = append(comboBuffer, s0)
comboBuffer = append(comboBuffer, t1)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
comboBuffer = append(comboBuffer, x0)
comboBuffer = append(comboBuffer, y0)
comboBuffer = append(comboBuffer, s0)
comboBuffer = append(comboBuffer, t0)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
startIndex := uint32(chi) * 4
indexBuffer = append(indexBuffer, startIndex)
indexBuffer = append(indexBuffer, startIndex+1)
indexBuffer = append(indexBuffer, startIndex+2)
indexBuffer = append(indexBuffer, startIndex+2)
indexBuffer = append(indexBuffer, startIndex+3)
indexBuffer = append(indexBuffer, startIndex)
// advance the pen
penX += advWidth * fontScale
totalChars++
}
return TextRenderData{
ComboBuffer: comboBuffer,
IndexBuffer: indexBuffer,
Faces: uint32(totalChars * 2),
Width: float32(dimX),
Height: float32(dimY),
AdvanceHeight: float32(advH),
CursorOverflowRight: cursorOverflowRight,
}
}
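// A usage sketch for CreateTextAdv (illustrative values; "font" is assumed to be a
// *Font created through the Manager): render at most 200 pixels of the string
// starting at character offset 3, with the text cursor at index 10:
//
//	trd := font.CreateTextAdv(mgl.Vec3{10, 200, 0}, mgl.Vec4{1, 1, 1, 1}, 200.0, 3, 10, "hello, world")
//	if trd.CursorOverflowRight {
//		// the cursor sits beyond the portion of the string that fit within 200px
//	}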
// loadRGBAToTexture takes a byte slice and throws it into an OpenGL texture.
func (f *Font) loadRGBAToTexture(rgba []byte, imageSize int32) graphics.Texture {
return f.loadRGBAToTextureExt(rgba, imageSize, graphics.LINEAR, graphics.LINEAR, graphics.CLAMP_TO_EDGE, graphics.CLAMP_TO_EDGE)
}
// loadRGBAToTextureExt takes a byte slice and throws it into an OpenGL texture.
func (f *Font) loadRGBAToTextureExt(rgba []byte, imageSize, magFilter, minFilter, wrapS, wrapT int32) graphics.Texture {
tex := f.Owner.gfx.GenTexture()
f.Owner.gfx.ActiveTexture(graphics.TEXTURE0)
f.Owner.gfx.BindTexture(graphics.TEXTURE_2D, tex)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_MAG_FILTER, magFilter)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_MIN_FILTER, minFilter)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_WRAP_S, wrapS)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_WRAP_T, wrapT)
f.Owner.gfx.TexImage2D(graphics.TEXTURE_2D, 0, graphics.RGBA, imageSize, imageSize, 0, graphics.RGBA, graphics.UNSIGNED_BYTE, f.Owner.gfx.Ptr(rgba), len(rgba))
return tex
}
| {
// calculate up to the stopIndex but do not include it
if i+charStartIndex >= stopIndex {
break
}
adv, _ := f.face.GlyphAdvance(ch)
w += fixedInt26ToFloat(adv)
} | conditional_block |
font.go | // Copyright 2015, Timothy Bogdala <[email protected]>
// See the LICENSE file for more details.
package eweygewey
/*
Based primarily on gltext found at https://github.com/go-gl/gltext
But also based on examples from the freetype-go project:
https://github.com/golang/freetype
This implementation differs in the way the images are rendered and then
copied into an OpenGL texture. In addition to that, this module can
create a renderable 'string' node which is a bunch of polygons with uv's
mapped to the appropriate glyphs.
*/
import (
"fmt"
"image"
"image/color"
"image/draw"
"io/ioutil"
"math"
"os"
mgl "github.com/go-gl/mathgl/mgl32"
ft "github.com/golang/freetype"
"github.com/golang/freetype/truetype"
graphics "github.com/tbogdala/fizzle/graphicsprovider"
imgfont "golang.org/x/image/font"
"golang.org/x/image/math/fixed"
)
// runeData stores information pulled from the freetype parsing of glyphs.
type runeData struct {
imgX, imgY int // offset into the image texture for the top left position of rune
advanceWidth, leftSideBearing float32 // HMetric data from glyph
advanceHeight, topSideBearing float32 // VMetric data from glyph
uvMinX, uvMinY float32
uvMaxX, uvMaxY float32
}
// Font contains data regarding a font and the texture that was created
// with the specified set of glyphs. It can then be used to create
// renderable string objects.
type Font struct {
Texture graphics.Texture
TextureSize int
Glyphs string
GlyphHeight float32
GlyphWidth float32
Owner *Manager
locations map[rune]runeData
opts truetype.Options
face imgfont.Face
}
// newFont takes a fontFilepath and uses the Go freetype library to parse it
// and render the specified glyphs to a texture that is then buffered into OpenGL.
func newFont(owner *Manager, fontFilepath string, scaleInt int, glyphs string) (f *Font, e error) {
// Load the font used for UI interaction
fontFile, err := os.Open(fontFilepath)
if err != nil {
return f, fmt.Errorf("Failed to open the font file.\n%v", err)
}
defer fontFile.Close()
// load in the font
fontBytes, err := ioutil.ReadAll(fontFile)
if err != nil {
return f, fmt.Errorf("Failed to load font data from stream.\n%v", err)
}
return newFontBytes(owner, fontBytes, scaleInt, glyphs)
}
// newFontBytes takes a byte slice representing the font and uses the Go freetype library to parse it
// and render the specified glyphs to a texture that is then buffered into OpenGL.
func newFontBytes(owner *Manager, fontBytes []byte, scaleInt int, glyphs string) (f *Font, e error) {
f = new(Font)
scale := fixed.I(scaleInt)
// allocate the location map
f.locations = make(map[rune]runeData)
// parse the truetype font data
ttfData, err := ft.ParseFont(fontBytes)
if err != nil {
return f, fmt.Errorf("Failed to prase the truetype font data.\n%v", err)
}
f.opts.Size = float64(scaleInt)
f.face = truetype.NewFace(ttfData, &f.opts)
// this may have negative components, but get the bounds for the font
glyphBounds := ttfData.Bounds(scale)
// determine the glyph cell dimensions from the overall font bounds; each glyph
// is drawn into a cell of this size in the texture
glyphDimensions := glyphBounds.Max.Sub(glyphBounds.Min)
glyphWidth := fixedInt26ToFloat(glyphDimensions.X)
glyphHeight := fixedInt26ToFloat(glyphDimensions.Y)
glyphCeilWidth := int(math.Ceil(float64(glyphWidth)))
glyphCeilHeight := int(math.Ceil(float64(glyphHeight)))
// create the buffer image used to draw the glyphs
glyphRect := image.Rect(0, 0, glyphCeilWidth, glyphCeilHeight)
glyphImg := image.NewRGBA(glyphRect)
// calculate the area needed for the font texture
var fontTexSize = 2
minAreaNeeded := (glyphCeilWidth) * (glyphCeilHeight) * len(glyphs)
for (fontTexSize * fontTexSize) < minAreaNeeded {
fontTexSize *= 2
if fontTexSize > 2048 {
return f, fmt.Errorf("Font texture was going to exceed 2048x2048 and that's currently not supported.")
}
}
// create the font image
fontImgRect := image.Rect(0, 0, fontTexSize, fontTexSize)
fontImg := image.NewRGBA(fontImgRect)
// the number of glyph cells that fit in one row of the font texture
fontRowSize := fontTexSize / glyphCeilWidth
// create the freetype context
c := ft.NewContext()
c.SetDPI(72)
c.SetFont(ttfData)
c.SetFontSize(float64(scaleInt))
c.SetClip(glyphImg.Bounds())
c.SetDst(glyphImg)
c.SetSrc(image.White)
// NOTE: always disabled for now since it causes a stack overflow error
//c.SetHinting(imgfont.HintingFull)
var fx, fy int
for _, ch := range glyphs {
index := ttfData.Index(ch)
metricH := ttfData.HMetric(scale, index)
metricV := ttfData.VMetric(scale, index)
fxGW := fx * glyphCeilWidth
fyGH := fy * glyphCeilHeight
f.locations[ch] = runeData{
fxGW, fyGH,
fixedInt26ToFloat(metricH.AdvanceWidth), fixedInt26ToFloat(metricH.LeftSideBearing),
fixedInt26ToFloat(metricV.AdvanceHeight), fixedInt26ToFloat(metricV.TopSideBearing),
float32(fxGW) / float32(fontTexSize), (float32(fyGH) + glyphHeight) / float32(fontTexSize),
(float32(fxGW) + glyphWidth) / float32(fontTexSize), float32(fyGH) / float32(fontTexSize),
}
pt := ft.Pt(1, 1+int(c.PointToFixed(float64(scaleInt))>>6))
_, err := c.DrawString(string(ch), pt)
if err != nil {
return f, fmt.Errorf("Freetype returned an error while drawing a glyph: %v.", err)
}
// copy the glyph image into the font image
for subY := 0; subY < glyphCeilHeight; subY++ {
for subX := 0; subX < glyphCeilWidth; subX++ {
glyphRGBA := glyphImg.RGBAAt(subX, subY)
fontImg.SetRGBA((fxGW)+subX, (fyGH)+subY, glyphRGBA)
}
}
// erase the glyph image buffer
draw.Draw(glyphImg, glyphImg.Bounds(), image.Transparent, image.ZP, draw.Src)
// adjust the pointers into the font image
fx++
if fx > fontRowSize {
fx = 0
fy++
}
}
// set the white point
fontImg.SetRGBA(fontTexSize-1, fontTexSize-1, color.RGBA{R: 255, G: 255, B: 255, A: 255})
// buffer the font image into an OpenGL texture
f.Glyphs = glyphs
f.TextureSize = fontTexSize
f.GlyphWidth = glyphWidth
f.GlyphHeight = glyphHeight
f.Owner = owner
f.Texture = f.loadRGBAToTexture(fontImg.Pix, int32(fontImg.Rect.Max.X))
return
}
// Destroy releases the OpenGL texture for the font.
func (f *Font) Destroy() {
f.Owner.gfx.DeleteTexture(f.Texture)
}
// GetCurrentScale returns the scale value for the font based on the current
// Manager's resolution vs the resolution the UI was designed for.
func (f *Font) GetCurrentScale() float32 {
_, uiHeight := f.Owner.GetResolution()
designHeight := f.Owner.GetDesignHeight()
return float32(uiHeight) / float32(designHeight)
}
// GetRenderSize returns the width and height necessary in pixels for the
// font to display a string. The third return value is the advance height of the string.
func (f *Font) GetRenderSize(msg string) (float32, float32, float32) {
var w, h float32
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
for _, ch := range msg {
bounds, _, _ := f.face.GlyphBounds(ch)
glyphDimensions := bounds.Max.Sub(bounds.Min)
adv, _ := f.face.GlyphAdvance(ch)
w += fixedInt26ToFloat(adv)
glyphDYf := fixedInt26ToFloat(glyphDimensions.Y)
if h < glyphDYf {
h = glyphDYf
}
}
metrics := f.face.Metrics()
advH := fixedInt26ToFloat(metrics.Ascent)
return w * fontScale, h * fontScale, advH * fontScale
}
// OffsetFloor returns the maximum width offset that will fit between characters that
// is still smaller than the offset passed in.
func (f *Font) OffsetFloor(msg string, offset float32) float32 {
var w float32
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
for _, ch := range msg {
adv, ok := f.face.GlyphAdvance(ch)
if !ok {
fmt.Printf("ERROR on glyphadvance for %c!\n", ch)
}
advf := fixedInt26ToFloat(adv)
// break if we go over the distance
if w+advf > offset {
break
}
w += advf
} | }
// OffsetForIndex returns the width offset that will fit just before the `stopIndex`
// number character in the msg.
func (f *Font) OffsetForIndex(msg string, stopIndex int) float32 {
return f.OffsetForIndexAdv(msg, 0, stopIndex)
}
// OffsetForIndexAdv returns the width offset that will fit just before the `stopIndex`
// number character in the msg, starting at charStartIndex.
func (f *Font) OffsetForIndexAdv(msg string, charStartIndex int, stopIndex int) float32 {
var w float32
// sanity test the input
if len(msg) < 1 {
return 0.0
}
if charStartIndex > stopIndex {
return 0.0
}
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
for i, ch := range msg[charStartIndex:] {
// calculate up to the stopIndex but do not include it
if i+charStartIndex >= stopIndex {
break
}
adv, _ := f.face.GlyphAdvance(ch)
w += fixedInt26ToFloat(adv)
}
return w * fontScale
}
// fixedInt26ToFloat converts a fixed int 26:6 precision to a float32.
func fixedInt26ToFloat(fixedInt fixed.Int26_6) float32 {
var result float32
i := int32(fixedInt)
result += float32(i >> 6)
result += float32(i&0x003F) / float32(64.0)
return result
}
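// A quick worked example of the 26:6 decoding above (illustrative only): for a raw
// fixed-point value of 100, the integer part is 100>>6 = 1 and the fractional part
// is (100&0x3F)/64 = 36/64 = 0.5625, so:
//
//	v := fixed.Int26_6(100)
//	_ = fixedInt26ToFloat(v) // 1.5625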
// TextRenderData is a structure containing the raw OpenGL VBO data needed
// to render a text string for a given texture.
type TextRenderData struct {
ComboBuffer []float32 // the combo VBO data (vert/uv/color)
IndexBuffer []uint32 // the element index VBO data
Faces uint32 // the number of faces in the text string
Width float32 // the width in pixels of the text string
Height float32 // the height in pixels of the text string
AdvanceHeight float32 // the number of pixels to move the pen in the vertical direction
CursorOverflowRight bool // whether or not the cursor was too far to the right for string width
}
// CreateText makes a new renderable object from the supplied string
// using the data in the font. The data is returned as a TextRenderData object.
func (f *Font) CreateText(pos mgl.Vec3, color mgl.Vec4, msg string) TextRenderData {
return f.CreateTextAdv(pos, color, -1.0, -1, -1, msg)
}
// CreateTextAdv makes a new renderable object from the supplied string
// using the data in the font. The string returned will be the maximum amount of the msg that fits
// the specified maxWidth (if greater than 0.0) starting at the charOffset specified.
// The data is returned as a TextRenderData object.
func (f *Font) CreateTextAdv(pos mgl.Vec3, color mgl.Vec4, maxWidth float32, charOffset int, cursorPosition int, msg string) TextRenderData {
// this is the texture ID of the font to use in the shader; by default
// the library always binds the font to the first texture sampler.
const floatTexturePosition = 0.0
// sanity checks
originalLen := len(msg)
trimmedMsg := msg
if originalLen == 0 {
return TextRenderData{
ComboBuffer: nil,
IndexBuffer: nil,
Faces: 0,
Width: 0.0,
Height: 0.0,
AdvanceHeight: 0.0,
CursorOverflowRight: false,
}
}
if charOffset > 0 && charOffset < originalLen {
// trim the string based on incoming character offset
trimmedMsg = trimmedMsg[charOffset:]
}
// get the length of our message
msgLength := len(trimmedMsg)
// create the arrays to hold the data to buffer to OpenGL
comboBuffer := make([]float32, 0, msgLength*(2+2+4)*4) // pos, uv, color4
indexBuffer := make([]uint32, 0, msgLength*6) // two faces * three indexes
// do a preliminary test to see how much room the message will take up
dimX, dimY, advH := f.GetRenderSize(trimmedMsg)
// see how much to scale the size based on current resolution vs design resolution
fontScale := f.GetCurrentScale()
// loop through the message
var totalChars = 0
var scaledSize float32 = 0.0
var cursorOverflowRight bool
var penX = pos[0]
var penY = pos[1] - float32(advH)
for chi, ch := range trimmedMsg {
// get the rune data
chData := f.locations[ch]
/*
bounds, _, _ := f.face.GlyphBounds(ch)
glyphD := bounds.Max.Sub(bounds.Min)
glyphAdvW, _ := f.face.GlyphAdvance(ch)
metrics := f.face.Metrics()
glyphAdvH := float32(metrics.Ascent.Round())
glyphH := float32(glyphD.Y.Round())
glyphW := float32(glyphD.X.Round())
advHeight := glyphAdvH
advWidth := float32(glyphAdvW.Round())
*/
glyphH := f.GlyphHeight
glyphW := f.GlyphWidth
advHeight := chData.advanceHeight
advWidth := chData.advanceWidth
// possibly stop here if we're going to overflow the max width
if maxWidth > 0.0 && scaledSize+(advWidth*fontScale) > maxWidth {
// we overflowed the size of the string, now check to see if
// the cursor position is covered within this string or if that hasn't
// been reached yet.
if cursorPosition >= 0 && cursorPosition-charOffset > chi {
cursorOverflowRight = true
}
// adjust the dimX here since we shortened the string
dimX = scaledSize
break
}
scaledSize += advWidth * fontScale
// set up the coordinates for the vertexes
x0 := penX
y0 := penY - (glyphH-advHeight)*fontScale
x1 := x0 + glyphW*fontScale
y1 := y0 + glyphH*fontScale
s0 := chData.uvMinX
t0 := chData.uvMinY
s1 := chData.uvMaxX
t1 := chData.uvMaxY
// set the vertex data
comboBuffer = append(comboBuffer, x1)
comboBuffer = append(comboBuffer, y0)
comboBuffer = append(comboBuffer, s1)
comboBuffer = append(comboBuffer, t0)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
comboBuffer = append(comboBuffer, x1)
comboBuffer = append(comboBuffer, y1)
comboBuffer = append(comboBuffer, s1)
comboBuffer = append(comboBuffer, t1)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
comboBuffer = append(comboBuffer, x0)
comboBuffer = append(comboBuffer, y1)
comboBuffer = append(comboBuffer, s0)
comboBuffer = append(comboBuffer, t1)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
comboBuffer = append(comboBuffer, x0)
comboBuffer = append(comboBuffer, y0)
comboBuffer = append(comboBuffer, s0)
comboBuffer = append(comboBuffer, t0)
comboBuffer = append(comboBuffer, floatTexturePosition)
comboBuffer = append(comboBuffer, color[:]...)
startIndex := uint32(chi) * 4
indexBuffer = append(indexBuffer, startIndex)
indexBuffer = append(indexBuffer, startIndex+1)
indexBuffer = append(indexBuffer, startIndex+2)
indexBuffer = append(indexBuffer, startIndex+2)
indexBuffer = append(indexBuffer, startIndex+3)
indexBuffer = append(indexBuffer, startIndex)
// advance the pen
penX += advWidth * fontScale
totalChars++
}
return TextRenderData{
ComboBuffer: comboBuffer,
IndexBuffer: indexBuffer,
Faces: uint32(totalChars * 2),
Width: float32(dimX),
Height: float32(dimY),
AdvanceHeight: float32(advH),
CursorOverflowRight: cursorOverflowRight,
}
}
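// A usage sketch for CreateTextAdv (illustrative values; "font" is assumed to be a
// *Font created through the Manager): render at most 200 pixels of the string
// starting at character offset 3, with the text cursor at index 10:
//
//	trd := font.CreateTextAdv(mgl.Vec3{10, 200, 0}, mgl.Vec4{1, 1, 1, 1}, 200.0, 3, 10, "hello, world")
//	if trd.CursorOverflowRight {
//		// the cursor sits beyond the portion of the string that fit within 200px
//	}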
// loadRGBAToTexture takes a byte slice and throws it into an OpenGL texture.
func (f *Font) loadRGBAToTexture(rgba []byte, imageSize int32) graphics.Texture {
return f.loadRGBAToTextureExt(rgba, imageSize, graphics.LINEAR, graphics.LINEAR, graphics.CLAMP_TO_EDGE, graphics.CLAMP_TO_EDGE)
}
// loadRGBAToTextureExt takes a byte slice and throws it into an OpenGL texture.
func (f *Font) loadRGBAToTextureExt(rgba []byte, imageSize, magFilter, minFilter, wrapS, wrapT int32) graphics.Texture {
tex := f.Owner.gfx.GenTexture()
f.Owner.gfx.ActiveTexture(graphics.TEXTURE0)
f.Owner.gfx.BindTexture(graphics.TEXTURE_2D, tex)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_MAG_FILTER, magFilter)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_MIN_FILTER, minFilter)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_WRAP_S, wrapS)
f.Owner.gfx.TexParameteri(graphics.TEXTURE_2D, graphics.TEXTURE_WRAP_T, wrapT)
f.Owner.gfx.TexImage2D(graphics.TEXTURE_2D, 0, graphics.RGBA, imageSize, imageSize, 0, graphics.RGBA, graphics.UNSIGNED_BYTE, f.Owner.gfx.Ptr(rgba), len(rgba))
return tex
} |
return w * fontScale | random_line_split |
component_catalog.py | #
# Copyright 2018-2023 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from logging import Logger
import os
from pathlib import Path
from queue import Empty
from queue import Queue
from threading import Event
from threading import Thread
import time
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import entrypoints
from jinja2 import Environment
from jinja2 import PackageLoader
from jinja2 import Template
from jupyter_core.paths import jupyter_runtime_dir
from traitlets.config import SingletonConfigurable
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from elyra.metadata.manager import MetadataManager
from elyra.metadata.metadata import Metadata
from elyra.metadata.schemaspaces import ComponentCatalogs
from elyra.pipeline.catalog_connector import ComponentCatalogConnector
from elyra.pipeline.component import Component
from elyra.pipeline.component import ComponentParser
from elyra.pipeline.component_metadata import ComponentCatalogMetadata
from elyra.pipeline.properties import ComponentProperty
from elyra.pipeline.registry import PipelineProcessorRegistry
from elyra.pipeline.runtime_type import RuntimeProcessorType
BLOCKING_TIMEOUT = 0.5
NONBLOCKING_TIMEOUT = 0.10
# Issue warnings if catalog update takes longer than this value in seconds
CATALOG_UPDATE_TIMEOUT = int(os.getenv("ELYRA_CATALOG_UPDATE_TIMEOUT", 15))
# Issue warnings when outstanding worker thread counts exceed this value
WORKER_THREAD_WARNING_THRESHOLD = int(os.getenv("ELYRA_WORKER_THREAD_WARNING_THRESHOLD", 10))
# Define custom type to describe the component cache
ComponentCacheType = Dict[str, Dict[str, Dict[str, Dict[str, Union[Component, str, List[str]]]]]]
class RefreshInProgressError(Exception):
def __init__(self):
super().__init__("A catalog refresh is in progress. Try the request later.")
class RefreshQueue(Queue):
"""Entries are associated with a complete refresh of the Component Cache."""
_refreshing: bool
def __init__(self):
super().__init__()
self._refreshing = False
@property
def refreshing(self) -> bool:
return self._refreshing
@refreshing.setter
def refreshing(self, value: bool) -> None:
self._refreshing = value
def get(self, block: bool = True, timeout: Optional[float] = None):
"""Overrides the superclass method to set the refreshing property to false when empty."""
try:
entry = super().get(block=block, timeout=timeout)
except Empty:
self.refreshing = False
raise
return entry
def put(self, item, block=True, timeout=None):
"""Overrides the superclass method to set the refreshing property to true."""
super().put(item, block=block, timeout=timeout)
self.refreshing = True
class UpdateQueue(Queue):
"""Entries are associated with a single update of the Component Cache.
This class merely exists to distinguish it from the RefreshQueue instance.
"""
pass
class CacheUpdateManager(Thread):
"""
Primary thread for maintaining consistency of the component cache.
The component cache manager maintains the cache queue, whose entries are a
tuple of 'catalog' and 'action'. The 'catalog' is a catalog instance against
which the 'action' is applied. The 'action' is one of 'modify' or 'delete'.
For 'delete' the components of the referenced catalog are removed. For 'modify'
the components of the referenced catalog are inserted or updated (depending on
its prior existence).
"""
def __init__(
self, log: Logger, component_cache: ComponentCacheType, refresh_queue: RefreshQueue, update_queue: UpdateQueue
):
super().__init__()
self.daemon = True
self.name = "CacheUpdateManager"
self.log: Logger = log
self._component_cache: ComponentCacheType = component_cache
self._refresh_queue: RefreshQueue = refresh_queue
self._update_queue: UpdateQueue = update_queue
self._check_refresh_queue = False
self._threads: List[CacheUpdateWorker] = []
self.stop_event: Event = Event() # Set when server process stops
def run(self):
"""Process queue queue entries until server is stopped."""
while not self.stop_event.is_set():
self.manage_cache_tasks()
def manage_cache_tasks(self):
"""
Check the cache queue for a cache update action and start
a corresponding worker thread to complete the update
"""
outstanding_threads = self._has_outstanding_threads()
try:
# Get a task from the cache queue, waiting less if we have active threads.
timeout = NONBLOCKING_TIMEOUT if outstanding_threads else BLOCKING_TIMEOUT
# Toggle between refresh and update queues so as to prevent starvation.
self._check_refresh_queue = not self._check_refresh_queue
if self._check_refresh_queue:
catalog, action = self._refresh_queue.get(timeout=timeout)
else:
catalog, action = self._update_queue.get(timeout=timeout)
except Empty:
# No task exists in the cache queue, proceed to check for thread execution
pass
else:
# Create and start a thread for the task
updater_thread = CacheUpdateWorker(
self._component_cache,
self._refresh_queue if self._check_refresh_queue else self._update_queue,
catalog,
action,
)
updater_thread.start()
queue_clause = "refreshing" if self._check_refresh_queue else "updating"
self.log.debug(f"CacheUpdateWorker {queue_clause} catalog: '{updater_thread.name}', action: '{action}'...")
self._threads.append(updater_thread)
def _has_outstanding_threads(self) -> bool:
"""
Join finished threads and report on long-running threads as needed.
"""
outstanding_threads = False
for thread in self._threads:
# Attempt to join thread within the given amount of time
thread.join(timeout=NONBLOCKING_TIMEOUT)
cumulative_run_time = int(time.time() - thread.task_start_time)
if thread.is_alive():
# Thread is still running (thread join timed out)
outstanding_threads = True
# Report on a long-running thread if CATALOG_UPDATE_TIMEOUT is exceeded
time_since_last_check = int(time.time() - thread.last_warn_time)
if time_since_last_check > CATALOG_UPDATE_TIMEOUT:
thread.last_warn_time = time.time()
self.log.warning(
f"Cache update for catalog '{thread.name}' is still processing "
f"after {cumulative_run_time} seconds ..."
)
else:
self.log.debug(f"CacheUpdateWorker completed for catalog: '{thread.name}', action: '{thread.action}'.")
# Thread has been joined and can be removed from the list
self._threads.remove(thread)
# Mark cache task as complete
thread.queue.task_done()
# Report successful join for threads that have previously logged a
# cache update duration warning
if thread.last_warn_time != thread.task_start_time:
self.log.info(
f"Cache update for catalog '{thread.name}' has "
f"completed after {cumulative_run_time} seconds"
)
if len(self._threads) > WORKER_THREAD_WARNING_THRESHOLD:
self.log.warning(
f"CacheUpdateWorker outstanding threads threshold "
f"({WORKER_THREAD_WARNING_THRESHOLD}) has been exceeded. "
f"{len(self._threads)} threads are outstanding. This may "
f"indicate a possible issue."
)
return outstanding_threads
def is_refreshing(self) -> bool:
return self._refresh_queue.refreshing
def init_refresh(self) -> None:
self._refresh_queue.refreshing = True
def stop(self):
"""Trigger completion of the manager thread."""
self._refresh_queue.refreshing = False
self.stop_event.set()
self.log.debug("CacheUpdateManager stopped.")
class CacheUpdateWorker(Thread):
"""Spawned by the CacheUpdateManager to perform work against the component cache."""
def __init__(
self,
component_cache: ComponentCacheType,
queue: Queue,
catalog: ComponentCatalogMetadata,
action: Optional[str] = None,
):
super().__init__()
self.daemon = True
self.name = catalog.name # Let the name of the thread reflect the catalog being managed
self._component_cache: ComponentCacheType = component_cache
# Task-specific properties
self.queue: Queue = queue
self.catalog: ComponentCatalogMetadata = catalog
self.action: str = action
# Thread metadata
self.task_start_time = time.time()
self.last_warn_time = self.task_start_time
# Prepare component cache for modification
runtime_type = None
if self.catalog.metadata:
runtime_type = self.catalog.runtime_type.name
self.prepare_cache_for_catalog(runtime_type)
def run(self):
"""Apply the relative action to the given catalog entry in the cache."""
if self.action == "delete":
# Check all runtime types in cache for an entry of the given name.
# If found, remove only the components from this catalog
for runtime_type in self._component_cache:
if self.catalog.name in self._component_cache[runtime_type]:
self._component_cache[runtime_type].pop(self.catalog.name, None)
break
else: # 'modify' - replace (or add) components from the given catalog an update its status
runtime_type = self.catalog.runtime_type.name
catalog_state = self._component_cache[runtime_type][self.catalog.name].get("status")
try:
# Replace all components for the given catalog
self._component_cache[runtime_type][self.catalog.name][
"components"
] = ComponentCache.instance().read_component_catalog(self.catalog)
catalog_state["state"] = "current"
catalog_state["errors"] = [] # reset any errors that may have been present
except Exception as e:
# Update state with an 'error' action and the relevant message
catalog_state["state"] = "error"
catalog_state["errors"].append(str(e))
def prepare_cache_for_catalog(self, runtime_type: Optional[str] = None):
"""
Add entries to the component cache for the runtime type and/or catalog
of focus for this thread, and set the catalog state to 'updating'.
"""
if self.action == "delete":
# On 'delete' the runtime_type parameter will be None and since catalog names
# are essentially unique across runtime types, we can break out of this loop
# on first occurrence and let _that_ runtime type be used in the following code.
for runtime_type in self._component_cache:
if self.catalog.name in self._component_cache[runtime_type]:
break
# Add sub-dictionary for this runtime type if not present
if not self._component_cache.get(runtime_type):
self._component_cache[runtime_type] = {}
# Add sub-dictionary for this catalog if not present - this will occur when
# a catalog instance is created, so we're essentially adding a placeholder.
if not self._component_cache[runtime_type].get(self.catalog.name):
self._component_cache[runtime_type][self.catalog.name] = {
"components": {},
"status": {"state": "updating", "errors": []},
}
else: # Set state to 'updating' for an existing entry
self._component_cache[runtime_type][self.catalog.name]["status"]["state"] = "updating"
class ComponentCache(SingletonConfigurable):
"""Represents the cache of component definitions indexed by runtime-type, then by catalog name."""
# The component_cache is indexed at the top level by runtime type name, e.g. 'APACHE_AIRFLOW',
# and has as its value another dictionary. At the second level, each sub-dictionary is indexed by
# a ComponentCatalogMetadata instance name; its value is also a sub-dictionary. This sub-dictionary
# consists of two additional dictionaries: 1.) one with key "components" whose dictionary is
# indexed by component id and maps to the corresponding Component object, and 2.) one with key
# "status" and value of a final sub-dictionary with key-value pairs "state":"<current/updating/errors>"
# and "errors":["<error1>", "<error2>", ...] to dynamically indicate the status of this catalog instance
_component_cache: ComponentCacheType = {}
_generic_category_label = "Elyra"
_generic_components: Dict[str, Component] = {
"notebook": Component(
id="notebook",
name="Notebook",
description="Run notebook file",
op="execute-notebook-node",
catalog_type="elyra",
component_reference="elyra",
extensions=[".ipynb"],
categories=[_generic_category_label],
),
"python-script": Component(
id="python-script",
name="Python Script",
description="Run Python script",
op="execute-python-node",
catalog_type="elyra",
component_reference="elyra",
extensions=[".py"],
categories=[_generic_category_label],
),
"r-script": Component(
id="r-script",
name="R Script",
description="Run R script",
op="execute-r-node",
catalog_type="elyra",
component_reference="elyra",
extensions=[".r"],
categories=[_generic_category_label],
),
}
def __init__(self, **kwargs):
emulate_server_app: bool = kwargs.pop("emulate_server_app", False)
super().__init__(**kwargs)
self._component_cache = {}
self.is_server_process = ComponentCache._determine_server_process(emulate_server_app, **kwargs)
self.manifest_dir = jupyter_runtime_dir()
# Ensure queue attribute exists for non-server instances as well.
self.refresh_queue: Optional[RefreshQueue] = None
self.update_queue: Optional[UpdateQueue] = None
if self.is_server_process:
self.refresh_queue = RefreshQueue()
self.update_queue = UpdateQueue()
# Set up watchdog for manifest file for out-of-process updates
self.observer = Observer()
self.observer.schedule(ManifestFileChangeHandler(self), self.manifest_dir)
# Start a thread to manage updates to the component cache
manager = CacheUpdateManager(self.log, self._component_cache, self.refresh_queue, self.update_queue)
self.cache_manager = manager
self.cache_manager.start()
self.log.debug("CacheUpdateManager started...")
else:
self.manifest_filename = os.path.join(self.manifest_dir, f"elyra-component-manifest-{os.getpid()}.json")
@staticmethod
def _determine_server_process(emulate_server_app: bool, **kwargs) -> bool:
"""Determines if this process is a server (extension) process."""
app_names = ["ServerApp", "ElyraApp"]
is_server_process = False
if "parent" in kwargs and kwargs["parent"].__class__.__name__ in app_names:
is_server_process = True
elif emulate_server_app: # Used in unittests
is_server_process = True
return is_server_process
def load(self):
"""
Completes a series of actions during system startup, such as creating
the component manifest file and triggering the build of the component
cache for existing ComponentCatalog metadata instances.
"""
# Proceed only if singleton instance has been created
if self.initialized:
# The cache manager will work on manifest and cache tasks on an
# in-process basis as load() is only called during startup from
# the server process.
if self.is_server_process:
# Remove all existing manifest files from previous processes
self._remove_all_manifest_files()
# Start the watchdog if it's not alive, prevents redundant starts
if not self.observer.is_alive():
self.observer.start()
# Fetch all component catalog instances and trigger their add to the
# component cache if this is not already happening (it seems some server
# test fixtures could be loading the server extensions multiple times).
if not self.cache_manager.is_refreshing():
self.refresh()
def refresh(self):
"""Triggers a refresh of all catalogs in the component cache.
Raises RefreshInProgressError if a complete refresh is in progress.
Note that we do not preclude non-server processes from performing a
complete refresh. In such cases, each of the catalog entries will be
written to the manifest, which will be placed into the update queue.
As a result, non-server applications could by-pass the "refresh in progress"
constraint, but we're assuming a CLI application won't be as likely to
"pound" refresh like a UI application can.
"""
if self.is_server_process and self.cache_manager.is_refreshing():
raise RefreshInProgressError()
catalogs = MetadataManager(schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID).get_all()
for catalog in catalogs:
self._insert_request(self.refresh_queue, catalog, "modify")
def update(self, catalog: Metadata, action: str):
"""
Triggers an update of the component cache for the given catalog name. If this is a non-server
process, the entry is written to the manifest file where it will be "processed" by the watchdog
and inserted into the component cache queue, otherwise we update the cache queue directly.
"""
self._insert_request(self.update_queue, catalog, action)
def _insert_request(self, queue: Queue, catalog: ComponentCatalogMetadata, action: str):
"""
If running as a server process, the request is submitted to the desired queue, otherwise
it is posted to the manifest where the server process (if running) can detect the manifest
file update and send the request to the update queue.
Note that any calls to ComponentCache.refresh() from non-server processes will still
perform the refresh, but via the update queue rather than the refresh queue. We could,
instead, raise NotImplementedError in such cases, but we may want the ability to refresh
the entire component cache from a CLI utility and the current implementation would allow that.
"""
# Ensure referenced runtime is available
if not PipelineProcessorRegistry.instance().is_valid_runtime_type(catalog.runtime_type.name):
return
if self.is_server_process:
queue.put((catalog, action))
else:
manifest: Dict[str, str] = self._load_manifest()
manifest[catalog.name] = action
self.update_manifest(manifest=manifest)
def _remove_all_manifest_files(self):
"""
Remove all existing manifest files in the Jupyter runtimes directory.
"""
manifest_files = Path(self.manifest_dir).glob("**/elyra-component-manifest-*.json")
for file in manifest_files:
os.remove(str(file))
def _load_manifest(self, filename: Optional[str] = None) -> Dict[str, str]:
"""Read and return the contents of a manifest file.
If 'filename' is not provided, this process's manifest file will be read.
"""
filename = filename or self.manifest_filename
if not os.path.isfile(filename):
self.log.debug(f"Manifest file '{filename}' doesn't exist and will be created.")
return {}
with open(filename, "r") as f:
manifest: Dict[str, str] = json.load(f)
self.log.debug(f"Reading manifest '{manifest}' from file '{filename}'")
return manifest
def update_manifest(self, filename: Optional[str] = None, manifest: Optional[Dict[str, str]] = None) -> None:
"""Update the manifest file with the given entry."""
filename = filename or self.manifest_filename
manifest = manifest or {}
self.log.debug(f"Updating manifest '{manifest}' to file '{filename}'")
with open(filename, "w") as f:
json.dump(manifest, f, indent=2)
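    # The manifest itself is a simple catalog-name-to-action map, so a file written by a
    # CLI process might contain, for example (illustrative names):
    #   {"my-catalog": "modify", "stale-catalog": "delete"}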
def wait_for_all_cache_tasks(self):
"""
Block execution and wait for all tasks in the cache task update queue to complete.
Primarily used for testing.
"""
if self.is_server_process:
self.update_queue.join()
self.refresh_queue.join()
def get_all_components(self, platform: RuntimeProcessorType) -> List[Component]:
"""
Retrieve all components from component catalog cache
"""
components: List[Component] = []
catalogs = self._component_cache.get(platform.name, {})
for catalog_name, catalog_properties in catalogs.items():
components.extend(list(catalog_properties.get("components", {}).values()))
if not components and platform != RuntimeProcessorType.LOCAL:
self.log.error(f"No components could be found in any catalog for platform type '{platform.name}'.")
return components
def get_component(self, platform: RuntimeProcessorType, component_id: str) -> Optional[Component]:
"""
Retrieve the component with a given component_id from component catalog cache
"""
component: Optional[Component] = None
catalogs = self._component_cache.get(platform.name, {})
for catalog_name, catalog_properties in catalogs.items():
component = catalog_properties.get("components", {}).get(component_id)
if component:
break
if not component:
self.log.error(f"Component with ID '{component_id}' could not be found in any catalog.")
return component
def _load_catalog_reader_class(
self, catalog: ComponentCatalogMetadata, file_types: List[str]
) -> Optional[ComponentCatalogConnector]:
"""
Load the appropriate entrypoint class based on the schema name indicated in
the ComponentCatalogMetadata instance and the file types associated with the component
parser in use
"""
try:
catalog_reader = entrypoints.get_group_named("elyra.component.catalog_types").get(catalog.schema_name)
if not catalog_reader:
self.log.error(
f"No entrypoint with name '{catalog.schema_name}' was found in group "
f"'elyra.component.catalog_types' to match the 'schema_name' given in catalog "
f"'{catalog.display_name}'. Skipping..."
)
return None
catalog_reader = catalog_reader.load()(file_types, parent=self.parent)
except Exception as e:
self.log.error(f"Could not load appropriate ComponentCatalogConnector class: {e}. Skipping...")
return None
return catalog_reader
def read_component_catalog(self, catalog: ComponentCatalogMetadata) -> Dict[str, Component]:
"""
Read a component catalog and return a dictionary of components indexed by component_id.
:param catalog: a metadata instances from which to read and construct Component objects
:returns: a dictionary of component id to Component object for all read/parsed components
"""
components: Dict[str, Component] = {}
# Ensure referenced runtime is available
if not PipelineProcessorRegistry.instance().is_valid_runtime_type(catalog.runtime_type.name):
return components
# Assign component parser based on the runtime platform type
parser = ComponentParser.create_instance(platform=catalog.runtime_type)
# Assign reader based on the type of the catalog (the 'schema_name')
catalog_reader = self._load_catalog_reader_class(catalog, parser.file_types)
if not catalog_reader:
return components
# Get content of component definition file for each component in this catalog
self.log.debug(f"Processing components in catalog '{catalog.display_name}'")
catalog_entries = catalog_reader.read_component_definitions(catalog)
if not catalog_entries:
return components
for catalog_entry in catalog_entries:
# Parse the entry to get a fully qualified Component object
try:
parsed_components = parser.parse(catalog_entry) or []
except Exception as e:
self.log.warning(
f"Could not parse definition for component with identifying information: "
f"'{catalog_entry.entry_reference}' -> {str(e)}"
)
else:
for component in parsed_components:
components[component.id] = component
return components
@staticmethod
def get_generic_components() -> List[Component]:
return list(ComponentCache._generic_components.values())
@staticmethod
def get_generic_component(component_id: str) -> Optional[Component]:
return ComponentCache._generic_components.get(component_id)
@staticmethod
def get_generic_component_from_op(component_op: str) -> Optional[Component]:
for component in ComponentCache.get_generic_components():
if component.op == component_op:
return component
return None
@staticmethod
def get_generic_component_ops() -> List[str]:
return [component.op for component in ComponentCache.get_generic_components()]
@staticmethod
def load_jinja_template(template_name: str) -> Template:
"""
Loads the jinja template of the given name from the
elyra/templates/components folder
"""
loader = PackageLoader("elyra", "templates/components")
template_env = Environment(loader=loader)
template_env.policies["json.dumps_kwargs"] = {"sort_keys": False} # prevent automatic key sort on 'tojson'
return template_env.get_template(template_name)
@staticmethod
def to_canvas_palette(components: List[Component]) -> Dict:
"""
Converts catalog components into appropriate canvas palette format
"""
template = ComponentCache.load_jinja_template("canvas_palette_template.jinja2")
# Define a fallback category for components with no given categories
fallback_category_name = "No Category"
# Convert the list of all components into a dictionary of
# component lists keyed by category
category_to_components: Dict[str, List[Component]] = {}
for component in components:
categories = component.categories
# Assign a fallback category so that component is not
# lost during palette render
if not categories:
categories = [fallback_category_name]
for category in categories:
if category not in category_to_components.keys():
category_to_components[category] = []
if component.id not in [comp.id for comp in category_to_components[category]]:
category_to_components[category].append(component)
# Render template
canvas_palette = template.render(category_dict=category_to_components)
return json.loads(canvas_palette)
@staticmethod
def to_canvas_properties(component: Component) -> Dict:
"""
Converts catalog components into appropriate canvas properties format.
If component_id is one of the generic set, generic template is rendered,
otherwise, the runtime-specific property template is rendered.
"""
if ComponentCache.get_generic_component(component.id) is not None:
template = ComponentCache.load_jinja_template("generic_properties_template.jinja2")
else:
template = ComponentCache.load_jinja_template("canvas_properties_template.jinja2")
template_vars = {
"elyra_owned_properties": component.get_elyra_properties(),
"render_property_details": ComponentProperty.render_property_details,
}
template.globals.update(template_vars)
canvas_properties = template.render(component=component)
return json.loads(canvas_properties)
class ManifestFileChangeHandler(FileSystemEventHandler):
"""Watchdog handler that filters on .json files within specific metadata directories."""
def __init__(self, component_cache: ComponentCache, **kwargs):
super().__init__(**kwargs)
self.component_cache = component_cache
self.log = component_cache.log
def dispatch(self, event):
|
def on_modified(self, event):
"""Fires when the component manifest file is modified."""
self.log.debug(f"ManifestFileChangeHandler: file '{event.src_path}' has been modified.")
manifest = self.component_cache._load_manifest(filename=event.src_path)
if manifest: # only update the manifest if there is work to do
for catalog, action in manifest.items():
self.log.debug(f"ManifestFileChangeHandler: inserting ({catalog},{action}) into update queue...")
if action == "delete":
# The metadata instance has already been deleted, so we must
# fabricate an instance that only consists of a catalog name
catalog_instance = ComponentCatalogMetadata(name=catalog)
else: # cache_action == 'modify':
# Fetch the catalog instance associated with this action
catalog_instance = MetadataManager(
schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID
).get(name=catalog)
self.component_cache.update(catalog=catalog_instance, action=action)
self.component_cache.update_manifest(filename=event.src_path) # clear the manifest
| """Dispatches delete and modification events pertaining to the manifest filename"""
if "elyra-component-manifest" in event.src_path:
super().dispatch(event) | identifier_body |
component_catalog.py | #
# Copyright 2018-2023 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from logging import Logger
import os
from pathlib import Path
from queue import Empty
from queue import Queue
from threading import Event
from threading import Thread
import time
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import entrypoints
from jinja2 import Environment
from jinja2 import PackageLoader
from jinja2 import Template
from jupyter_core.paths import jupyter_runtime_dir
from traitlets.config import SingletonConfigurable
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from elyra.metadata.manager import MetadataManager
from elyra.metadata.metadata import Metadata
from elyra.metadata.schemaspaces import ComponentCatalogs
from elyra.pipeline.catalog_connector import ComponentCatalogConnector
from elyra.pipeline.component import Component
from elyra.pipeline.component import ComponentParser
from elyra.pipeline.component_metadata import ComponentCatalogMetadata
from elyra.pipeline.properties import ComponentProperty
from elyra.pipeline.registry import PipelineProcessorRegistry
from elyra.pipeline.runtime_type import RuntimeProcessorType
BLOCKING_TIMEOUT = 0.5
NONBLOCKING_TIMEOUT = 0.10
# Issue warnings if catalog update takes longer than this value in seconds
CATALOG_UPDATE_TIMEOUT = int(os.getenv("ELYRA_CATALOG_UPDATE_TIMEOUT", 15))
# Issue warnings when outstanding worker thread counts exceed this value
WORKER_THREAD_WARNING_THRESHOLD = int(os.getenv("ELYRA_WORKER_THREAD_WARNING_THRESHOLD", 10))
# Define custom type to describe the component cache
ComponentCacheType = Dict[str, Dict[str, Dict[str, Dict[str, Union[Component, str, List[str]]]]]]
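# Illustrative sketch only (not part of the original source): for a single catalog named
# "my-catalog" registered under the APACHE_AIRFLOW runtime type, a ComponentCacheType value
# could look like the following (names "my-catalog" and "some-component-id" are hypothetical):
#
#     {
#         "APACHE_AIRFLOW": {
#             "my-catalog": {
#                 "components": {"some-component-id": Component(...)},
#                 "status": {"state": "current", "errors": []},
#             }
#         }
#     }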
class RefreshInProgressError(Exception):
def __init__(self):
super().__init__("A catalog refresh is in progress. Try the request later.")
class RefreshQueue(Queue):
"""Entries are associated with a complete refresh of the Component Cache."""
_refreshing: bool
def __init__(self):
super().__init__()
self._refreshing = False
@property
def refreshing(self) -> bool:
return self._refreshing
@refreshing.setter
def refreshing(self, value: bool) -> None:
self._refreshing = value
def get(self, block: bool = True, timeout: Optional[float] = None):
"""Overrides the superclass method to set the refreshing property to false when empty."""
try:
entry = super().get(block=block, timeout=timeout)
except Empty:
self.refreshing = False
raise
return entry
def put(self, item, block=True, timeout=None):
"""Overrides the superclass method to set the refreshing property to true."""
super().put(item, block=block, timeout=timeout)
self.refreshing = True
class UpdateQueue(Queue):
"""Entries are associated with a single update of the Component Cache.
This class merely exists to distinguish it from the RefreshQueue instance.
"""
pass
class CacheUpdateManager(Thread):
"""
Primary thread for maintaining consistency of the component cache.
The component cache manager maintains the cache queue, whose entries are a
tuple of 'catalog' and 'action'. The 'catalog' is a catalog instance against
which the 'action' is applied. The 'action' is one of 'modify' or 'delete'.
For 'delete' the components of the referenced catalog are removed. For 'modify'
the components of the referenced catalog are inserted or updated (depending on
its prior existence).
"""
def __init__(
self, log: Logger, component_cache: ComponentCacheType, refresh_queue: RefreshQueue, update_queue: UpdateQueue
):
super().__init__()
self.daemon = True
self.name = "CacheUpdateManager"
self.log: Logger = log
self._component_cache: ComponentCacheType = component_cache
self._refresh_queue: RefreshQueue = refresh_queue
self._update_queue: UpdateQueue = update_queue
self._check_refresh_queue = False
self._threads: List[CacheUpdateWorker] = []
self.stop_event: Event = Event() # Set when server process stops
def run(self):
"""Process queue queue entries until server is stopped."""
while not self.stop_event.is_set():
self.manage_cache_tasks()
def manage_cache_tasks(self):
"""
Check the cache queue for a cache update action and start
a corresponding worker thread to complete the update
"""
outstanding_threads = self._has_outstanding_threads()
try:
# Get a task from the cache queue, waiting less if we have active threads.
timeout = NONBLOCKING_TIMEOUT if outstanding_threads else BLOCKING_TIMEOUT
# Toggle between refresh and update queues so as to prevent starvation.
self._check_refresh_queue = not self._check_refresh_queue
if self._check_refresh_queue:
catalog, action = self._refresh_queue.get(timeout=timeout)
else:
catalog, action = self._update_queue.get(timeout=timeout)
except Empty:
# No task exists in the cache queue, proceed to check for thread execution
pass
else:
# Create and start a thread for the task
updater_thread = CacheUpdateWorker(
self._component_cache,
self._refresh_queue if self._check_refresh_queue else self._update_queue,
catalog,
action,
)
updater_thread.start()
queue_clause = "refreshing" if self._check_refresh_queue else "updating"
self.log.debug(f"CacheUpdateWorker {queue_clause} catalog: '{updater_thread.name}', action: '{action}'...")
self._threads.append(updater_thread)
def _has_outstanding_threads(self) -> bool:
"""
Join finished threads and report on long-running threads as needed.
"""
outstanding_threads = False
for thread in self._threads:
# Attempt to join thread within the given amount of time
thread.join(timeout=NONBLOCKING_TIMEOUT)
cumulative_run_time = int(time.time() - thread.task_start_time)
if thread.is_alive():
# Thread is still running (thread join timed out)
|
else:
self.log.debug(f"CacheUpdateWorker completed for catalog: '{thread.name}', action: '{thread.action}'.")
# Thread has been joined and can be removed from the list
self._threads.remove(thread)
# Mark cache task as complete
thread.queue.task_done()
# Report successful join for threads that have previously logged a
# cache update duration warning
if thread.last_warn_time != thread.task_start_time:
self.log.info(
f"Cache update for catalog '{thread.name}' has "
f"completed after {cumulative_run_time} seconds"
)
if len(self._threads) > WORKER_THREAD_WARNING_THRESHOLD:
self.log.warning(
f"CacheUpdateWorker outstanding threads threshold "
f"({WORKER_THREAD_WARNING_THRESHOLD}) has been exceeded. "
f"{len(self._threads)} threads are outstanding. This may "
f"indicate a possible issue."
)
return outstanding_threads
def is_refreshing(self) -> bool:
return self._refresh_queue.refreshing
def init_refresh(self) -> None:
self._refresh_queue.refreshing = True
def stop(self):
"""Trigger completion of the manager thread."""
self._refresh_queue.refreshing = False
self.stop_event.set()
self.log.debug("CacheUpdateManager stopped.")
class CacheUpdateWorker(Thread):
"""Spawned by the CacheUpdateManager to perform work against the component cache."""
def __init__(
self,
component_cache: ComponentCacheType,
queue: Queue,
catalog: ComponentCatalogMetadata,
action: Optional[str] = None,
):
super().__init__()
self.daemon = True
self.name = catalog.name # Let the name of the thread reflect the catalog being managed
self._component_cache: ComponentCacheType = component_cache
# Task-specific properties
self.queue: Queue = queue
self.catalog: ComponentCatalogMetadata = catalog
self.action: str = action
# Thread metadata
self.task_start_time = time.time()
self.last_warn_time = self.task_start_time
# Prepare component cache for modification
runtime_type = None
if self.catalog.metadata:
runtime_type = self.catalog.runtime_type.name
self.prepare_cache_for_catalog(runtime_type)
def run(self):
"""Apply the relative action to the given catalog entry in the cache."""
if self.action == "delete":
# Check all runtime types in cache for an entry of the given name.
# If found, remove only the components from this catalog
for runtime_type in self._component_cache:
if self.catalog.name in self._component_cache[runtime_type]:
self._component_cache[runtime_type].pop(self.catalog.name, None)
break
else: # 'modify' - replace (or add) components from the given catalog and update its status
runtime_type = self.catalog.runtime_type.name
catalog_state = self._component_cache[runtime_type][self.catalog.name].get("status")
try:
# Replace all components for the given catalog
self._component_cache[runtime_type][self.catalog.name][
"components"
] = ComponentCache.instance().read_component_catalog(self.catalog)
catalog_state["state"] = "current"
catalog_state["errors"] = [] # reset any errors that may have been present
except Exception as e:
# Update state with an 'error' action and the relevant message
catalog_state["state"] = "error"
catalog_state["errors"].append(str(e))
def prepare_cache_for_catalog(self, runtime_type: Optional[str] = None):
"""
Add entries to the component cache for the runtime type and/or catalog
of focus for this thread, and set the catalog state to 'updating'.
"""
if self.action == "delete":
# On 'delete' the runtime_type parameter will be None and since catalog names
# are essentially unique across runtime types, we can break out of this loop
# on first occurrence and let _that_ runtime type be used in the following code.
for runtime_type in self._component_cache:
if self.catalog.name in self._component_cache[runtime_type]:
break
# Add sub-dictionary for this runtime type if not present
if not self._component_cache.get(runtime_type):
self._component_cache[runtime_type] = {}
# Add sub-dictionary for this catalog if not present - this will occur when
# a catalog instance is created, so we're essentially adding a placeholder.
if not self._component_cache[runtime_type].get(self.catalog.name):
self._component_cache[runtime_type][self.catalog.name] = {
"components": {},
"status": {"state": "updating", "errors": []},
}
else: # Set state to 'updating' for an existing entry
self._component_cache[runtime_type][self.catalog.name]["status"]["state"] = "updating"
class ComponentCache(SingletonConfigurable):
"""Represents the cache of component definitions indexed by runtime-type, then by catalog name."""
# The component_cache is indexed at the top level by runtime type name, e.g. 'APACHE_AIRFLOW',
# and has as its value another dictionary. At the second level, each sub-dictionary is indexed by
# a ComponentCatalogMetadata instance name; its value is also a sub-dictionary. This sub-dictionary
# consists of two additional dictionaries: 1.) one with key "components" whose dictionary is
# indexed by component id and maps to the corresponding Component object, and 2.) one with key
# "status" and value of a final sub-dictionary with key-value pairs "state":"<current/updating/errors>"
# and "errors":["<error1>", "<error2>", ...] to dynamically indicate the status of this catalog instance
_component_cache: ComponentCacheType = {}
_generic_category_label = "Elyra"
_generic_components: Dict[str, Component] = {
"notebook": Component(
id="notebook",
name="Notebook",
description="Run notebook file",
op="execute-notebook-node",
catalog_type="elyra",
component_reference="elyra",
extensions=[".ipynb"],
categories=[_generic_category_label],
),
"python-script": Component(
id="python-script",
name="Python Script",
description="Run Python script",
op="execute-python-node",
catalog_type="elyra",
component_reference="elyra",
extensions=[".py"],
categories=[_generic_category_label],
),
"r-script": Component(
id="r-script",
name="R Script",
description="Run R script",
op="execute-r-node",
catalog_type="elyra",
component_reference="elyra",
extensions=[".r"],
categories=[_generic_category_label],
),
}
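# Illustrative usage sketch (not part of the original source): the generic components defined
# above are typically retrieved through the static helpers further below, e.g.
#
#     ComponentCache.get_generic_component("notebook")                    # -> Notebook Component
#     ComponentCache.get_generic_component_from_op("execute-python-node") # -> Python Script Component
#     ComponentCache.get_generic_component_ops()                          # -> list of generic op names
#
# The component ids and op names shown are taken from the _generic_components dictionary above.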
def __init__(self, **kwargs):
emulate_server_app: bool = kwargs.pop("emulate_server_app", False)
super().__init__(**kwargs)
self._component_cache = {}
self.is_server_process = ComponentCache._determine_server_process(emulate_server_app, **kwargs)
self.manifest_dir = jupyter_runtime_dir()
# Ensure queue attribute exists for non-server instances as well.
self.refresh_queue: Optional[RefreshQueue] = None
self.update_queue: Optional[UpdateQueue] = None
if self.is_server_process:
self.refresh_queue = RefreshQueue()
self.update_queue = UpdateQueue()
# Set up watchdog for manifest file for out-of-process updates
self.observer = Observer()
self.observer.schedule(ManifestFileChangeHandler(self), self.manifest_dir)
# Start a thread to manage updates to the component cache
manager = CacheUpdateManager(self.log, self._component_cache, self.refresh_queue, self.update_queue)
self.cache_manager = manager
self.cache_manager.start()
self.log.debug("CacheUpdateManager started...")
else:
self.manifest_filename = os.path.join(self.manifest_dir, f"elyra-component-manifest-{os.getpid()}.json")
@staticmethod
def _determine_server_process(emulate_server_app: bool, **kwargs) -> bool:
"""Determines if this process is a server (extension) process."""
app_names = ["ServerApp", "ElyraApp"]
is_server_process = False
if "parent" in kwargs and kwargs["parent"].__class__.__name__ in app_names:
is_server_process = True
elif emulate_server_app: # Used in unittests
is_server_process = True
return is_server_process
def load(self):
"""
Completes a series of actions during system startup, such as creating
the component manifest file and triggering the build of the component
cache for existing ComponentCatalog metadata instances.
"""
# Proceed only if singleton instance has been created
if self.initialized:
# The cache manager will work on manifest and cache tasks on an
# in-process basis as load() is only called during startup from
# the server process.
if self.is_server_process:
# Remove all existing manifest files from previous processes
self._remove_all_manifest_files()
# Start the watchdog if it's not alive, prevents redundant starts
if not self.observer.is_alive():
self.observer.start()
# Fetch all component catalog instances and trigger their add to the
# component cache if this is not already happening (it seems some server
# test fixtures could be loading the server extensions multiple times).
if not self.cache_manager.is_refreshing():
self.refresh()
def refresh(self):
"""Triggers a refresh of all catalogs in the component cache.
Raises RefreshInProgressError if a complete refresh is in progress.
Note that we do not preclude non-server processes from performing a
complete refresh. In such cases, each of the catalog entries will be
written to the manifest, which will be placed into the update queue.
As a result, non-server applications could by-pass the "refresh in progress"
constraint, but we're assuming a CLI application won't be as likely to
"pound" refresh like a UI application can.
"""
if self.is_server_process and self.cache_manager.is_refreshing():
raise RefreshInProgressError()
catalogs = MetadataManager(schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID).get_all()
for catalog in catalogs:
self._insert_request(self.refresh_queue, catalog, "modify")
def update(self, catalog: Metadata, action: str):
"""
Triggers an update of the component cache for the given catalog name. If this is a non-server
process, the entry is written to the manifest file where it will be "processed" by the watchdog
and inserted into the component cache queue, otherwise we update the cache queue directly.
"""
self._insert_request(self.update_queue, catalog, action)
def _insert_request(self, queue: Queue, catalog: ComponentCatalogMetadata, action: str):
"""
If running as a server process, the request is submitted to the desired queue, otherwise
it is posted to the manifest where the server process (if running) can detect the manifest
file update and send the request to the update queue.
Note that any calls to ComponentCache.refresh() from non-server processes will still
perform the refresh, but via the update queue rather than the refresh queue. We could,
instead, raise NotImplementedError in such cases, but we may want the ability to refresh
the entire component cache from a CLI utility and the current implementation would allow that.
"""
# Ensure referenced runtime is available
if not PipelineProcessorRegistry.instance().is_valid_runtime_type(catalog.runtime_type.name):
return
if self.is_server_process:
queue.put((catalog, action))
else:
manifest: Dict[str, str] = self._load_manifest()
manifest[catalog.name] = action
self.update_manifest(manifest=manifest)
def _remove_all_manifest_files(self):
"""
Remove all existing manifest files in the Jupyter runtimes directory.
"""
manifest_files = Path(self.manifest_dir).glob("**/elyra-component-manifest-*.json")
for file in manifest_files:
os.remove(str(file))
def _load_manifest(self, filename: Optional[str] = None) -> Dict[str, str]:
"""Read and return the contents of a manifest file.
If 'filename' is not provided, this process's manifest file will be read.
"""
filename = filename or self.manifest_filename
if not os.path.isfile(filename):
self.log.debug(f"Manifest file '{filename}' doesn't exist and will be created.")
return {}
with open(filename, "r") as f:
manifest: Dict[str, str] = json.load(f)
self.log.debug(f"Reading manifest '{manifest}' from file '{filename}'")
return manifest
def update_manifest(self, filename: Optional[str] = None, manifest: Optional[Dict[str, str]] = None) -> None:
"""Update the manifest file with the given entry."""
filename = filename or self.manifest_filename
manifest = manifest or {}
self.log.debug(f"Updating manifest '{manifest}' to file '{filename}'")
with open(filename, "w") as f:
json.dump(manifest, f, indent=2)
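# Illustrative sketch (not part of the original source): a manifest file written by a
# non-server process is a flat JSON mapping of catalog name to pending action, e.g.
#
#     {"my-url-catalog": "modify", "stale-catalog": "delete"}
#
# The catalog names here are hypothetical; the server-side watchdog (ManifestFileChangeHandler)
# reads this mapping and inserts each (catalog, action) pair into the update queue.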
def wait_for_all_cache_tasks(self):
"""
Block execution and wait for all tasks in the cache task update queue to complete.
Primarily used for testing.
"""
if self.is_server_process:
self.update_queue.join()
self.refresh_queue.join()
def get_all_components(self, platform: RuntimeProcessorType) -> List[Component]:
"""
Retrieve all components from component catalog cache
"""
components: List[Component] = []
catalogs = self._component_cache.get(platform.name, {})
for catalog_name, catalog_properties in catalogs.items():
components.extend(list(catalog_properties.get("components", {}).values()))
if not components and platform != RuntimeProcessorType.LOCAL:
self.log.error(f"No components could be found in any catalog for platform type '{platform.name}'.")
return components
def get_component(self, platform: RuntimeProcessorType, component_id: str) -> Optional[Component]:
"""
Retrieve the component with a given component_id from component catalog cache
"""
component: Optional[Component] = None
catalogs = self._component_cache.get(platform.name, {})
for catalog_name, catalog_properties in catalogs.items():
component = catalog_properties.get("components", {}).get(component_id)
if component:
break
if not component:
self.log.error(f"Component with ID '{component_id}' could not be found in any catalog.")
return component
def _load_catalog_reader_class(
self, catalog: ComponentCatalogMetadata, file_types: List[str]
) -> Optional[ComponentCatalogConnector]:
"""
Load the appropriate entrypoint class based on the schema name indicated in
the ComponentCatalogMetadata instance and the file types associated with the component
parser in use
"""
try:
catalog_reader = entrypoints.get_group_named("elyra.component.catalog_types").get(catalog.schema_name)
if not catalog_reader:
self.log.error(
f"No entrypoint with name '{catalog.schema_name}' was found in group "
f"'elyra.component.catalog_types' to match the 'schema_name' given in catalog "
f"'{catalog.display_name}'. Skipping..."
)
return None
catalog_reader = catalog_reader.load()(file_types, parent=self.parent)
except Exception as e:
self.log.error(f"Could not load appropriate ComponentCatalogConnector class: {e}. Skipping...")
return None
return catalog_reader
def read_component_catalog(self, catalog: ComponentCatalogMetadata) -> Dict[str, Component]:
"""
Read a component catalog and return a dictionary of components indexed by component_id.
:param catalog: a metadata instance from which to read and construct Component objects
:returns: a dictionary of component id to Component object for all read/parsed components
"""
components: Dict[str, Component] = {}
# Ensure referenced runtime is available
if not PipelineProcessorRegistry.instance().is_valid_runtime_type(catalog.runtime_type.name):
return components
# Assign component parser based on the runtime platform type
parser = ComponentParser.create_instance(platform=catalog.runtime_type)
# Assign reader based on the type of the catalog (the 'schema_name')
catalog_reader = self._load_catalog_reader_class(catalog, parser.file_types)
if not catalog_reader:
return components
# Get content of component definition file for each component in this catalog
self.log.debug(f"Processing components in catalog '{catalog.display_name}'")
catalog_entries = catalog_reader.read_component_definitions(catalog)
if not catalog_entries:
return components
for catalog_entry in catalog_entries:
# Parse the entry to get a fully qualified Component object
try:
parsed_components = parser.parse(catalog_entry) or []
except Exception as e:
self.log.warning(
f"Could not parse definition for component with identifying information: "
f"'{catalog_entry.entry_reference}' -> {str(e)}"
)
else:
for component in parsed_components:
components[component.id] = component
return components
@staticmethod
def get_generic_components() -> List[Component]:
return list(ComponentCache._generic_components.values())
@staticmethod
def get_generic_component(component_id: str) -> Optional[Component]:
return ComponentCache._generic_components.get(component_id)
@staticmethod
def get_generic_component_from_op(component_op: str) -> Optional[Component]:
for component in ComponentCache.get_generic_components():
if component.op == component_op:
return component
return None
@staticmethod
def get_generic_component_ops() -> List[str]:
return [component.op for component in ComponentCache.get_generic_components()]
@staticmethod
def load_jinja_template(template_name: str) -> Template:
"""
Loads the jinja template of the given name from the
elyra/templates/components folder
"""
loader = PackageLoader("elyra", "templates/components")
template_env = Environment(loader=loader)
template_env.policies["json.dumps_kwargs"] = {"sort_keys": False} # prevent automatic key sort on 'tojson'
return template_env.get_template(template_name)
@staticmethod
def to_canvas_palette(components: List[Component]) -> Dict:
"""
Converts catalog components into appropriate canvas palette format
"""
template = ComponentCache.load_jinja_template("canvas_palette_template.jinja2")
# Define a fallback category for components with no given categories
fallback_category_name = "No Category"
# Convert the list of all components into a dictionary of
# component lists keyed by category
category_to_components: Dict[str, List[Component]] = {}
for component in components:
categories = component.categories
# Assign a fallback category so that component is not
# lost during palette render
if not categories:
categories = [fallback_category_name]
for category in categories:
if category not in category_to_components.keys():
category_to_components[category] = []
if component.id not in [comp.id for comp in category_to_components[category]]:
category_to_components[category].append(component)
# Render template
canvas_palette = template.render(category_dict=category_to_components)
return json.loads(canvas_palette)
@staticmethod
def to_canvas_properties(component: Component) -> Dict:
"""
Converts catalog components into appropriate canvas properties format.
If component_id is one of the generic set, generic template is rendered,
otherwise, the runtime-specific property template is rendered.
"""
if ComponentCache.get_generic_component(component.id) is not None:
template = ComponentCache.load_jinja_template("generic_properties_template.jinja2")
else:
template = ComponentCache.load_jinja_template("canvas_properties_template.jinja2")
template_vars = {
"elyra_owned_properties": component.get_elyra_properties(),
"render_property_details": ComponentProperty.render_property_details,
}
template.globals.update(template_vars)
canvas_properties = template.render(component=component)
return json.loads(canvas_properties)
class ManifestFileChangeHandler(FileSystemEventHandler):
"""Watchdog handler that filters on .json files within specific metadata directories."""
def __init__(self, component_cache: ComponentCache, **kwargs):
super().__init__(**kwargs)
self.component_cache = component_cache
self.log = component_cache.log
def dispatch(self, event):
"""Dispatches delete and modification events pertaining to the manifest filename"""
if "elyra-component-manifest" in event.src_path:
super().dispatch(event)
def on_modified(self, event):
"""Fires when the component manifest file is modified."""
self.log.debug(f"ManifestFileChangeHandler: file '{event.src_path}' has been modified.")
manifest = self.component_cache._load_manifest(filename=event.src_path)
if manifest: # only update the manifest if there is work to do
for catalog, action in manifest.items():
self.log.debug(f"ManifestFileChangeHandler: inserting ({catalog},{action}) into update queue...")
if action == "delete":
# The metadata instance has already been deleted, so we must
# fabricate an instance that only consists of a catalog name
catalog_instance = ComponentCatalogMetadata(name=catalog)
else: # cache_action == 'modify':
# Fetch the catalog instance associated with this action
catalog_instance = MetadataManager(
schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID
).get(name=catalog)
self.component_cache.update(catalog=catalog_instance, action=action)
self.component_cache.update_manifest(filename=event.src_path) # clear the manifest
| outstanding_threads = True
# Report on a long-running thread if CATALOG_UPDATE_TIMEOUT is exceeded
time_since_last_check = int(time.time() - thread.last_warn_time)
if time_since_last_check > CATALOG_UPDATE_TIMEOUT:
thread.last_warn_time = time.time()
self.log.warning(
f"Cache update for catalog '{thread.name}' is still processing "
f"after {cumulative_run_time} seconds ..."
) | conditional_block |
component_catalog.py | #
# Copyright 2018-2023 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from logging import Logger
import os
from pathlib import Path
from queue import Empty
from queue import Queue
from threading import Event
from threading import Thread
import time
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import entrypoints
from jinja2 import Environment
from jinja2 import PackageLoader
from jinja2 import Template
from jupyter_core.paths import jupyter_runtime_dir
from traitlets.config import SingletonConfigurable
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from elyra.metadata.manager import MetadataManager
from elyra.metadata.metadata import Metadata
from elyra.metadata.schemaspaces import ComponentCatalogs
from elyra.pipeline.catalog_connector import ComponentCatalogConnector
from elyra.pipeline.component import Component
from elyra.pipeline.component import ComponentParser
from elyra.pipeline.component_metadata import ComponentCatalogMetadata
from elyra.pipeline.properties import ComponentProperty
from elyra.pipeline.registry import PipelineProcessorRegistry
from elyra.pipeline.runtime_type import RuntimeProcessorType
BLOCKING_TIMEOUT = 0.5
NONBLOCKING_TIMEOUT = 0.10
# Issue warnings if catalog update takes longer than this value in seconds
CATALOG_UPDATE_TIMEOUT = int(os.getenv("ELYRA_CATALOG_UPDATE_TIMEOUT", 15))
# Issue warnings when outstanding worker thread counts exceed this value
WORKER_THREAD_WARNING_THRESHOLD = int(os.getenv("ELYRA_WORKER_THREAD_WARNING_THRESHOLD", 10))
# Define custom type to describe the component cache
ComponentCacheType = Dict[str, Dict[str, Dict[str, Dict[str, Union[Component, str, List[str]]]]]]
class RefreshInProgressError(Exception):
def __init__(self):
super().__init__("A catalog refresh is in progress. Try the request later.")
class RefreshQueue(Queue):
"""Entries are associated with a complete refresh of the Component Cache."""
_refreshing: bool
def __init__(self):
super().__init__()
self._refreshing = False
@property
def refreshing(self) -> bool:
return self._refreshing
@refreshing.setter
def refreshing(self, value: bool) -> None:
self._refreshing = value
def get(self, block: bool = True, timeout: Optional[float] = None):
"""Overrides the superclass method to set the refreshing property to false when empty."""
try:
entry = super().get(block=block, timeout=timeout)
except Empty:
self.refreshing = False
raise
return entry
def put(self, item, block=True, timeout=None):
"""Overrides the superclass method to set the refreshing property to true."""
super().put(item, block=block, timeout=timeout)
self.refreshing = True
class UpdateQueue(Queue):
"""Entries are associated with a single update of the Component Cache.
This class merely exists to distinguish it from the RefreshQueue instance.
"""
pass
class CacheUpdateManager(Thread):
"""
Primary thread for maintaining consistency of the component cache.
The component cache manager maintains the cache queue, whose entries are a
tuple of 'catalog' and 'action'. The 'catalog' is a catalog instance against
which the 'action' is applied. The 'action' is one of 'modify' or 'delete'.
For 'delete' the components of the referenced catalog are removed. For 'modify'
the components of the referenced catalog are inserted or updated (depending on
its prior existence).
"""
def __init__(
self, log: Logger, component_cache: ComponentCacheType, refresh_queue: RefreshQueue, update_queue: UpdateQueue
):
super().__init__()
self.daemon = True
self.name = "CacheUpdateManager"
self.log: Logger = log
self._component_cache: ComponentCacheType = component_cache
self._refresh_queue: RefreshQueue = refresh_queue
self._update_queue: UpdateQueue = update_queue
self._check_refresh_queue = False
self._threads: List[CacheUpdateWorker] = []
self.stop_event: Event = Event() # Set when server process stops
def run(self):
"""Process queue queue entries until server is stopped."""
while not self.stop_event.is_set():
self.manage_cache_tasks()
def manage_cache_tasks(self):
"""
Check the cache queue for a cache update action and start
a corresponding worker thread to complete the update
"""
outstanding_threads = self._has_outstanding_threads()
try:
# Get a task from the cache queue, waiting less if we have active threads.
timeout = NONBLOCKING_TIMEOUT if outstanding_threads else BLOCKING_TIMEOUT
# Toggle between refresh and update queues so as to prevent starvation.
self._check_refresh_queue = not self._check_refresh_queue
if self._check_refresh_queue:
catalog, action = self._refresh_queue.get(timeout=timeout)
else:
catalog, action = self._update_queue.get(timeout=timeout)
except Empty:
# No task exists in the cache queue, proceed to check for thread execution
pass
else:
# Create and start a thread for the task
updater_thread = CacheUpdateWorker(
self._component_cache,
self._refresh_queue if self._check_refresh_queue else self._update_queue,
catalog,
action,
)
updater_thread.start()
queue_clause = "refreshing" if self._check_refresh_queue else "updating"
self.log.debug(f"CacheUpdateWorker {queue_clause} catalog: '{updater_thread.name}', action: '{action}'...")
self._threads.append(updater_thread)
def _has_outstanding_threads(self) -> bool:
"""
Join finished threads and report on long-running threads as needed.
"""
outstanding_threads = False
for thread in self._threads:
# Attempt to join thread within the given amount of time
thread.join(timeout=NONBLOCKING_TIMEOUT)
cumulative_run_time = int(time.time() - thread.task_start_time)
if thread.is_alive():
# Thread is still running (thread join timed out)
outstanding_threads = True
# Report on a long-running thread if CATALOG_UPDATE_TIMEOUT is exceeded
time_since_last_check = int(time.time() - thread.last_warn_time)
if time_since_last_check > CATALOG_UPDATE_TIMEOUT:
thread.last_warn_time = time.time()
self.log.warning(
f"Cache update for catalog '{thread.name}' is still processing "
f"after {cumulative_run_time} seconds ..."
)
else:
self.log.debug(f"CacheUpdateWorker completed for catalog: '{thread.name}', action: '{thread.action}'.")
# Thread has been joined and can be removed from the list
self._threads.remove(thread)
# Mark cache task as complete
thread.queue.task_done()
# Report successful join for threads that have previously logged a
# cache update duration warning
if thread.last_warn_time != thread.task_start_time:
self.log.info(
f"Cache update for catalog '{thread.name}' has "
f"completed after {cumulative_run_time} seconds"
)
if len(self._threads) > WORKER_THREAD_WARNING_THRESHOLD:
self.log.warning(
f"CacheUpdateWorker outstanding threads threshold "
f"({WORKER_THREAD_WARNING_THRESHOLD}) has been exceeded. "
f"{len(self._threads)} threads are outstanding. This may "
f"indicate a possible issue."
)
return outstanding_threads
def is_refreshing(self) -> bool:
return self._refresh_queue.refreshing
def init_refresh(self) -> None:
self._refresh_queue.refreshing = True
def stop(self):
"""Trigger completion of the manager thread."""
self._refresh_queue.refreshing = False
self.stop_event.set()
self.log.debug("CacheUpdateManager stopped.")
class CacheUpdateWorker(Thread):
"""Spawned by the CacheUpdateManager to perform work against the component cache."""
def __init__(
self,
component_cache: ComponentCacheType,
queue: Queue,
catalog: ComponentCatalogMetadata,
action: Optional[str] = None,
):
super().__init__()
self.daemon = True
self.name = catalog.name # Let the name of the thread reflect the catalog being managed
self._component_cache: ComponentCacheType = component_cache
# Task-specific properties
self.queue: Queue = queue
self.catalog: ComponentCatalogMetadata = catalog
self.action: str = action
# Thread metadata
self.task_start_time = time.time()
self.last_warn_time = self.task_start_time
# Prepare component cache for modification
runtime_type = None
if self.catalog.metadata:
runtime_type = self.catalog.runtime_type.name
self.prepare_cache_for_catalog(runtime_type)
def run(self):
"""Apply the relative action to the given catalog entry in the cache."""
if self.action == "delete":
# Check all runtime types in cache for an entry of the given name.
# If found, remove only the components from this catalog
for runtime_type in self._component_cache:
if self.catalog.name in self._component_cache[runtime_type]:
self._component_cache[runtime_type].pop(self.catalog.name, None)
break
else: # 'modify' - replace (or add) components from the given catalog and update its status
runtime_type = self.catalog.runtime_type.name
catalog_state = self._component_cache[runtime_type][self.catalog.name].get("status")
try:
# Replace all components for the given catalog
self._component_cache[runtime_type][self.catalog.name][
"components"
] = ComponentCache.instance().read_component_catalog(self.catalog)
catalog_state["state"] = "current"
catalog_state["errors"] = [] # reset any errors that may have been present
except Exception as e:
# Update state with an 'error' action and the relevant message
catalog_state["state"] = "error"
catalog_state["errors"].append(str(e))
def prepare_cache_for_catalog(self, runtime_type: Optional[str] = None):
"""
Add entries to the component cache for the runtime type and/or catalog
of focus for this thread, and set the catalog state to 'updating'.
"""
if self.action == "delete":
# On 'delete' the runtime_type parameter will be None and since catalog names
# are essentially unique across runtime types, we can break out of this loop
# on first occurrence and let _that_ runtime type be used in the following code.
for runtime_type in self._component_cache:
if self.catalog.name in self._component_cache[runtime_type]:
break
# Add sub-dictionary for this runtime type if not present
if not self._component_cache.get(runtime_type):
self._component_cache[runtime_type] = {}
# Add sub-dictionary for this catalog if not present - this will occur when
# a catalog instance is created, so we're essentially adding a placeholder.
if not self._component_cache[runtime_type].get(self.catalog.name):
self._component_cache[runtime_type][self.catalog.name] = {
"components": {},
"status": {"state": "updating", "errors": []},
}
else: # Set state to 'updating' for an existing entry
self._component_cache[runtime_type][self.catalog.name]["status"]["state"] = "updating"
class ComponentCache(SingletonConfigurable):
"""Represents the cache of component definitions indexed by runtime-type, then by catalog name."""
# The component_cache is indexed at the top level by runtime type name, e.g. 'APACHE_AIRFLOW',
# and has as its value another dictionary. At the second level, each sub-dictionary is indexed by
# a ComponentCatalogMetadata instance name; its value is also a sub-dictionary. This sub-dictionary
# consists of two additional dictionaries: 1.) one with key "components" whose dictionary is
# indexed by component id and maps to the corresponding Component object, and 2.) one with key
# "status" and value of a final sub-dictionary with key-value pairs "state":"<current/updating/errors>"
# and "errors":["<error1>", "<error2>", ...] to dynamically indicate the status of this catalog instance
_component_cache: ComponentCacheType = {}
_generic_category_label = "Elyra"
_generic_components: Dict[str, Component] = {
"notebook": Component(
id="notebook",
name="Notebook",
description="Run notebook file",
op="execute-notebook-node",
catalog_type="elyra",
component_reference="elyra",
extensions=[".ipynb"],
categories=[_generic_category_label],
),
"python-script": Component(
id="python-script",
name="Python Script",
description="Run Python script",
op="execute-python-node",
catalog_type="elyra",
component_reference="elyra",
extensions=[".py"],
categories=[_generic_category_label],
),
"r-script": Component(
id="r-script",
name="R Script",
description="Run R script",
op="execute-r-node",
catalog_type="elyra",
component_reference="elyra",
extensions=[".r"],
categories=[_generic_category_label],
),
}
def __init__(self, **kwargs):
emulate_server_app: bool = kwargs.pop("emulate_server_app", False)
super().__init__(**kwargs)
self._component_cache = {}
self.is_server_process = ComponentCache._determine_server_process(emulate_server_app, **kwargs)
self.manifest_dir = jupyter_runtime_dir()
# Ensure queue attribute exists for non-server instances as well.
self.refresh_queue: Optional[RefreshQueue] = None
self.update_queue: Optional[UpdateQueue] = None
if self.is_server_process:
self.refresh_queue = RefreshQueue()
self.update_queue = UpdateQueue()
# Set up watchdog for manifest file for out-of-process updates
self.observer = Observer()
self.observer.schedule(ManifestFileChangeHandler(self), self.manifest_dir)
# Start a thread to manage updates to the component cache
manager = CacheUpdateManager(self.log, self._component_cache, self.refresh_queue, self.update_queue)
self.cache_manager = manager
self.cache_manager.start()
self.log.debug("CacheUpdateManager started...")
else:
self.manifest_filename = os.path.join(self.manifest_dir, f"elyra-component-manifest-{os.getpid()}.json")
@staticmethod
def _determine_server_process(emulate_server_app: bool, **kwargs) -> bool:
"""Determines if this process is a server (extension) process."""
app_names = ["ServerApp", "ElyraApp"]
is_server_process = False
if "parent" in kwargs and kwargs["parent"].__class__.__name__ in app_names:
is_server_process = True
elif emulate_server_app: # Used in unittests
is_server_process = True
return is_server_process
def load(self):
"""
Completes a series of actions during system startup, such as creating
the component manifest file and triggering the build of the component
cache for existing ComponentCatalog metadata instances.
"""
# Proceed only if singleton instance has been created
if self.initialized:
# The cache manager will work on manifest and cache tasks on an
# in-process basis as load() is only called during startup from
# the server process.
if self.is_server_process:
# Remove all existing manifest files from previous processes
self._remove_all_manifest_files()
# Start the watchdog if it's not alive, prevents redundant starts
if not self.observer.is_alive():
self.observer.start()
# Fetch all component catalog instances and trigger their add to the
# component cache if this is not already happening (it seems some server
# test fixtures could be loading the server extensions multiple times).
if not self.cache_manager.is_refreshing():
self.refresh()
def refresh(self):
"""Triggers a refresh of all catalogs in the component cache.
Raises RefreshInProgressError if a complete refresh is in progress.
Note that we do not preclude non-server processes from performing a
complete refresh. In such cases, each of the catalog entries will be
written to the manifest, which will be placed into the update queue.
As a result, non-server applications could by-pass the "refresh in progress"
constraint, but we're assuming a CLI application won't be as likely to
"pound" refresh like a UI application can.
"""
if self.is_server_process and self.cache_manager.is_refreshing():
raise RefreshInProgressError()
catalogs = MetadataManager(schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID).get_all()
for catalog in catalogs:
self._insert_request(self.refresh_queue, catalog, "modify")
def update(self, catalog: Metadata, action: str):
"""
Triggers an update of the component cache for the given catalog name. If this is a non-server
process, the entry is written to the manifest file where it will be "processed" by the watchdog
and inserted into the component cache queue, otherwise we update the cache queue directly.
"""
self._insert_request(self.update_queue, catalog, action)
def _insert_request(self, queue: Queue, catalog: ComponentCatalogMetadata, action: str):
"""
If running as a server process, the request is submitted to the desired queue, otherwise
it is posted to the manifest where the server process (if running) can detect the manifest
file update and send the request to the update queue.
Note that any calls to ComponentCache.refresh() from non-server processes will still
perform the refresh, but via the update queue rather than the refresh queue. We could,
instead, raise NotImplementedError in such cases, but we may want the ability to refresh
the entire component cache from a CLI utility and the current implementation would allow that.
"""
# Ensure referenced runtime is available
if not PipelineProcessorRegistry.instance().is_valid_runtime_type(catalog.runtime_type.name):
return
if self.is_server_process:
queue.put((catalog, action))
else:
manifest: Dict[str, str] = self._load_manifest()
manifest[catalog.name] = action
self.update_manifest(manifest=manifest)
def _remove_all_manifest_files(self):
"""
Remove all existing manifest files in the Jupyter runtimes directory.
"""
manifest_files = Path(self.manifest_dir).glob("**/elyra-component-manifest-*.json")
for file in manifest_files:
os.remove(str(file))
def _load_manifest(self, filename: Optional[str] = None) -> Dict[str, str]:
"""Read and return the contents of a manifest file.
If 'filename' is not provided, this process's manifest file will be read.
"""
filename = filename or self.manifest_filename
if not os.path.isfile(filename):
self.log.debug(f"Manifest file '{filename}' doesn't exist and will be created.")
return {}
with open(filename, "r") as f:
manifest: Dict[str, str] = json.load(f)
self.log.debug(f"Reading manifest '{manifest}' from file '{filename}'")
return manifest
def update_manifest(self, filename: Optional[str] = None, manifest: Optional[Dict[str, str]] = None) -> None:
"""Update the manifest file with the given entry."""
filename = filename or self.manifest_filename
manifest = manifest or {}
self.log.debug(f"Updating manifest '{manifest}' to file '{filename}'")
with open(filename, "w") as f:
json.dump(manifest, f, indent=2)
def wait_for_all_cache_tasks(self):
"""
Block execution and wait for all tasks in the cache task update queue to complete.
Primarily used for testing.
"""
if self.is_server_process:
self.update_queue.join()
self.refresh_queue.join()
def get_all_components(self, platform: RuntimeProcessorType) -> List[Component]:
"""
Retrieve all components from component catalog cache
"""
components: List[Component] = []
catalogs = self._component_cache.get(platform.name, {})
for catalog_name, catalog_properties in catalogs.items():
components.extend(list(catalog_properties.get("components", {}).values()))
if not components and platform != RuntimeProcessorType.LOCAL:
self.log.error(f"No components could be found in any catalog for platform type '{platform.name}'.")
return components
def get_component(self, platform: RuntimeProcessorType, component_id: str) -> Optional[Component]:
"""
Retrieve the component with a given component_id from component catalog cache
"""
component: Optional[Component] = None
catalogs = self._component_cache.get(platform.name, {})
for catalog_name, catalog_properties in catalogs.items():
component = catalog_properties.get("components", {}).get(component_id)
if component:
break
if not component:
self.log.error(f"Component with ID '{component_id}' could not be found in any catalog.")
return component
def _load_catalog_reader_class(
self, catalog: ComponentCatalogMetadata, file_types: List[str]
) -> Optional[ComponentCatalogConnector]:
"""
Load the appropriate entrypoint class based on the schema name indicated in
the ComponentCatalogMetadata instance and the file types associated with the component
parser in use
"""
try:
catalog_reader = entrypoints.get_group_named("elyra.component.catalog_types").get(catalog.schema_name)
if not catalog_reader:
self.log.error(
f"No entrypoint with name '{catalog.schema_name}' was found in group "
f"'elyra.component.catalog_types' to match the 'schema_name' given in catalog "
f"'{catalog.display_name}'. Skipping..."
)
return None
catalog_reader = catalog_reader.load()(file_types, parent=self.parent)
except Exception as e:
self.log.error(f"Could not load appropriate ComponentCatalogConnector class: {e}. Skipping...")
return None
return catalog_reader
def read_component_catalog(self, catalog: ComponentCatalogMetadata) -> Dict[str, Component]:
"""
Read a component catalog and return a dictionary of components indexed by component_id.
:param catalog: a metadata instance from which to read and construct Component objects
:returns: a dictionary of component id to Component object for all read/parsed components
"""
components: Dict[str, Component] = {}
# Ensure referenced runtime is available
if not PipelineProcessorRegistry.instance().is_valid_runtime_type(catalog.runtime_type.name):
return components
# Assign component parser based on the runtime platform type
parser = ComponentParser.create_instance(platform=catalog.runtime_type)
# Assign reader based on the type of the catalog (the 'schema_name')
catalog_reader = self._load_catalog_reader_class(catalog, parser.file_types)
if not catalog_reader:
return components
# Get content of component definition file for each component in this catalog
self.log.debug(f"Processing components in catalog '{catalog.display_name}'")
catalog_entries = catalog_reader.read_component_definitions(catalog)
if not catalog_entries:
return components
for catalog_entry in catalog_entries:
# Parse the entry to get a fully qualified Component object
try:
parsed_components = parser.parse(catalog_entry) or []
except Exception as e:
self.log.warning(
f"Could not parse definition for component with identifying information: "
f"'{catalog_entry.entry_reference}' -> {str(e)}"
)
else:
for component in parsed_components:
components[component.id] = component
return components
@staticmethod
def get_generic_components() -> List[Component]:
return list(ComponentCache._generic_components.values())
@staticmethod
def get_generic_component(component_id: str) -> Optional[Component]:
return ComponentCache._generic_components.get(component_id)
@staticmethod
def get_generic_component_from_op(component_op: str) -> Optional[Component]:
for component in ComponentCache.get_generic_components():
if component.op == component_op:
return component
return None
@staticmethod
def get_generic_component_ops() -> List[str]:
return [component.op for component in ComponentCache.get_generic_components()]
@staticmethod
def | (template_name: str) -> Template:
"""
Loads the jinja template of the given name from the
elyra/templates/components folder
"""
loader = PackageLoader("elyra", "templates/components")
template_env = Environment(loader=loader)
template_env.policies["json.dumps_kwargs"] = {"sort_keys": False} # prevent automatic key sort on 'tojson'
return template_env.get_template(template_name)
@staticmethod
def to_canvas_palette(components: List[Component]) -> Dict:
"""
Converts catalog components into appropriate canvas palette format
"""
template = ComponentCache.load_jinja_template("canvas_palette_template.jinja2")
# Define a fallback category for components with no given categories
fallback_category_name = "No Category"
# Convert the list of all components into a dictionary of
# component lists keyed by category
category_to_components: Dict[str, List[Component]] = {}
for component in components:
categories = component.categories
# Assign a fallback category so that component is not
# lost during palette render
if not categories:
categories = [fallback_category_name]
for category in categories:
if category not in category_to_components.keys():
category_to_components[category] = []
if component.id not in [comp.id for comp in category_to_components[category]]:
category_to_components[category].append(component)
# Render template
canvas_palette = template.render(category_dict=category_to_components)
return json.loads(canvas_palette)
@staticmethod
def to_canvas_properties(component: Component) -> Dict:
"""
Converts catalog components into appropriate canvas properties format.
If component_id is one of the generic set, generic template is rendered,
otherwise, the runtime-specific property template is rendered.
"""
if ComponentCache.get_generic_component(component.id) is not None:
template = ComponentCache.load_jinja_template("generic_properties_template.jinja2")
else:
template = ComponentCache.load_jinja_template("canvas_properties_template.jinja2")
template_vars = {
"elyra_owned_properties": component.get_elyra_properties(),
"render_property_details": ComponentProperty.render_property_details,
}
template.globals.update(template_vars)
canvas_properties = template.render(component=component)
return json.loads(canvas_properties)
class ManifestFileChangeHandler(FileSystemEventHandler):
"""Watchdog handler that filters on .json files within specific metadata directories."""
def __init__(self, component_cache: ComponentCache, **kwargs):
super().__init__(**kwargs)
self.component_cache = component_cache
self.log = component_cache.log
def dispatch(self, event):
"""Dispatches delete and modification events pertaining to the manifest filename"""
if "elyra-component-manifest" in event.src_path:
super().dispatch(event)
def on_modified(self, event):
"""Fires when the component manifest file is modified."""
self.log.debug(f"ManifestFileChangeHandler: file '{event.src_path}' has been modified.")
manifest = self.component_cache._load_manifest(filename=event.src_path)
if manifest: # only update the manifest if there is work to do
for catalog, action in manifest.items():
self.log.debug(f"ManifestFileChangeHandler: inserting ({catalog},{action}) into update queue...")
if action == "delete":
# The metadata instance has already been deleted, so we must
# fabricate an instance that only consists of a catalog name
catalog_instance = ComponentCatalogMetadata(name=catalog)
else: # cache_action == 'modify':
# Fetch the catalog instance associated with this action
catalog_instance = MetadataManager(
schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID
).get(name=catalog)
self.component_cache.update(catalog=catalog_instance, action=action)
self.component_cache.update_manifest(filename=event.src_path) # clear the manifest
| load_jinja_template | identifier_name |
component_catalog.py | #
# Copyright 2018-2023 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from logging import Logger
import os
from pathlib import Path
from queue import Empty
from queue import Queue
from threading import Event
from threading import Thread
import time
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import entrypoints
from jinja2 import Environment
from jinja2 import PackageLoader
from jinja2 import Template
from jupyter_core.paths import jupyter_runtime_dir
from traitlets.config import SingletonConfigurable
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from elyra.metadata.manager import MetadataManager
from elyra.metadata.metadata import Metadata
from elyra.metadata.schemaspaces import ComponentCatalogs
from elyra.pipeline.catalog_connector import ComponentCatalogConnector
from elyra.pipeline.component import Component
from elyra.pipeline.component import ComponentParser
from elyra.pipeline.component_metadata import ComponentCatalogMetadata
from elyra.pipeline.properties import ComponentProperty
from elyra.pipeline.registry import PipelineProcessorRegistry
from elyra.pipeline.runtime_type import RuntimeProcessorType
BLOCKING_TIMEOUT = 0.5
NONBLOCKING_TIMEOUT = 0.10
# Issue warnings if catalog update takes longer than this value in seconds
CATALOG_UPDATE_TIMEOUT = int(os.getenv("ELYRA_CATALOG_UPDATE_TIMEOUT", 15))
# Issue warnings when outstanding worker thread counts exceed this value
WORKER_THREAD_WARNING_THRESHOLD = int(os.getenv("ELYRA_WORKER_THREAD_WARNING_THRESHOLD", 10))
# Define custom type to describe the component cache
ComponentCacheType = Dict[str, Dict[str, Dict[str, Dict[str, Union[Component, str, List[str]]]]]]
class RefreshInProgressError(Exception):
def __init__(self):
super().__init__("A catalog refresh is in progress. Try the request later.")
class RefreshQueue(Queue):
"""Entries are associated with a complete refresh of the Component Cache."""
_refreshing: bool
def __init__(self):
super().__init__()
self._refreshing = False
@property
def refreshing(self) -> bool:
return self._refreshing
@refreshing.setter
def refreshing(self, value: bool) -> None:
self._refreshing = value
def get(self, block: bool = True, timeout: Optional[float] = None):
"""Overrides the superclass method to set the refreshing property to false when empty."""
try:
entry = super().get(block=block, timeout=timeout)
except Empty:
self.refreshing = False
raise
return entry
def put(self, item, block=True, timeout=None):
"""Overrides the superclass method to set the refreshing property to true."""
super().put(item, block=block, timeout=timeout)
self.refreshing = True
class UpdateQueue(Queue):
"""Entries are associated with a single update of the Component Cache.
This class merely exists to distinguish it from the RefreshQueue instance.
"""
pass
class CacheUpdateManager(Thread):
"""
Primary thread for maintaining consistency of the component cache.
The component cache manager maintains the cache queue, whose entries are a
tuple of 'catalog' and 'action'. The 'catalog' is a catalog instance against
which the 'action' is applied. The 'action' is one of 'modify' or 'delete'.
For 'delete' the components of the referenced catalog are removed. For 'modify'
the components of the referenced catalog are inserted or updated (depending on
its prior existence).
"""
def __init__(
self, log: Logger, component_cache: ComponentCacheType, refresh_queue: RefreshQueue, update_queue: UpdateQueue
):
super().__init__()
self.daemon = True
self.name = "CacheUpdateManager"
self.log: Logger = log
self._component_cache: ComponentCacheType = component_cache
self._refresh_queue: RefreshQueue = refresh_queue
self._update_queue: UpdateQueue = update_queue
self._check_refresh_queue = False
self._threads: List[CacheUpdateWorker] = []
self.stop_event: Event = Event() # Set when server process stops
def run(self):
"""Process queue queue entries until server is stopped."""
while not self.stop_event.is_set():
self.manage_cache_tasks()
def manage_cache_tasks(self):
"""
Check the cache queue for a cache update action and start
a corresponding worker thread to complete the update
"""
outstanding_threads = self._has_outstanding_threads()
try:
# Get a task from the cache queue, waiting less if we have active threads.
timeout = NONBLOCKING_TIMEOUT if outstanding_threads else BLOCKING_TIMEOUT
# Toggle between refresh and update queues so as to prevent starvation.
self._check_refresh_queue = not self._check_refresh_queue
if self._check_refresh_queue:
catalog, action = self._refresh_queue.get(timeout=timeout)
else:
catalog, action = self._update_queue.get(timeout=timeout)
except Empty:
# No task exists in the cache queue, proceed to check for thread execution
pass
else:
# Create and start a thread for the task
updater_thread = CacheUpdateWorker(
self._component_cache,
self._refresh_queue if self._check_refresh_queue else self._update_queue,
catalog,
action,
)
updater_thread.start()
queue_clause = "refreshing" if self._check_refresh_queue else "updating"
self.log.debug(f"CacheUpdateWorker {queue_clause} catalog: '{updater_thread.name}', action: '{action}'...")
self._threads.append(updater_thread)
def _has_outstanding_threads(self) -> bool:
"""
Join finished threads and report on long-running threads as needed.
"""
outstanding_threads = False
for thread in self._threads:
# Attempt to join thread within the given amount of time
thread.join(timeout=NONBLOCKING_TIMEOUT)
cumulative_run_time = int(time.time() - thread.task_start_time)
if thread.is_alive():
# Thread is still running (thread join timed out)
outstanding_threads = True
# Report on a long-running thread if CATALOG_UPDATE_TIMEOUT is exceeded
time_since_last_check = int(time.time() - thread.last_warn_time)
if time_since_last_check > CATALOG_UPDATE_TIMEOUT:
thread.last_warn_time = time.time()
self.log.warning(
f"Cache update for catalog '{thread.name}' is still processing "
f"after {cumulative_run_time} seconds ..."
)
else:
self.log.debug(f"CacheUpdateWorker completed for catalog: '{thread.name}', action: '{thread.action}'.")
# Thread has been joined and can be removed from the list
self._threads.remove(thread)
# Mark cache task as complete
thread.queue.task_done()
# Report successful join for threads that have previously logged a
# cache update duration warning
if thread.last_warn_time != thread.task_start_time:
self.log.info(
f"Cache update for catalog '{thread.name}' has "
f"completed after {cumulative_run_time} seconds"
)
if len(self._threads) > WORKER_THREAD_WARNING_THRESHOLD:
self.log.warning(
f"CacheUpdateWorker outstanding threads threshold "
f"({WORKER_THREAD_WARNING_THRESHOLD}) has been exceeded. "
f"{len(self._threads)} threads are outstanding. This may "
f"indicate a possible issue."
)
return outstanding_threads
def is_refreshing(self) -> bool:
return self._refresh_queue.refreshing
def init_refresh(self) -> None:
self._refresh_queue.refreshing = True
def stop(self):
"""Trigger completion of the manager thread."""
self._refresh_queue.refreshing = False
self.stop_event.set()
self.log.debug("CacheUpdateManager stopped.")
class CacheUpdateWorker(Thread):
"""Spawned by the CacheUpdateManager to perform work against the component cache."""
def __init__(
self,
component_cache: ComponentCacheType,
queue: Queue,
catalog: ComponentCatalogMetadata,
action: Optional[str] = None,
):
super().__init__()
self.daemon = True
self.name = catalog.name # Let the name of the thread reflect the catalog being managed
self._component_cache: ComponentCacheType = component_cache
# Task-specific properties
self.queue: Queue = queue
self.catalog: ComponentCatalogMetadata = catalog
self.action: str = action
# Thread metadata
self.task_start_time = time.time()
self.last_warn_time = self.task_start_time
# Prepare component cache for modification
runtime_type = None
if self.catalog.metadata:
runtime_type = self.catalog.runtime_type.name
self.prepare_cache_for_catalog(runtime_type)
def run(self):
"""Apply the relative action to the given catalog entry in the cache."""
if self.action == "delete":
# Check all runtime types in cache for an entry of the given name.
# If found, remove only the components from this catalog
for runtime_type in self._component_cache:
if self.catalog.name in self._component_cache[runtime_type]:
self._component_cache[runtime_type].pop(self.catalog.name, None)
break
else: # 'modify' - replace (or add) components from the given catalog and update its status
runtime_type = self.catalog.runtime_type.name
catalog_state = self._component_cache[runtime_type][self.catalog.name].get("status")
try:
# Replace all components for the given catalog
self._component_cache[runtime_type][self.catalog.name][
"components"
] = ComponentCache.instance().read_component_catalog(self.catalog)
catalog_state["state"] = "current"
catalog_state["errors"] = [] # reset any errors that may have been present
except Exception as e:
# Update state with an 'error' action and the relevant message
catalog_state["state"] = "error"
catalog_state["errors"].append(str(e))
def prepare_cache_for_catalog(self, runtime_type: Optional[str] = None):
"""
Add entries to the component cache for the runtime type and/or catalog
of focus for this thread, and set the catalog state to 'updating'.
"""
if self.action == "delete":
# On 'delete' the runtime_type parameter will be None and since catalog names
# are essentially unique across runtime types, we can break out of this loop
# on first occurrence and let _that_ runtime type be used in the following code.
for runtime_type in self._component_cache:
if self.catalog.name in self._component_cache[runtime_type]:
break
# Add sub-dictionary for this runtime type if not present
if not self._component_cache.get(runtime_type):
self._component_cache[runtime_type] = {}
# Add sub-dictionary for this catalog if not present - this will occur when
# a catalog instance is created, so we're essentially adding a placeholder.
if not self._component_cache[runtime_type].get(self.catalog.name):
self._component_cache[runtime_type][self.catalog.name] = {
"components": {},
"status": {"state": "updating", "errors": []},
}
else: # Set state to 'updating' for an existing entry
self._component_cache[runtime_type][self.catalog.name]["status"]["state"] = "updating"
class ComponentCache(SingletonConfigurable):
"""Represents the cache of component definitions indexed by runtime-type, then by catalog name."""
# The component_cache is indexed at the top level by runtime type name, e.g. 'APACHE_AIRFLOW',
# and has as its value another dictionary. At the second level, each sub-dictionary is indexed by
# a ComponentCatalogMetadata instance name; its value is also a sub-dictionary. This sub-dictionary
# consists of two additional dictionaries: 1.) one with key "components" whose dictionary is
# indexed by component id and maps to the corresponding Component object, and 2.) one with key
# "status" and value of a final sub-dictionary with key-value pairs "state":"<current/updating/errors>"
# and "errors":["<error1>", "<error2>", ...] to dynamically indicate the status of this catalog instance
_component_cache: ComponentCacheType = {}
_generic_category_label = "Elyra"
_generic_components: Dict[str, Component] = {
"notebook": Component(
id="notebook",
name="Notebook",
description="Run notebook file",
op="execute-notebook-node",
catalog_type="elyra",
component_reference="elyra",
extensions=[".ipynb"],
categories=[_generic_category_label],
),
"python-script": Component(
id="python-script",
name="Python Script",
description="Run Python script",
op="execute-python-node",
catalog_type="elyra",
component_reference="elyra",
extensions=[".py"],
categories=[_generic_category_label],
),
"r-script": Component(
id="r-script",
name="R Script",
description="Run R script",
op="execute-r-node",
catalog_type="elyra",
component_reference="elyra",
extensions=[".r"],
categories=[_generic_category_label],
),
}
def __init__(self, **kwargs):
emulate_server_app: bool = kwargs.pop("emulate_server_app", False)
super().__init__(**kwargs)
self._component_cache = {}
self.is_server_process = ComponentCache._determine_server_process(emulate_server_app, **kwargs)
self.manifest_dir = jupyter_runtime_dir()
# Ensure queue attribute exists for non-server instances as well.
self.refresh_queue: Optional[RefreshQueue] = None
self.update_queue: Optional[UpdateQueue] = None
if self.is_server_process:
self.refresh_queue = RefreshQueue()
self.update_queue = UpdateQueue()
# Set up watchdog for manifest file for out-of-process updates
self.observer = Observer()
self.observer.schedule(ManifestFileChangeHandler(self), self.manifest_dir)
# Start a thread to manage updates to the component cache
manager = CacheUpdateManager(self.log, self._component_cache, self.refresh_queue, self.update_queue)
self.cache_manager = manager
self.cache_manager.start()
self.log.debug("CacheUpdateManager started...")
else:
self.manifest_filename = os.path.join(self.manifest_dir, f"elyra-component-manifest-{os.getpid()}.json")
@staticmethod
def _determine_server_process(emulate_server_app: bool, **kwargs) -> bool:
"""Determines if this process is a server (extension) process."""
app_names = ["ServerApp", "ElyraApp"]
is_server_process = False
if "parent" in kwargs and kwargs["parent"].__class__.__name__ in app_names:
is_server_process = True
elif emulate_server_app: # Used in unittests
is_server_process = True
return is_server_process
def load(self):
"""
Completes a series of actions during system startup, such as creating
the component manifest file and triggering the build of the component
cache for existing ComponentCatalog metadata instances.
""" | if self.is_server_process:
# Remove all existing manifest files from previous processes
self._remove_all_manifest_files()
# Start the watchdog if it's not alive, prevents redundant starts
if not self.observer.is_alive():
self.observer.start()
# Fetch all component catalog instances and trigger their add to the
# component cache if this is not already happening (it seems some server
# test fixtures could be loading the server extensions multiple times).
if not self.cache_manager.is_refreshing():
self.refresh()
def refresh(self):
"""Triggers a refresh of all catalogs in the component cache.
Raises RefreshInProgressError if a complete refresh is in progress.
Note that we do not preclude non-server processes from performing a
complete refresh. In such cases, each of the catalog entries will be
written to the manifest, which will be placed into the update queue.
As a result, non-server applications could by-pass the "refresh in progress"
constraint, but we're assuming a CLI application won't be as likely to
"pound" refresh like a UI application can.
"""
if self.is_server_process and self.cache_manager.is_refreshing():
raise RefreshInProgressError()
catalogs = MetadataManager(schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID).get_all()
for catalog in catalogs:
self._insert_request(self.refresh_queue, catalog, "modify")
def update(self, catalog: Metadata, action: str):
"""
Triggers an update of the component cache for the given catalog name. If this is a non-server
process, the entry is written to the manifest file where it will be "processed" by the watchdog
and inserted into the component cache queue, otherwise we update the cache queue directly.
"""
self._insert_request(self.update_queue, catalog, action)
def _insert_request(self, queue: Queue, catalog: ComponentCatalogMetadata, action: str):
"""
If running as a server process, the request is submitted to the desired queue, otherwise
it is posted to the manifest where the server process (if running) can detect the manifest
file update and send the request to the update queue.
Note that any calls to ComponentCache.refresh() from non-server processes will still
perform the refresh, but via the update queue rather than the refresh queue. We could,
instead, raise NotImplementedError in such cases, but we may want the ability to refresh
the entire component cache from a CLI utility and the current implementation would allow that.
"""
# Ensure referenced runtime is available
if not PipelineProcessorRegistry.instance().is_valid_runtime_type(catalog.runtime_type.name):
return
if self.is_server_process:
queue.put((catalog, action))
else:
manifest: Dict[str, str] = self._load_manifest()
manifest[catalog.name] = action
self.update_manifest(manifest=manifest)
def _remove_all_manifest_files(self):
"""
Remove all existing manifest files in the Jupyter runtimes directory.
"""
manifest_files = Path(self.manifest_dir).glob("**/elyra-component-manifest-*.json")
for file in manifest_files:
os.remove(str(file))
def _load_manifest(self, filename: Optional[str] = None) -> Dict[str, str]:
"""Read and return the contents of a manifest file.
If 'filename' is not provided, this process's manifest file will be read.
"""
filename = filename or self.manifest_filename
if not os.path.isfile(filename):
self.log.debug(f"Manifest file '{filename}' doesn't exist and will be created.")
return {}
with open(filename, "r") as f:
manifest: Dict[str, str] = json.load(f)
self.log.debug(f"Reading manifest '{manifest}' from file '{filename}'")
return manifest
def update_manifest(self, filename: Optional[str] = None, manifest: Optional[Dict[str, str]] = None) -> None:
"""Update the manifest file with the given entry."""
filename = filename or self.manifest_filename
manifest = manifest or {}
self.log.debug(f"Updating manifest '{manifest}' to file '{filename}'")
with open(filename, "w") as f:
json.dump(manifest, f, indent=2)
def wait_for_all_cache_tasks(self):
"""
Block execution and wait for all tasks in the cache task update queue to complete.
Primarily used for testing.
"""
if self.is_server_process:
self.update_queue.join()
self.refresh_queue.join()
def get_all_components(self, platform: RuntimeProcessorType) -> List[Component]:
"""
Retrieve all components from component catalog cache
"""
components: List[Component] = []
catalogs = self._component_cache.get(platform.name, {})
for catalog_name, catalog_properties in catalogs.items():
components.extend(list(catalog_properties.get("components", {}).values()))
if not components and platform != RuntimeProcessorType.LOCAL:
self.log.error(f"No components could be found in any catalog for platform type '{platform.name}'.")
return components
def get_component(self, platform: RuntimeProcessorType, component_id: str) -> Optional[Component]:
"""
Retrieve the component with a given component_id from component catalog cache
"""
component: Optional[Component] = None
catalogs = self._component_cache.get(platform.name, {})
for catalog_name, catalog_properties in catalogs.items():
component = catalog_properties.get("components", {}).get(component_id)
if component:
break
if not component:
self.log.error(f"Component with ID '{component_id}' could not be found in any catalog.")
return component
def _load_catalog_reader_class(
self, catalog: ComponentCatalogMetadata, file_types: List[str]
) -> Optional[ComponentCatalogConnector]:
"""
Load the appropriate entrypoint class based on the schema name indicated in
the ComponentCatalogMetadata instance and the file types associated with the component
parser in use
"""
try:
catalog_reader = entrypoints.get_group_named("elyra.component.catalog_types").get(catalog.schema_name)
if not catalog_reader:
self.log.error(
f"No entrypoint with name '{catalog.schema_name}' was found in group "
f"'elyra.component.catalog_types' to match the 'schema_name' given in catalog "
f"'{catalog.display_name}'. Skipping..."
)
return None
catalog_reader = catalog_reader.load()(file_types, parent=self.parent)
except Exception as e:
self.log.error(f"Could not load appropriate ComponentCatalogConnector class: {e}. Skipping...")
return None
return catalog_reader
def read_component_catalog(self, catalog: ComponentCatalogMetadata) -> Dict[str, Component]:
"""
Read a component catalog and return a dictionary of components indexed by component_id.
:param catalog: a metadata instance from which to read and construct Component objects
:returns: a dictionary of component id to Component object for all read/parsed components
"""
components: Dict[str, Component] = {}
# Ensure referenced runtime is available
if not PipelineProcessorRegistry.instance().is_valid_runtime_type(catalog.runtime_type.name):
return components
# Assign component parser based on the runtime platform type
parser = ComponentParser.create_instance(platform=catalog.runtime_type)
# Assign reader based on the type of the catalog (the 'schema_name')
catalog_reader = self._load_catalog_reader_class(catalog, parser.file_types)
if not catalog_reader:
return components
# Get content of component definition file for each component in this catalog
self.log.debug(f"Processing components in catalog '{catalog.display_name}'")
catalog_entries = catalog_reader.read_component_definitions(catalog)
if not catalog_entries:
return components
for catalog_entry in catalog_entries:
# Parse the entry to get a fully qualified Component object
try:
parsed_components = parser.parse(catalog_entry) or []
except Exception as e:
self.log.warning(
f"Could not parse definition for component with identifying information: "
f"'{catalog_entry.entry_reference}' -> {str(e)}"
)
else:
for component in parsed_components:
components[component.id] = component
return components
@staticmethod
def get_generic_components() -> List[Component]:
return list(ComponentCache._generic_components.values())
@staticmethod
def get_generic_component(component_id: str) -> Optional[Component]:
return ComponentCache._generic_components.get(component_id)
@staticmethod
def get_generic_component_from_op(component_op: str) -> Optional[Component]:
for component in ComponentCache.get_generic_components():
if component.op == component_op:
return component
return None
@staticmethod
def get_generic_component_ops() -> List[str]:
return [component.op for component in ComponentCache.get_generic_components()]
@staticmethod
def load_jinja_template(template_name: str) -> Template:
"""
Loads the jinja template of the given name from the
elyra/templates/components folder
"""
loader = PackageLoader("elyra", "templates/components")
template_env = Environment(loader=loader)
template_env.policies["json.dumps_kwargs"] = {"sort_keys": False} # prevent automatic key sort on 'tojson'
return template_env.get_template(template_name)
@staticmethod
def to_canvas_palette(components: List[Component]) -> Dict:
"""
Converts catalog components into appropriate canvas palette format
"""
template = ComponentCache.load_jinja_template("canvas_palette_template.jinja2")
# Define a fallback category for components with no given categories
fallback_category_name = "No Category"
# Convert the list of all components into a dictionary of
# component lists keyed by category
category_to_components: Dict[str, List[Component]] = {}
for component in components:
categories = component.categories
# Assign a fallback category so that component is not
# lost during palette render
if not categories:
categories = [fallback_category_name]
for category in categories:
if category not in category_to_components.keys():
category_to_components[category] = []
if component.id not in [comp.id for comp in category_to_components[category]]:
category_to_components[category].append(component)
# Render template
canvas_palette = template.render(category_dict=category_to_components)
return json.loads(canvas_palette)
@staticmethod
def to_canvas_properties(component: Component) -> Dict:
"""
Converts catalog components into appropriate canvas properties format.
If component_id is one of the generic set, generic template is rendered,
otherwise, the runtime-specific property template is rendered.
"""
if ComponentCache.get_generic_component(component.id) is not None:
template = ComponentCache.load_jinja_template("generic_properties_template.jinja2")
else:
template = ComponentCache.load_jinja_template("canvas_properties_template.jinja2")
template_vars = {
"elyra_owned_properties": component.get_elyra_properties(),
"render_property_details": ComponentProperty.render_property_details,
}
template.globals.update(template_vars)
canvas_properties = template.render(component=component)
return json.loads(canvas_properties)
class ManifestFileChangeHandler(FileSystemEventHandler):
"""Watchdog handler that filters on .json files within specific metadata directories."""
def __init__(self, component_cache: ComponentCache, **kwargs):
super().__init__(**kwargs)
self.component_cache = component_cache
self.log = component_cache.log
def dispatch(self, event):
"""Dispatches delete and modification events pertaining to the manifest filename"""
if "elyra-component-manifest" in event.src_path:
super().dispatch(event)
def on_modified(self, event):
"""Fires when the component manifest file is modified."""
self.log.debug(f"ManifestFileChangeHandler: file '{event.src_path}' has been modified.")
manifest = self.component_cache._load_manifest(filename=event.src_path)
if manifest: # only update the manifest if there is work to do
for catalog, action in manifest.items():
self.log.debug(f"ManifestFileChangeHandler: inserting ({catalog},{action}) into update queue...")
if action == "delete":
# The metadata instance has already been deleted, so we must
# fabricate an instance that only consists of a catalog name
catalog_instance = ComponentCatalogMetadata(name=catalog)
else: # action == 'modify'
# Fetch the catalog instance associated with this action
catalog_instance = MetadataManager(
schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID
).get(name=catalog)
self.component_cache.update(catalog=catalog_instance, action=action)
self.component_cache.update_manifest(filename=event.src_path) # clear the manifest | # Proceed only if singleton instance has been created
if self.initialized:
# The cache manager will work on manifest and cache tasks on an
# in-process basis as load() is only called during startup from
# the server process. | random_line_split |
run.py | #!/usr/bin/env python
from time import sleep
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logging.debug("Setup logging")
from miniboa import TelnetServer
from . import state
from .state import CLIENT_LIST, CLIENT_STATE # shared client registries used throughout this module
PRODUCT_NAME = "iMyFace"
AUTH_DB = {}
SERVER_RUN = True
AUTH_RETRY = 2
def on_connect(client):
""" handler for new connections
"""
logging.info("Opened connection to %s" % client.addrport() )
state.set_client_list(client)
state.initialize_client_state(client)
client.send("")
client.send("Welcome to the %s Server, %s.\n" % (PRODUCT_NAME, client.addrport()) )
client.send("Enter your user_id, or type \"enroll\" to create a new account: ")
def on_disconnect(client):
|
def process_clients():
"""
Check each client, if client.cmd_ready == True then there is a line of
input available via client.get_command().
"""
for client in state.CLIENT_LIST:
if client.active and client.cmd_ready:
logging.debug("Found a message, processing...")
msg_processor(client)
def _enroll(client, msg):
current_state = CLIENT_STATE[client]['auth_status']
def get_user():
client.send("New username: ")
CLIENT_STATE[client]['auth_status'] = 'enroll_user'
def enroll_user():
if not AUTH_DB.has_key(msg):
CLIENT_STATE[client]['user_id'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_pass'
client.send("New password: ")
client.password_mode_on()
else:
client.send("%s already taken, try something else: " % msg)
CLIENT_STATE[client]['auth_status'] = 'enroll_user'
def enroll_pass():
CLIENT_STATE[client]['pass1'] = msg
client.send("Please type your password again: ")
CLIENT_STATE[client]['auth_status'] = 'enroll_pass2'
def enroll_pass2():
if CLIENT_STATE[client]['pass1'] == msg:
client.password_mode_off()
logging.debug("New password confirmed.")
CLIENT_STATE[client]['temp_password'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_first_name'
client.send("\nWhat is your first name: ")
else:
logging.debug("Running target state %s in enroll module" % current_state)
CLIENT_STATE[client]['auth_status'] = 'enroll_pass'
def enroll_first_name():
if msg: # require a non-empty first name
CLIENT_STATE[client]['first_name'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_last_name'
client.send("What is your last name: ")
def enroll_last_name():
if msg: # require a non-empty last name
CLIENT_STATE[client]['last_name'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_save'
fn = CLIENT_STATE[client]['first_name']
ln = CLIENT_STATE[client]['last_name']
client.send("\nAbout to create new user: %s %s" % (fn, ln))
client.send("\nType yes, and hit enter to continue: ")
def enroll_save():
if not msg.lower() == 'yes':
CLIENT_STATE[client]['auth_status'] = 'startup'
client.active = False
return
user_id = CLIENT_STATE[client]['user_id']
password = CLIENT_STATE[client]['temp_password']
first_name = CLIENT_STATE[client]['first_name']
last_name = CLIENT_STATE[client]['last_name']
AUTH_DB[user_id] = {'password': password, 'first_name': first_name, 'last_name': last_name}
# cleanup password session vars
# del(CLIENT_STATE[client]['temp_password'])
# del(CLIENT_STATE[client]['pass1'])
logging.debug("Saved user_id and password in auth_db")
client.send("Your account has been created. \n")
client.send("Please enter your new username: ")
CLIENT_STATE[client]['auth_status'] = 'startup'
process_clients()
cmds = {'enroll_start': get_user,
'enroll_user': enroll_user,
'enroll_pass': enroll_pass,
'enroll_pass2': enroll_pass2,
'enroll_first_name': enroll_first_name,
'enroll_last_name': enroll_last_name,
'enroll_save': enroll_save,
}
if cmds.has_key(current_state):
logging.debug("Running target state %s in enroll module" % current_state)
cmds[current_state]()
else:
logging.warn("Can not find target state in enroll module")
def _set_user_id(client, msg):
logging.debug("_set_user_id got message: " + msg)
if msg == 'enroll':
CLIENT_STATE[client]['auth_status'] = 'enroll_start'
_enroll(client, msg)
else:
CLIENT_STATE[client]['user_id'] = msg
logging.debug("Client set user_id to: " + msg)
# next step:
CLIENT_STATE[client]['auth_status'] = 'set_pass'
client.password_mode_on()
client.send("Enter your password: ")
process_clients()
def _set_password(client, msg):
CLIENT_STATE[client]['password'] = msg
client.password_mode_off()
logging.debug("Client set password to: " + msg)
CLIENT_STATE[client]['auth_status'] = 'auth_lookup'
msg_processor(client)
def _auth_lookup(client, msg):
def login_failed():
client.send("\nUser_id or password incorrect, enter your user_id again: ")
CLIENT_STATE[client]['auth_retry'] += 1
CLIENT_STATE[client]['auth_status'] = 'startup'
user_id = CLIENT_STATE[client]['user_id']
client_password = CLIENT_STATE[client]['password']
if AUTH_DB.has_key(user_id):
if AUTH_DB[user_id]['password'] == client_password:
logging.debug("auth_db lookup success for: " + user_id)
CLIENT_STATE[client]['auth_status'] = 'auth_success'
broadcast(client, '%s just connected.\n' % user_id )
msg_processor(client)
else:
login_failed()
else:
login_failed()
def auth(client, msg):
# msg = str(client.get_command())
logging.debug('Auth for %s:%s' % (client.addrport(), msg))
auth_status = CLIENT_STATE[client].get('auth_status', 'startup')
logging.debug("The auth status for %s is %s" % (str(client), auth_status))
command_dict = {'startup': _set_user_id,
'set_pass': _set_password,
'auth_lookup': _auth_lookup,
'enroll_start': _enroll,
'enroll_user': _enroll,
'enroll_pass': _enroll,
'enroll_pass2': _enroll,
'enroll_first_name': _enroll,
'enroll_last_name': _enroll,
'enroll_save': _enroll,
}
if auth_status == 'auth_success':
return True
else:
cmd = command_dict[auth_status]
logging.debug("Running auth function: " + str(cmd))
cmd(client, msg)
def broadcast(client, msg):
"""
Send msg to every client.
"""
for client_target in CLIENT_LIST:
if client_target != client:
client_target.send(msg)
def _whos_online(client, msg):
client.send("\nList of people who are currently online:\n")
for client_name in CLIENT_LIST:
user_id = CLIENT_STATE[client_name]['user_id']
first_name = AUTH_DB[user_id]['first_name']
last_name = AUTH_DB[user_id]['last_name']
client.send("\n-")
client.send(" ".join([first_name, last_name]))
client.send("\n\n")
def _new_post(client, msg):
pass
def _facedup(client, msg):
pass
def _inmyface(client, msg):
pass
def _outtamyface(client, msg):
pass
def _about_me(client, msg):
user_id = CLIENT_STATE[client]['user_id']
fn = AUTH_DB[user_id]['first_name']
ln = AUTH_DB[user_id]['last_name']
client.send("\nYour user_id is: " + user_id)
client.send("\nYour Name is: %s %s" % (fn, ln))
client.send("\n")
def _read_posts(client, msg):
pass
def _quit(client, msg):
client.active = False
def _draw_main_menu(client, commands):
"""pass the client, and the list of commands, to draw the main menu page
"""
client.send("\n")
client.send_wrapped("~=" * 20)
client.send(" " * 16 +"Welcome to " + PRODUCT_NAME + "\n")
client.send_wrapped("~=" * 20)
client.send("\n")
client.send(" | ".join(commands))
client.send("\n")
def _main_menu(client, msg):
logging.debug("Client %s at main menu" % (CLIENT_STATE[client]['user_id']))
commands = {'whos_online':_whos_online,
'new_post':_new_post,
'facedup': _facedup,
'inmyface': _inmyface,
'outtamyface': _outtamyface,
'about_me': _about_me,
'read_posts': _read_posts,
'quit': _quit,
}
_draw_main_menu(client, commands.keys())
if commands.has_key(msg):
cmd = commands.get(msg)
cmd(client, msg)
else:
client.send("Please enter a command: ")
def msg_processor(client):
"""
"""
global SERVER_RUN
msg = client.get_command()
logging.debug('%s says, "%s"' % (client.addrport(), msg))
if msg == "":
return
if msg == 'debug':
logging.debug(str(state.CLIENT_STATE))
logging.debug(str(AUTH_DB))
logging.debug(str(state.CLIENT_LIST))
return
if not CLIENT_STATE[client].has_key('auth_retry'):
CLIENT_STATE[client]['auth_retry'] = 0
if auth(client, msg):
logging.info("Client %s logged in." % (CLIENT_STATE[client]['user_id']))
_main_menu(client, msg)
else:
logging.debug("Client not logged in")
if CLIENT_STATE[client]['auth_retry'] > AUTH_RETRY:
logging.debug("Kicked %s for too many login attempts." % (client.addrport()))
client.active = False
# for guest in CLIENT_LIST:
# if guest != client:
# guest.send('%s says, %s\n' % (client.addrport(), msg))
# else:
# guest.send('You say, %s\n' % msg)
# ## bye = disconnect
# if cmd == 'bye':
# client.active = False
# ## shutdown == stop the server
# elif cmd == 'shutdown':
# SERVER_RUN = False
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
if __name__ == '__main__':
telnet_server = TelnetServer(
port=7777,
address='',
on_connect=on_connect,
on_disconnect=on_disconnect,
timeout = .05
)
logging.info("Listening for connections on port %d. CTRL-C to break."
% telnet_server.port)
## Server Loop
while SERVER_RUN:
telnet_server.poll() ## Send, Recv, and look for new connections
state.kick_idle() ## Check for idle clients
process_clients() ## Check for client input
sleep(0.1)
logging.info(">> Server shutdown.")
| """ lost, or disconnected clients
"""
logging.info("Lost connection to %s" % client.addrport() )
state.prune_client_state(client)
state.prune_client_list(client)
#broadcast('%s leaves the conversation.\n' % client.addrport() ) | identifier_body |
run.py | #!/usr/bin/env python
from time import sleep
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logging.debug("Setup logging")
from miniboa import TelnetServer
from . import state
from .state import CLIENT_LIST, CLIENT_STATE # shared client registries used throughout this module
PRODUCT_NAME = "iMyFace"
AUTH_DB = {}
SERVER_RUN = True
AUTH_RETRY = 2
def on_connect(client):
""" handler for new connections
"""
logging.info("Opened connection to %s" % client.addrport() )
state.set_client_list(client)
state.initialize_client_state(client)
client.send("")
client.send("Welcome to the %s Server, %s.\n" % (PRODUCT_NAME, client.addrport()) )
client.send("Enter your user_id, or type \"enroll\" to create a new account: ")
def on_disconnect(client):
""" lost, or disconnected clients
"""
logging.info("Lost connection to %s" % client.addrport() )
state.prune_client_state(client)
state.prune_client_list(client)
#broadcast('%s leaves the conversation.\n' % client.addrport() )
def process_clients():
"""
Check each client, if client.cmd_ready == True then there is a line of
input available via client.get_command().
"""
for client in state.CLIENT_LIST:
if client.active and client.cmd_ready:
logging.debug("Found a message, processing...")
msg_processor(client)
def _enroll(client, msg):
current_state = CLIENT_STATE[client]['auth_status']
def get_user():
client.send("New username: ")
CLIENT_STATE[client]['auth_status'] = 'enroll_user'
def enroll_user():
if not AUTH_DB.has_key(msg):
CLIENT_STATE[client]['user_id'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_pass'
client.send("New password: ")
client.password_mode_on()
else:
client.send("%s already taken, try something else: " % msg)
CLIENT_STATE[client]['auth_status'] = 'enroll_user'
def enroll_pass():
CLIENT_STATE[client]['pass1'] = msg
client.send("Please type your password again: ")
CLIENT_STATE[client]['auth_status'] = 'enroll_pass2'
def enroll_pass2():
if CLIENT_STATE[client]['pass1'] == msg:
client.password_mode_off()
logging.debug("New password confirmed.")
CLIENT_STATE[client]['temp_password'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_first_name'
client.send("\nWhat is your first name: ")
else:
logging.debug("Running target state %s in enroll module" % current_state)
CLIENT_STATE[client]['auth_status'] = 'enroll_pass'
def enroll_first_name():
if msg: # require a non-empty first name
CLIENT_STATE[client]['first_name'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_last_name'
client.send("What is your last name: ")
def enroll_last_name():
if msg: # require a non-empty last name
CLIENT_STATE[client]['last_name'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_save'
fn = CLIENT_STATE[client]['first_name']
ln = CLIENT_STATE[client]['last_name']
client.send("\nAbout to create new user: %s %s" % (fn, ln))
client.send("\nType yes, and hit enter to continue: ")
def enroll_save():
if not msg.lower() == 'yes':
CLIENT_STATE[client]['auth_status'] = 'startup'
client.active = False
return
user_id = CLIENT_STATE[client]['user_id']
password = CLIENT_STATE[client]['temp_password']
first_name = CLIENT_STATE[client]['first_name']
last_name = CLIENT_STATE[client]['last_name']
AUTH_DB[user_id] = {'password': password, 'first_name': first_name, 'last_name': last_name}
# cleanup password session vars
# del(CLIENT_STATE[client]['temp_password'])
# del(CLIENT_STATE[client]['pass1'])
logging.debug("Saved user_id and password in auth_db")
client.send("Your account has been created. \n")
client.send("Please enter your new username: ")
CLIENT_STATE[client]['auth_status'] = 'startup'
process_clients()
cmds = {'enroll_start': get_user,
'enroll_user': enroll_user,
'enroll_pass': enroll_pass,
'enroll_pass2': enroll_pass2,
'enroll_first_name': enroll_first_name,
'enroll_last_name': enroll_last_name,
'enroll_save': enroll_save,
}
if cmds.has_key(current_state):
logging.debug("Running target state %s in enroll module" % current_state)
cmds[current_state]()
else:
logging.warn("Can not find target state in enroll module")
def _set_user_id(client, msg):
logging.debug("_set_user_id got message: " + msg)
if msg == 'enroll':
CLIENT_STATE[client]['auth_status'] = 'enroll_start'
_enroll(client, msg)
else:
CLIENT_STATE[client]['user_id'] = msg
logging.debug("Client set user_id to: " + msg)
# next step:
CLIENT_STATE[client]['auth_status'] = 'set_pass'
client.password_mode_on()
client.send("Enter your password: ")
process_clients()
def _set_password(client, msg):
CLIENT_STATE[client]['password'] = msg
client.password_mode_off()
logging.debug("Client set password to: " + msg)
CLIENT_STATE[client]['auth_status'] = 'auth_lookup'
msg_processor(client)
def _auth_lookup(client, msg):
def login_failed():
client.send("\nUser_id or password incorrect, enter your user_id again: ")
CLIENT_STATE[client]['auth_retry'] += 1
CLIENT_STATE[client]['auth_status'] = 'startup'
user_id = CLIENT_STATE[client]['user_id']
client_password = CLIENT_STATE[client]['password']
if AUTH_DB.has_key(user_id):
if AUTH_DB[user_id]['password'] == client_password:
logging.debug("auth_db lookup success for: " + user_id)
CLIENT_STATE[client]['auth_status'] = 'auth_success'
broadcast(client, '%s just connected.\n' % user_id )
msg_processor(client)
else:
login_failed()
else:
login_failed()
def auth(client, msg):
# msg = str(client.get_command())
logging.debug('Auth for %s:%s' % (client.addrport(), msg))
auth_status = CLIENT_STATE[client].get('auth_status', 'startup')
logging.debug("The auth status for %s is %s" % (str(client), auth_status))
command_dict = {'startup': _set_user_id,
'set_pass': _set_password,
'auth_lookup': _auth_lookup,
'enroll_start': _enroll,
'enroll_user': _enroll,
'enroll_pass': _enroll,
'enroll_pass2': _enroll,
'enroll_first_name': _enroll,
'enroll_last_name': _enroll,
'enroll_save': _enroll,
}
if auth_status == 'auth_success':
return True
else:
cmd = command_dict[auth_status]
logging.debug("Running auth function: " + str(cmd))
cmd(client, msg)
def broadcast(client, msg):
"""
Send msg to every client.
"""
for client_target in CLIENT_LIST:
if client_target != client:
client_target.send(msg)
def _whos_online(client, msg):
client.send("\nList of people who are currently online:\n")
for client_name in CLIENT_LIST:
user_id = CLIENT_STATE[client_name]['user_id']
first_name = AUTH_DB[user_id]['first_name']
last_name = AUTH_DB[user_id]['last_name']
client.send("\n-")
client.send(" ".join([first_name, last_name]))
client.send("\n\n")
def _new_post(client, msg):
pass
def _facedup(client, msg):
pass
def _inmyface(client, msg):
pass
def _outtamyface(client, msg):
pass
def _about_me(client, msg):
user_id = CLIENT_STATE[client]['user_id']
fn = AUTH_DB[user_id]['first_name']
ln = AUTH_DB[user_id]['last_name']
client.send("\nYour user_id is: " + user_id)
client.send("\nYour Name is: %s %s" % (fn, ln))
client.send("\n")
def _read_posts(client, msg):
pass
def _quit(client, msg):
client.active = False
def _draw_main_menu(client, commands):
"""pass the client, and the list of commands, to draw the main menu page
"""
client.send("\n")
client.send_wrapped("~=" * 20)
client.send(" " * 16 +"Welcome to " + PRODUCT_NAME + "\n")
client.send_wrapped("~=" * 20)
client.send("\n")
client.send(" | ".join(commands))
client.send("\n")
def _main_menu(client, msg):
logging.debug("Client %s at main menu" % (CLIENT_STATE[client]['user_id']))
commands = {'whos_online':_whos_online,
'new_post':_new_post,
'facedup': _facedup,
'inmyface': _inmyface,
'outtamyface': _outtamyface,
'about_me': _about_me,
'read_posts': _read_posts,
'quit': _quit,
}
_draw_main_menu(client, commands.keys())
if commands.has_key(msg):
cmd = commands.get(msg)
cmd(client, msg)
else:
client.send("Please enter a command: ")
def msg_processor(client):
"""
"""
global SERVER_RUN
msg = client.get_command()
logging.debug('%s says, "%s"' % (client.addrport(), msg))
if msg == "":
return
if msg == 'debug':
|
if not CLIENT_STATE[client].has_key('auth_retry'):
CLIENT_STATE[client]['auth_retry'] = 0
if auth(client, msg):
logging.info("Client %s logged in." % (CLIENT_STATE[client]['user_id']))
_main_menu(client, msg)
else:
logging.debug("Client not logged in")
if CLIENT_STATE[client]['auth_retry'] > AUTH_RETRY:
logging.debug("Kicked %s for too many login attempts." % (client.addrport()))
client.active = False
# for guest in CLIENT_LIST:
# if guest != client:
# guest.send('%s says, %s\n' % (client.addrport(), msg))
# else:
# guest.send('You say, %s\n' % msg)
# ## bye = disconnect
# if cmd == 'bye':
# client.active = False
# ## shutdown == stop the server
# elif cmd == 'shutdown':
# SERVER_RUN = False
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
if __name__ == '__main__':
telnet_server = TelnetServer(
port=7777,
address='',
on_connect=on_connect,
on_disconnect=on_disconnect,
timeout = .05
)
logging.info("Listening for connections on port %d. CTRL-C to break."
% telnet_server.port)
## Server Loop
while SERVER_RUN:
telnet_server.poll() ## Send, Recv, and look for new connections
state.kick_idle() ## Check for idle clients
process_clients() ## Check for client input
sleep(0.1)
logging.info(">> Server shutdown.")
| logging.debug(str(state.CLIENT_STATE))
logging.debug(str(AUTH_DB))
logging.debug(str(state.CLIENT_LIST))
return | conditional_block |
run.py | #!/usr/bin/env python
from time import sleep
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logging.debug("Setup logging")
from miniboa import TelnetServer
from . import state
from .state import CLIENT_LIST, CLIENT_STATE # shared client registries used throughout this module
PRODUCT_NAME = "iMyFace"
AUTH_DB = {}
SERVER_RUN = True
AUTH_RETRY = 2
def on_connect(client):
""" handler for new connections
"""
logging.info("Opened connection to %s" % client.addrport() )
state.set_client_list(client)
state.initialize_client_state(client)
client.send("")
client.send("Welcome to the %s Server, %s.\n" % (PRODUCT_NAME, client.addrport()) )
client.send("Enter your user_id, or type \"enroll\" to create a new account: ")
def on_disconnect(client):
""" lost, or disconnected clients
"""
logging.info("Lost connection to %s" % client.addrport() )
state.prune_client_state(client)
state.prune_client_list(client)
#broadcast('%s leaves the conversation.\n' % client.addrport() )
def process_clients():
"""
Check each client, if client.cmd_ready == True then there is a line of
input available via client.get_command().
"""
for client in state.CLIENT_LIST:
if client.active and client.cmd_ready:
logging.debug("Found a message, processing...")
msg_processor(client)
def _enroll(client, msg):
current_state = CLIENT_STATE[client]['auth_status']
def get_user():
client.send("New username: ")
CLIENT_STATE[client]['auth_status'] = 'enroll_user'
def enroll_user():
if not AUTH_DB.has_key(msg):
CLIENT_STATE[client]['user_id'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_pass'
client.send("New password: ")
client.password_mode_on()
else:
client.send("%s already taken, try something else: " % msg)
CLIENT_STATE[client]['auth_status'] = 'enroll_user'
def enroll_pass():
CLIENT_STATE[client]['pass1'] = msg
client.send("Please type your password again: ")
CLIENT_STATE[client]['auth_status'] = 'enroll_pass2'
def enroll_pass2():
if CLIENT_STATE[client]['pass1'] == msg:
client.password_mode_off()
logging.debug("New password confirmed.")
CLIENT_STATE[client]['temp_password'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_first_name'
client.send("\nWhat is your first name: ")
else:
logging.debug("Running target state %s in enroll module" % current_state)
CLIENT_STATE[client]['auth_status'] = 'enroll_pass'
def enroll_first_name():
if msg: # require a non-empty first name
CLIENT_STATE[client]['first_name'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_last_name'
client.send("What is your last name: ")
def enroll_last_name():
if msg: # require a non-empty last name
CLIENT_STATE[client]['last_name'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_save'
fn = CLIENT_STATE[client]['first_name']
ln = CLIENT_STATE[client]['last_name']
client.send("\nAbout to create new user: %s %s" % (fn, ln))
client.send("\nType yes, and hit enter to continue: ")
def enroll_save():
if not msg.lower() == 'yes':
CLIENT_STATE[client]['auth_status'] = 'startup'
client.active = False
return
user_id = CLIENT_STATE[client]['user_id']
password = CLIENT_STATE[client]['temp_password']
first_name = CLIENT_STATE[client]['first_name']
last_name = CLIENT_STATE[client]['last_name']
AUTH_DB[user_id] = {'password': password, 'first_name': first_name, 'last_name': last_name}
# cleanup password session vars
# del(CLIENT_STATE[client]['temp_password'])
# del(CLIENT_STATE[client]['pass1'])
logging.debug("Saved user_id and password in auth_db")
client.send("Your account has been created. \n")
client.send("Please enter your new username: ")
CLIENT_STATE[client]['auth_status'] = 'startup'
process_clients()
cmds = {'enroll_start': get_user,
'enroll_user': enroll_user,
'enroll_pass': enroll_pass,
'enroll_pass2': enroll_pass2,
'enroll_first_name': enroll_first_name,
'enroll_last_name': enroll_last_name,
'enroll_save': enroll_save,
}
if cmds.has_key(current_state):
logging.debug("Running target state %s in enroll module" % current_state)
cmds[current_state]()
else:
logging.warn("Can not find target state in enroll module")
def _set_user_id(client, msg):
logging.debug("_set_user_id got message: " + msg)
if msg == 'enroll':
CLIENT_STATE[client]['auth_status'] = 'enroll_start'
_enroll(client, msg)
else:
CLIENT_STATE[client]['user_id'] = msg
logging.debug("Client set user_id to: " + msg)
# next step:
CLIENT_STATE[client]['auth_status'] = 'set_pass'
client.password_mode_on()
client.send("Enter your password: ")
process_clients()
def _set_password(client, msg):
CLIENT_STATE[client]['password'] = msg
client.password_mode_off()
logging.debug("Client set password to: " + msg)
CLIENT_STATE[client]['auth_status'] = 'auth_lookup'
msg_processor(client)
def _auth_lookup(client, msg):
def login_failed():
client.send("\nUser_id or password incorrect, enter your user_id again: ")
CLIENT_STATE[client]['auth_retry'] += 1
CLIENT_STATE[client]['auth_status'] = 'startup'
user_id = CLIENT_STATE[client]['user_id']
client_password = CLIENT_STATE[client]['password']
if AUTH_DB.has_key(user_id):
if AUTH_DB[user_id]['password'] == client_password:
logging.debug("auth_db lookup success for: " + user_id)
CLIENT_STATE[client]['auth_status'] = 'auth_success'
broadcast(client, '%s just connected.\n' % user_id )
msg_processor(client)
else:
login_failed()
else:
login_failed()
def auth(client, msg):
# msg = str(client.get_command())
logging.debug('Auth for %s:%s' % (client.addrport(), msg))
auth_status = CLIENT_STATE[client].get('auth_status', 'startup')
logging.debug("The auth status for %s is %s" % (str(client), auth_status))
command_dict = {'startup': _set_user_id,
'set_pass': _set_password,
'auth_lookup': _auth_lookup,
'enroll_start': _enroll,
'enroll_user': _enroll,
'enroll_pass': _enroll,
'enroll_pass2': _enroll,
'enroll_first_name': _enroll,
'enroll_last_name': _enroll,
'enroll_save': _enroll,
}
if auth_status == 'auth_success':
return True
else:
cmd = command_dict[auth_status]
logging.debug("Running auth function: " + str(cmd))
cmd(client, msg)
def broadcast(client, msg):
"""
Send msg to every client.
"""
for client_target in CLIENT_LIST:
if client_target != client:
client_target.send(msg)
def _whos_online(client, msg):
client.send("\nList of people who are currently online:\n")
for client_name in CLIENT_LIST:
user_id = CLIENT_STATE[client_name]['user_id']
first_name = AUTH_DB[user_id]['first_name']
last_name = AUTH_DB[user_id]['last_name']
client.send("\n-")
client.send(" ".join([first_name, last_name]))
client.send("\n\n")
def _new_post(client, msg):
pass
def _facedup(client, msg):
pass
def _inmyface(client, msg):
pass
def _outtamyface(client, msg):
pass
def _about_me(client, msg):
user_id = CLIENT_STATE[client]['user_id']
fn = AUTH_DB[user_id]['first_name']
ln = AUTH_DB[user_id]['last_name']
client.send("\nYour user_id is: " + user_id)
client.send("\nYour Name is: %s %s" % (fn, ln))
client.send("\n")
def _read_posts(client, msg):
pass
def _quit(client, msg):
client.active = False
def _draw_main_menu(client, commands):
"""pass the client, and the list of commands, to draw the main menu page
"""
client.send("\n")
client.send_wrapped("~=" * 20)
client.send(" " * 16 +"Welcome to " + PRODUCT_NAME + "\n")
client.send_wrapped("~=" * 20)
client.send("\n")
client.send(" | ".join(commands))
client.send("\n")
def _main_menu(client, msg):
logging.debug("Client %s at main menu" % (CLIENT_STATE[client]['user_id']))
commands = {'whos_online':_whos_online,
'new_post':_new_post,
'facedup': _facedup,
'inmyface': _inmyface,
'outtamyface': _outtamyface,
'about_me': _about_me,
'read_posts': _read_posts,
'quit': _quit,
}
_draw_main_menu(client, commands.keys())
if commands.has_key(msg):
cmd = commands.get(msg)
cmd(client, msg)
else:
client.send("Please enter a command: ")
def msg_processor(client):
"""
"""
global SERVER_RUN
msg = client.get_command()
logging.debug('%s says, "%s"' % (client.addrport(), msg))
if msg == "":
return
if msg == 'debug':
logging.debug(str(state.CLIENT_STATE))
logging.debug(str(AUTH_DB))
logging.debug(str(state.CLIENT_LIST))
return
if not CLIENT_STATE[client].has_key('auth_retry'):
CLIENT_STATE[client]['auth_retry'] = 0
if auth(client, msg):
logging.info("Client %s logged in." % (CLIENT_STATE[client]['user_id']))
_main_menu(client, msg)
else:
logging.debug("Client not logged in")
if CLIENT_STATE[client]['auth_retry'] > AUTH_RETRY:
logging.debug("Kicked %s for too many login attempts." % (client.addrport()))
client.active = False
# for guest in CLIENT_LIST:
# if guest != client:
# guest.send('%s says, %s\n' % (client.addrport(), msg))
# else:
# guest.send('You say, %s\n' % msg)
# ## bye = disconnect
# if cmd == 'bye':
# client.active = False
# ## shutdown == stop the server | # Main
#------------------------------------------------------------------------------
if __name__ == '__main__':
telnet_server = TelnetServer(
port=7777,
address='',
on_connect=on_connect,
on_disconnect=on_disconnect,
timeout = .05
)
logging.info("Listening for connections on port %d. CTRL-C to break."
% telnet_server.port)
## Server Loop
while SERVER_RUN:
telnet_server.poll() ## Send, Recv, and look for new connections
state.kick_idle() ## Check for idle clients
process_clients() ## Check for client input
sleep(0.1)
logging.info(">> Server shutdown.") | # elif cmd == 'shutdown':
# SERVER_RUN = False
#------------------------------------------------------------------------------ | random_line_split |
run.py | #!/usr/bin/env python
from time import sleep
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logging.debug("Setup logging")
from miniboa import TelnetServer
from . import state
from .state import CLIENT_LIST, CLIENT_STATE # shared client registries used throughout this module
PRODUCT_NAME = "iMyFace"
AUTH_DB = {}
SERVER_RUN = True
AUTH_RETRY = 2
def on_connect(client):
""" handler for new connections
"""
logging.info("Opened connection to %s" % client.addrport() )
state.set_client_list(client)
state.initialize_client_state(client)
client.send("")
client.send("Welcome to the %s Server, %s.\n" % (PRODUCT_NAME, client.addrport()) )
client.send("Enter your user_id, or type \"enroll\" to create a new account: ")
def on_disconnect(client):
""" lost, or disconnected clients
"""
logging.info("Lost connection to %s" % client.addrport() )
state.prune_client_state(client)
state.prune_client_list(client)
#broadcast('%s leaves the conversation.\n' % client.addrport() )
def process_clients():
"""
Check each client, if client.cmd_ready == True then there is a line of
input available via client.get_command().
"""
for client in state.CLIENT_LIST:
if client.active and client.cmd_ready:
logging.debug("Found a message, processing...")
msg_processor(client)
def _enroll(client, msg):
current_state = CLIENT_STATE[client]['auth_status']
def get_user():
client.send("New username: ")
CLIENT_STATE[client]['auth_status'] = 'enroll_user'
def | ():
if not AUTH_DB.has_key(msg):
CLIENT_STATE[client]['user_id'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_pass'
client.send("New password: ")
client.password_mode_on()
else:
client.send("%s already taken, try something else: " % msg)
CLIENT_STATE[client]['auth_status'] = 'enroll_user'
def enroll_pass():
CLIENT_STATE[client]['pass1'] = msg
client.send("Please type your password again: ")
CLIENT_STATE[client]['auth_status'] = 'enroll_pass2'
def enroll_pass2():
if CLIENT_STATE[client]['pass1'] == msg:
client.password_mode_off()
logging.debug("New password confirmed.")
CLIENT_STATE[client]['temp_password'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_first_name'
client.send("\nWhat is your first name: ")
else:
logging.debug("Running target state %s in enroll module" % current_state)
CLIENT_STATE[client]['auth_status'] = 'enroll_pass'
def enroll_first_name():
if msg: # require a non-empty first name
CLIENT_STATE[client]['first_name'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_last_name'
client.send("What is your last name: ")
def enroll_last_name():
if msg: # require a non-empty last name
CLIENT_STATE[client]['last_name'] = msg
CLIENT_STATE[client]['auth_status'] = 'enroll_save'
fn = CLIENT_STATE[client]['first_name']
ln = CLIENT_STATE[client]['last_name']
client.send("\nAbout to create new user: %s %s" % (fn, ln))
client.send("\nType yes, and hit enter to continue: ")
def enroll_save():
if not msg.lower() == 'yes':
CLIENT_STATE[client]['auth_status'] = 'startup'
client.active = False
return
user_id = CLIENT_STATE[client]['user_id']
password = CLIENT_STATE[client]['temp_password']
first_name = CLIENT_STATE[client]['first_name']
last_name = CLIENT_STATE[client]['last_name']
AUTH_DB[user_id] = {'password': password, 'first_name': first_name, 'last_name': last_name}
# cleanup password session vars
# del(CLIENT_STATE[client]['temp_password'])
# del(CLIENT_STATE[client]['pass1'])
logging.debug("Saved user_id and password in auth_db")
client.send("Your account has been created. \n")
client.send("Please enter your new username: ")
CLIENT_STATE[client]['auth_status'] = 'startup'
process_clients()
cmds = {'enroll_start': get_user,
'enroll_user': enroll_user,
'enroll_pass': enroll_pass,
'enroll_pass2': enroll_pass2,
'enroll_first_name': enroll_first_name,
'enroll_last_name': enroll_last_name,
'enroll_save': enroll_save,
}
if cmds.has_key(current_state):
logging.debug("Running target state %s in enroll module" % current_state)
cmds[current_state]()
else:
logging.warn("Can not find target state in enroll module")
def _set_user_id(client, msg):
logging.debug("_set_user_id got message: " + msg)
if msg == 'enroll':
CLIENT_STATE[client]['auth_status'] = 'enroll_start'
_enroll(client, msg)
else:
CLIENT_STATE[client]['user_id'] = msg
logging.debug("Client set user_id to: " + msg)
# next step:
CLIENT_STATE[client]['auth_status'] = 'set_pass'
client.password_mode_on()
client.send("Enter your password: ")
process_clients()
def _set_password(client, msg):
CLIENT_STATE[client]['password'] = msg
client.password_mode_off()
logging.debug("Client set password to: " + msg)
CLIENT_STATE[client]['auth_status'] = 'auth_lookup'
msg_processor(client)
def _auth_lookup(client, msg):
def login_failed():
client.send("\nUser_id or password incorrect, enter your user_id again: ")
CLIENT_STATE[client]['auth_retry'] += 1
CLIENT_STATE[client]['auth_status'] = 'startup'
user_id = CLIENT_STATE[client]['user_id']
client_password = CLIENT_STATE[client]['password']
if AUTH_DB.has_key(user_id):
if AUTH_DB[user_id]['password'] == client_password:
logging.debug("auth_db lookup success for: " + user_id)
CLIENT_STATE[client]['auth_status'] = 'auth_success'
broadcast(client, '%s just connected.\n' % user_id )
msg_processor(client)
else:
login_failed()
else:
login_failed()
def auth(client, msg):
# msg = str(client.get_command())
logging.debug('Auth for %s:%s' % (client.addrport(), msg))
auth_status = CLIENT_STATE[client].get('auth_status', 'startup')
logging.debug("The auth status for %s is %s" % (str(client), auth_status))
command_dict = {'startup': _set_user_id,
'set_pass': _set_password,
'auth_lookup': _auth_lookup,
'enroll_start': _enroll,
'enroll_user': _enroll,
'enroll_pass': _enroll,
'enroll_pass2': _enroll,
'enroll_first_name': _enroll,
'enroll_last_name': _enroll,
'enroll_save': _enroll,
}
if auth_status == 'auth_success':
return True
else:
cmd = command_dict[auth_status]
logging.debug("Running auth function: " + str(cmd))
cmd(client, msg)
def broadcast(client, msg):
"""
Send msg to every client.
"""
for client_target in CLIENT_LIST:
if client_target != client:
client_target.send(msg)
def _whos_online(client, msg):
client.send("\nList of people who are currently online:\n")
for client_name in CLIENT_LIST:
user_id = CLIENT_STATE[client_name]['user_id']
first_name = AUTH_DB[user_id]['first_name']
last_name = AUTH_DB[user_id]['last_name']
client.send("\n-")
client.send(" ".join([first_name, last_name]))
client.send("\n\n")
def _new_post(client, msg):
pass
def _facedup(client, msg):
pass
def _inmyface(client, msg):
pass
def _outtamyface(client, msg):
pass
def _about_me(client, msg):
user_id = CLIENT_STATE[client]['user_id']
fn = AUTH_DB[user_id]['first_name']
ln = AUTH_DB[user_id]['last_name']
client.send("\nYour user_id is: " + user_id)
client.send("\nYour Name is: %s %s" % (fn, ln))
client.send("\n")
def _read_posts(client, msg):
pass
def _quit(client, msg):
client.active = False
def _draw_main_menu(client, commands):
"""pass the client, and the list of commands, to draw the main menu page
"""
client.send("\n")
client.send_wrapped("~=" * 20)
client.send(" " * 16 +"Welcome to " + PRODUCT_NAME + "\n")
client.send_wrapped("~=" * 20)
client.send("\n")
client.send(" | ".join(commands))
client.send("\n")
def _main_menu(client, msg):
logging.debug("Client %s at main menu" % (CLIENT_STATE[client]['user_id']))
commands = {'whos_online':_whos_online,
'new_post':_new_post,
'facedup': _facedup,
'inmyface': _inmyface,
'outtamyface': _outtamyface,
'about_me': _about_me,
'read_posts': _read_posts,
'quit': _quit,
}
_draw_main_menu(client, commands.keys())
if commands.has_key(msg):
cmd = commands.get(msg)
cmd(client, msg)
else:
client.send("Please enter a command: ")
def msg_processor(client):
"""
"""
global SERVER_RUN
msg = client.get_command()
logging.debug('%s says, "%s"' % (client.addrport(), msg))
if msg == "":
return
if msg == 'debug':
logging.debug(str(state.CLIENT_STATE))
logging.debug(str(AUTH_DB))
logging.debug(str(state.CLIENT_LIST))
return
if not CLIENT_STATE[client].has_key('auth_retry'):
CLIENT_STATE[client]['auth_retry'] = 0
if auth(client, msg):
logging.info("Client %s logged in." % (CLIENT_STATE[client]['user_id']))
_main_menu(client, msg)
else:
logging.debug("Client not logged in")
if CLIENT_STATE[client]['auth_retry'] > AUTH_RETRY:
logging.debug("Kicked %s for too many login attempts." % (client.addrport()))
client.active = False
# for guest in CLIENT_LIST:
# if guest != client:
# guest.send('%s says, %s\n' % (client.addrport(), msg))
# else:
# guest.send('You say, %s\n' % msg)
# ## bye = disconnect
# if cmd == 'bye':
# client.active = False
# ## shutdown == stop the server
# elif cmd == 'shutdown':
# SERVER_RUN = False
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
if __name__ == '__main__':
telnet_server = TelnetServer(
port=7777,
address='',
on_connect=on_connect,
on_disconnect=on_disconnect,
timeout = .05
)
logging.info("Listening for connections on port %d. CTRL-C to break."
% telnet_server.port)
## Server Loop
while SERVER_RUN:
telnet_server.poll() ## Send, Recv, and look for new connections
state.kick_idle() ## Check for idle clients
process_clients() ## Check for client input
sleep(0.1)
logging.info(">> Server shutdown.")
| enroll_user | identifier_name |
editorBrowser.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { IEventEmitter } from 'vs/base/common/eventEmitter';
import { IDisposable } from 'vs/base/common/lifecycle';
import { IKeyboardEvent } from 'vs/base/browser/keyboardEvent';
import { IMouseEvent } from 'vs/base/browser/mouseEvent';
import { IConstructorSignature1 } from 'vs/platform/instantiation/common/instantiation';
import * as editorCommon from 'vs/editor/common/editorCommon';
import { Position } from 'vs/editor/common/core/position';
import { Range } from 'vs/editor/common/core/range';
import { FastDomNode } from 'vs/base/browser/fastDomNode';
/**
* @internal
*/
export interface IContentWidgetData {
widget: IContentWidget;
position: IContentWidgetPosition;
}
/**
* @internal
*/
export interface IOverlayWidgetData {
widget: IOverlayWidget;
position: IOverlayWidgetPosition;
}
/**
* @internal
*/
export interface ICodeEditorHelper {
getScrollWidth(): number;
getScrollLeft(): number;
getScrollHeight(): number;
getScrollTop(): number;
setScrollPosition(position: editorCommon.INewScrollPosition): void;
getVerticalOffsetForPosition(lineNumber: number, column: number): number;
delegateVerticalScrollbarMouseDown(browserEvent: MouseEvent): void;
getOffsetForColumn(lineNumber: number, column: number): number;
getTargetAtClientPoint(clientX: number, clientY: number): IMouseTarget;
getCompletelyVisibleViewRange(): Range;
}
/**
* @internal
*/
export interface IView extends IDisposable {
domNode: FastDomNode<HTMLElement>;
getInternalEventBus(): IEventEmitter;
createOverviewRuler(cssClassName: string, minimumHeight: number, maximumHeight: number): IOverviewRuler;
getCodeEditorHelper(): ICodeEditorHelper;
change(callback: (changeAccessor: IViewZoneChangeAccessor) => any): boolean;
getWhitespaces(): editorCommon.IEditorWhitespace[];
render(now: boolean, everything: boolean): void;
setAriaActiveDescendant(id: string): void;
focus(): void;
isFocused(): boolean;
saveState(): editorCommon.IViewState;
restoreState(state: editorCommon.IViewState): void;
addContentWidget(widgetData: IContentWidgetData): void;
layoutContentWidget(widgetData: IContentWidgetData): void;
removeContentWidget(widgetData: IContentWidgetData): void;
addOverlayWidget(widgetData: IOverlayWidgetData): void;
layoutOverlayWidget(widgetData: IOverlayWidgetData): void;
removeOverlayWidget(widgetData: IOverlayWidgetData): void;
}
/**
* @internal
*/
export interface IViewZoneData {
viewZoneId: number;
positionBefore: Position;
positionAfter: Position;
position: Position;
afterLineNumber: number;
}
/**
* @internal
*/
export interface IMouseDispatchData {
position: Position;
/**
* Desired mouse column (e.g. when position.column gets clamped to text length -- clicking after text on a line).
*/
mouseColumn: number;
startedOnLineNumbers: boolean;
inSelectionMode: boolean;
mouseDownCount: number;
altKey: boolean;
ctrlKey: boolean;
metaKey: boolean;
shiftKey: boolean;
}
/**
* @internal
*/
export interface IViewController {
dispatchMouse(data: IMouseDispatchData): void;
moveTo(source: string, position: Position): void;
paste(source: string, text: string, pasteOnNewLine: boolean): void;
type(source: string, text: string): void;
replacePreviousChar(source: string, text: string, replaceCharCnt: number): void;
compositionStart(source: string): void;
compositionEnd(source: string): void;
cut(source: string): void;
emitKeyDown(e: IKeyboardEvent): void;
emitKeyUp(e: IKeyboardEvent): void;
emitContextMenu(e: IEditorMouseEvent): void;
emitMouseMove(e: IEditorMouseEvent): void;
emitMouseLeave(e: IEditorMouseEvent): void;
emitMouseUp(e: IEditorMouseEvent): void;
emitMouseDown(e: IEditorMouseEvent): void;
emitMouseDrag(e: IEditorMouseEvent): void;
emitMouseDrop(e: IEditorMouseEvent): void;
}
/**
* @internal
*/
export const ClassNames = {
TEXTAREA_COVER: 'textAreaCover',
TEXTAREA: 'inputarea',
LINES_CONTENT: 'lines-content',
OVERFLOW_GUARD: 'overflow-guard',
VIEW_LINES: 'view-lines',
VIEW_LINE: 'view-line',
SCROLLABLE_ELEMENT: 'editor-scrollable',
CONTENT_WIDGETS: 'contentWidgets',
OVERFLOWING_CONTENT_WIDGETS: 'overflowingContentWidgets',
OVERLAY_WIDGETS: 'overlayWidgets',
MARGIN_VIEW_OVERLAYS: 'margin-view-overlays',
MARGIN: 'margin',
LINE_NUMBERS: 'line-numbers',
GLYPH_MARGIN: 'glyph-margin',
SCROLL_DECORATION: 'scroll-decoration',
VIEW_CURSORS_LAYER: 'cursors-layer',
VIEW_ZONES: 'view-zones'
};
/**
* @internal
*/
export interface IViewportInfo {
visibleRange: Range;
width: number;
height: number;
deltaTop: number;
deltaLeft: number;
}
// --- end View Event Handlers & Parts
/**
* A view zone is a full horizontal rectangle that 'pushes' text down.
* The editor reserves space for view zones when rendering.
*/
export interface IViewZone {
/**
* The line number after which this zone should appear.
* Use 0 to place a view zone before the first line number.
*/
afterLineNumber: number;
/**
* The column after which this zone should appear.
* If not set, the maxLineColumn of `afterLineNumber` will be used.
*/
afterColumn?: number;
/**
* Suppress mouse down events.
* If set, the editor will attach a mouse down listener to the view zone and .preventDefault on it.
* Defaults to false
*/
suppressMouseDown?: boolean;
/**
* The height in lines of the view zone.
* If specified, `heightInPx` will be used instead of this.
* If neither `heightInPx` nor `heightInLines` is specified, a default of `heightInLines` = 1 will be chosen.
*/
heightInLines?: number;
/**
* The height in px of the view zone.
* If this is set, the editor will give preference to it rather than `heightInLines` above.
* If neither `heightInPx` nor `heightInLines` is specified, a default of `heightInLines` = 1 will be chosen.
*/
heightInPx?: number;
/**
* The dom node of the view zone
*/
domNode: HTMLElement;
/**
* An optional dom node for the view zone that will be placed in the margin area.
*/
marginDomNode?: HTMLElement;
/**
* Callback which gives the relative top of the view zone as it appears (taking scrolling into account).
*/
onDomNodeTop?: (top: number) => void;
/**
* Callback which gives the height in pixels of the view zone.
*/
onComputedHeight?: (height: number) => void;
}
/**
* An accessor that allows for zones to be added or removed.
*/
export interface IViewZoneChangeAccessor {
/**
* Create a new view zone.
* @param zone Zone to create
* @return A unique identifier to the view zone.
*/
addZone(zone: IViewZone): number;
/**
* Remove a zone
* @param id A unique identifier to the view zone, as returned by the `addZone` call.
*/
removeZone(id: number): void;
/**
* Change a zone's position.
* The editor will rescan the `afterLineNumber` and `afterColumn` properties of a view zone.
*/
layoutZone(id: number): void;
}
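/**
 * Illustrative usage sketch (example only, not part of the original API surface):
 * a consumer would typically create a view zone through ICodeEditor.changeViewZones.
 * The line number, height and CSS class below are assumptions made for the example.
 */
function exampleAddPlaceholderZone(editor: ICodeEditor): number {
	let zoneId = -1;
	editor.changeViewZones((accessor: IViewZoneChangeAccessor) => {
		const domNode = document.createElement('div');
		domNode.className = 'example-placeholder-zone'; // assumed class name
		zoneId = accessor.addZone({
			afterLineNumber: 10,	// assumed line; use 0 to place the zone before the first line
			heightInLines: 2,	// defaults to 1 when neither heightInLines nor heightInPx is given
			domNode: domNode
		});
	});
	return zoneId;
}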
/**
* A positioning preference for rendering content widgets.
*/
export enum ContentWidgetPositionPreference {
/**
* Place the content widget exactly at a position
*/
EXACT,
/**
* Place the content widget above a position
*/
ABOVE,
/**
* Place the content widget below a position
*/
BELOW
}
/**
* A position for rendering content widgets.
*/
export interface IContentWidgetPosition {
/** | */
position: editorCommon.IPosition;
/**
* Placement preference for position, in order of preference.
*/
preference: ContentWidgetPositionPreference[];
}
/**
* A content widget renders inline with the text and can be easily placed 'near' an editor position.
*/
export interface IContentWidget {
/**
* Render this content widget in a location where it could overflow the editor's view dom node.
*/
allowEditorOverflow?: boolean;
suppressMouseDown?: boolean;
/**
* Get a unique identifier of the content widget.
*/
getId(): string;
/**
* Get the dom node of the content widget.
*/
getDomNode(): HTMLElement;
/**
* Get the placement of the content widget.
* If null is returned, the content widget will be placed off screen.
*/
getPosition(): IContentWidgetPosition;
}
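/**
 * Illustrative usage sketch (example only): a minimal content widget anchored above a
 * position. The widget id, DOM contents and position are assumptions made for the
 * example; a consumer would register the result with ICodeEditor.addContentWidget.
 */
function createExampleContentWidget(): IContentWidget {
	const node = document.createElement('div');
	node.textContent = 'example content widget'; // assumed contents
	return {
		getId: () => 'example.contentWidget',
		getDomNode: () => node,
		getPosition: () => ({
			position: { lineNumber: 1, column: 1 },
			preference: [ContentWidgetPositionPreference.ABOVE, ContentWidgetPositionPreference.EXACT]
		})
	};
}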
/**
* A positioning preference for rendering overlay widgets.
*/
export enum OverlayWidgetPositionPreference {
/**
* Position the overlay widget in the top right corner
*/
TOP_RIGHT_CORNER,
/**
* Position the overlay widget in the bottom right corner
*/
BOTTOM_RIGHT_CORNER,
/**
* Position the overlay widget in the top center
*/
TOP_CENTER
}
/**
* A position for rendering overlay widgets.
*/
export interface IOverlayWidgetPosition {
/**
* The position preference for the overlay widget.
*/
preference: OverlayWidgetPositionPreference;
}
/**
* An overlay widget renders on top of the text.
*/
export interface IOverlayWidget {
/**
* Get a unique identifier of the overlay widget.
*/
getId(): string;
/**
* Get the dom node of the overlay widget.
*/
getDomNode(): HTMLElement;
/**
* Get the placement of the overlay widget.
* If null is returned, the overlay widget is responsible to place itself.
*/
getPosition(): IOverlayWidgetPosition;
}
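/**
 * Illustrative usage sketch (example only): a minimal overlay widget pinned to the top
 * right corner. The id and label are assumptions; a consumer would register the result
 * with ICodeEditor.addOverlayWidget.
 */
function createExampleOverlayWidget(): IOverlayWidget {
	const node = document.createElement('div');
	node.textContent = 'example overlay'; // assumed contents
	return {
		getId: () => 'example.overlayWidget',
		getDomNode: () => node,
		getPosition: () => ({ preference: OverlayWidgetPositionPreference.TOP_RIGHT_CORNER })
	};
}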
/**
* Target hit with the mouse in the editor.
*/
export interface IMouseTarget {
/**
* The target element
*/
readonly element: Element;
/**
* The target type
*/
readonly type: editorCommon.MouseTargetType;
/**
* The 'approximate' editor position
*/
readonly position: Position;
/**
* Desired mouse column (e.g. when position.column gets clamped to text length -- clicking after text on a line).
*/
readonly mouseColumn: number;
/**
* The 'approximate' editor range
*/
readonly range: Range;
/**
* Some extra detail.
*/
readonly detail: any;
}
/**
* A mouse event originating from the editor.
*/
export interface IEditorMouseEvent {
readonly event: IMouseEvent;
readonly target: IMouseTarget;
}
/**
* @internal
*/
export type IEditorContributionCtor = IConstructorSignature1<ICodeEditor, editorCommon.IEditorContribution>;
/**
* An overview ruler
* @internal
*/
export interface IOverviewRuler {
getDomNode(): HTMLElement;
dispose(): void;
setZones(zones: editorCommon.OverviewRulerZone[]): void;
setLayout(position: editorCommon.OverviewRulerPosition): void;
}
/**
* A rich code editor.
*/
export interface ICodeEditor extends editorCommon.ICommonCodeEditor {
/**
* An event emitted on a "mouseup".
* @event
*/
onMouseUp(listener: (e: IEditorMouseEvent) => void): IDisposable;
/**
* An event emitted on a "mousedown".
* @event
*/
onMouseDown(listener: (e: IEditorMouseEvent) => void): IDisposable;
/**
* An event emitted on a "mousedrag".
* @internal
* @event
*/
onMouseDrag(listener: (e: IEditorMouseEvent) => void): IDisposable;
/**
* An event emitted on a "mousedrop".
* @internal
* @event
*/
onMouseDrop(listener: (e: IEditorMouseEvent) => void): IDisposable;
/**
* An event emitted on a "contextmenu".
* @event
*/
onContextMenu(listener: (e: IEditorMouseEvent) => void): IDisposable;
/**
* An event emitted on a "mousemove".
* @event
*/
onMouseMove(listener: (e: IEditorMouseEvent) => void): IDisposable;
/**
* An event emitted on a "mouseleave".
* @event
*/
onMouseLeave(listener: (e: IEditorMouseEvent) => void): IDisposable;
/**
* An event emitted on a "keyup".
* @event
*/
onKeyUp(listener: (e: IKeyboardEvent) => void): IDisposable;
/**
* An event emitted on a "keydown".
* @event
*/
onKeyDown(listener: (e: IKeyboardEvent) => void): IDisposable;
/**
* An event emitted when the layout of the editor has changed.
* @event
*/
onDidLayoutChange(listener: (e: editorCommon.EditorLayoutInfo) => void): IDisposable;
/**
* An event emitted when the scroll in the editor has changed.
* @event
*/
onDidScrollChange(listener: (e: editorCommon.IScrollEvent) => void): IDisposable;
/**
* Returns the editor's dom node
*/
getDomNode(): HTMLElement;
/**
* Add a content widget. Widgets must have unique ids, otherwise they will be overwritten.
*/
addContentWidget(widget: IContentWidget): void;
/**
* Layout/Reposition a content widget. This is a ping to the editor to call widget.getPosition()
* and update appropriately.
*/
layoutContentWidget(widget: IContentWidget): void;
/**
* Remove a content widget.
*/
removeContentWidget(widget: IContentWidget): void;
/**
* Add an overlay widget. Widgets must have unique ids, otherwise they will be overwritten.
*/
addOverlayWidget(widget: IOverlayWidget): void;
/**
* Layout/Reposition an overlay widget. This is a ping to the editor to call widget.getPosition()
* and update appropriately.
*/
layoutOverlayWidget(widget: IOverlayWidget): void;
/**
* Remove an overlay widget.
*/
removeOverlayWidget(widget: IOverlayWidget): void;
/**
* Change the view zones. View zones are lost when a new model is attached to the editor.
*/
changeViewZones(callback: (accessor: IViewZoneChangeAccessor) => void): void;
/**
* Returns the range that is currently centered in the view port.
*/
getCenteredRangeInViewport(): Range;
/**
* Get the view zones.
* @internal
*/
getWhitespaces(): editorCommon.IEditorWhitespace[];
/**
* Get the horizontal position (left offset) for the column w.r.t to the beginning of the line.
* This method works only if the line `lineNumber` is currently rendered (in the editor's viewport).
* Use this method with caution.
*/
getOffsetForColumn(lineNumber: number, column: number): number;
/**
* Force an editor render now.
*/
render(): void;
/**
* Get the vertical position (top offset) for the line w.r.t. to the first line.
*/
getTopForLineNumber(lineNumber: number): number;
/**
* Get the vertical position (top offset) for the position w.r.t. to the first line.
*/
getTopForPosition(lineNumber: number, column: number): number;
/**
* Get the hit test target at coordinates `clientX` and `clientY`.
* The coordinates are relative to the top-left of the viewport.
*
* @returns Hit test target or null if the coordinates fall outside the editor or the editor has no model.
*/
getTargetAtClientPoint(clientX: number, clientY: number): IMouseTarget;
/**
* Get the visible position for `position`.
* The result position takes scrolling into account and is relative to the top left corner of the editor.
* Explanation 1: the results of this method will change for the same `position` if the user scrolls the editor.
* Explanation 2: the results of this method will not change if the container of the editor gets repositioned.
* Warning: the results of this method are inaccurate for positions that are outside the current editor viewport.
*/
getScrolledVisiblePosition(position: editorCommon.IPosition): { top: number; left: number; height: number; };
/**
* Set the model ranges that will be hidden in the view.
* @internal
*/
setHiddenAreas(ranges: editorCommon.IRange[]): void;
/**
* @internal
*/
setAriaActiveDescendant(id: string): void;
/**
* Apply the same font settings as the editor to `target`.
*/
applyFontInfo(target: HTMLElement): void;
}
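/**
 * Illustrative usage sketch (example only): subscribing to editor mouse events and
 * disposing of the listener later. The logging is an assumption made for the example.
 */
function exampleTrackMouseDown(editor: ICodeEditor): IDisposable {
	const listener = editor.onMouseDown((e: IEditorMouseEvent) => {
		if (e.target.position) {
			console.log('mouse down at', e.target.position.lineNumber, e.target.position.column);
		}
	});
	return listener; // callers are expected to call listener.dispose() when done
}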
/**
* A rich diff editor.
*/
export interface IDiffEditor extends editorCommon.ICommonDiffEditor {
/**
* @see ICodeEditor.getDomNode
*/
getDomNode(): HTMLElement;
} | * Desired position for the content widget.
* `preference` will also affect the placement. | random_line_split |
dag_abm_catalogo_ubm.js | app.controller('dag_abm_catalogo_ubmController', function ($scope, $route,$rootScope, DreamFactory, CONFIG,sessionService,ngTableParams,$filter,sweet,$timeout, $routeParams, $location, $http, Data, LogGuardarInfo, $sce, registroLog, FileUploader, $window) {
// display the image stored on the server
$scope.imprimirArchivo = function (fum) {
console.log("FUM",fum);
if(fum == null ||fum == ""){
sweet.show('', "No existe Imagen", 'warning');
}else{
$scope.varSpin = true;
$scope.RegistroFUM={
registrado:'OK',
mensaje:''
};
var cadena= fum;
if (cadena.indexOf('?') != -1){
separador = '?';
arreglodecadena = cadena.split(separador);
cadena = arreglodecadena[0];
console.log('arreglo de la cadena',arreglodecadena[0]);
}
var tipoarch=cadena.substr(-4);
console.log('substring: ',cadena.substr(-4));
var imagen = cadena.indexOf('.jpeg');
console.log(imagen);
if(tipoarch == '.pdf'){
$scope.archotro = false;
$scope.archpdf = true;
$('#visorFum object').attr('data',fum);
$timeout(function(){$scope.varSpin=false}, 1000);
}
else {
var tipoimg = tipoarch.toUpperCase();
console.log(tipoimg);
if(tipoimg == '.JPG' || tipoimg == '.PNG' || tipoimg == '.BMP' || tipoimg == '.GIF') {
$scope.archotro = true;
$scope.archpdf = false;
$scope.archivoP=fum;
$('#imgSalida').attr("src",fum);
}
else{ document.location = fum;}
}
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// CATALOGING TREE //////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// call to convert the tree into a file
$scope.actualizarArbol=function(){
$.blockUI();
var resRoles = {
"procedure_name":"dag.sp_dag_catalogo"
};
// service to list roles
var obj=DreamFactory.api[CONFIG.SERVICE].callStoredProcWithParams(resRoles);
obj.success(function (response) {
$scope.obtArbol = response;
var obtArbol=JSON.stringify($scope.obtArbol);
//console.log(obtArbol);
var parametros = {
"NODOS" : obtArbol,
"TARBOL" : "dataCatalogo.json"
};
$.ajax({
data: parametros,
//url: 'http://192.168.17.144:88/dreamfactory/dist/generaArbolAjaxDAG.php',
url: 'http://192.168.5.248/dreamfactory/dist/dag/generaArbolAjaxDAG.php',
//url: [CONFIG.DSP]+'192.168.17.144:88/dreamfactory/dist/generaArbolAjax.php',
type: 'POST',
error: function (response) {
$.unblockUI();
//sweet.show('Exito', 'Se realizó la actualización correctamente', 'success');
}
});
})
obj.error(function(error) {
$.unblockUI();
sweet.show('', 'Actualización no realizada, intentelo nuevamente', 'warning');
});
}
// load the tree file to display it on screen
$scope.jsonArbol = "";
$scope.arbolNodos = function () {
$.ajax({
data:{ } ,
//url: 'http://192.168.17.144:88/dreamfactory/dist/storeArbolAjaxDAG.php',
url: 'http://192.168.5.248/dreamfactory/dist/dag/storeArbolAjaxDAG.php',
type: 'post',
dataType: "json",
success: function (response) {
$timeout(function () {
var tempJsonArbol = JSON.stringify(response);
$scope.jsonArbol = JSON.parse(tempJsonArbol);
$('#tree1').tree({
data: $scope.jsonArbol,
closedIcon: $('<i class="fa fa-plus-circle"/>'),
openedIcon: $('<i class="fa fa-minus-circle"/>'),
autoOpen: 0
});
}, 1000);
}
});
}
// note: do not delete this function even though it appears to do nothing
$scope.alestra = function () {
//$scope.nodoAr
$scope.contadorentrada=0;
$('#tree1').bind(
'tree.select',
function(event) {
if ($scope.contadorentrada==0){
if (event.node) {
// node was selected
var node = event.node;
//alert(node.name);
$scope.contadorentrada++;
$scope.MostrarInformacion(node.id);
//console.log(node);
//$scope.datos.UBI_IT = node.name;
$scope.$apply();
}
else {
// event.n | };
// add a node to the tree
$scope.addNode = function() {
if ($scope.agregado == "false") {
var nameNode = $scope.datos.DAG_CAT_PRESUP_ITEM;
var idNode = parseInt($scope.nodoAr.children.length) + 1;
var padreNode = parseInt($scope.nodoAr.id);
//console.log(nameNode, " ", idNode);
$('#tree1').tree(
'appendNode',
{
name: nameNode,
id: idNode,
padre : padreNode
},
$scope.nodoAr
);
$scope.agregado = "true";
alert("Item agregado como hijo de: " + padreNode);
}
else{
alert("Solo se puede agregar una vez");
};
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////// TREE //////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
$scope.guardarItem = function (){
var size = Object.keys($scope.item).length;
if(size > $scope.NItems ){
//console.log("Ok",size, ' ', $scope.NItems);
var node = $('#tree1').tree('getSelectedNode');
var nodo_carac = "";
for (var j=$scope.NItems;j<size;j++){
//console.log($scope.item[j].caracteristicas1);
var datosItem = {
"procedure_name":"dag.sp_dag_insitem",
"body":{
"params": [
{
"name": "pdag_cat_id",
"value": node.id
},
{
"name": "pdag_cat_nombre",
"value": node.name
},
{
"name": "pdag_cat_caracteristicas",
"value": $scope.item[j].caracteristicas1
}
]
}
};
DreamFactory.api[CONFIG.SERVICE].callStoredProcWithParams(datosItem).success(function (response){
$scope.impresion = response;
})
.error(function(data){
sweet.show('', 'Error al guardar la informacion: ', 'error');
});
}
}
else
{
alert("No existen items nuevos para guardar");
}
}
// Load the characteristics of the selected class
$scope.MostrarInformacion = function (id_item) {
var datosImpresion = {
"procedure_name":"dag.sp_dag_cat_desc_item",
"body":{
"params": [
{
"name": "id_item",
"value": id_item
}
]
}
};
DreamFactory.api[CONFIG.SERVICE].callStoredProcWithParams(datosImpresion).success(function (response){
var DetItem = response;
if (Object.keys(DetItem).length == 1){
$scope.DAG_CAT_DESC_ITEM = DetItem[0].descripcion;
$scope.partida = DetItem[0].partida1;
$scope.DAG_CAT_PART = DetItem[0].partida;
$scope.DAG_CAT_ITEM_IMG = DetItem[0].imagen;
$scope.caracteristicas1 = JSON.parse(DetItem[0].caracteristicas);
var size = Object.keys($scope.caracteristicas1).length;
$scope.caracteristicas = [];
// $scope.caracteristicas.pop();
for(var j=0; j<size; j++){
$scope.caracteristicas.push({
name: $scope.caracteristicas1[j],
value: "-----" //$scope.caracteristicas1[j]
});
}
$scope.MostrarInformacion1(id_item);
}
})
.error(function(data){
sweet.show('', 'Error al cargar la informacion del item: ', 'error');
});
};
// Fill the grid according to the selected class
$scope.partida = "";
$scope.NItems = 0;
$scope.MostrarInformacion1 = function (id_item) {
var datosConsulta = {
"procedure_name":"dag.sp_dag_cat_items",
"body":{
"params": [
{
"name": "id_item",
"value": id_item
}
]
}
};
DreamFactory.api[CONFIG.SERVICE].callStoredProcWithParams(datosConsulta).success(function (response){
var DetItem = response;
$scope.NItems = Object.keys(DetItem).length;
$scope.item = [];
if ($scope.NItems > 0){
for(var j=0; j<$scope.NItems; j++){
var nodo_carac = "";
$scope.caracteristicas1 = JSON.parse(DetItem[j].caracteristicas);
var size1 = Object.keys($scope.caracteristicas1).length;
//console.log($scope.caracteristicas1);
var atributos = "";
var xi=0;
for(var aux in $scope.caracteristicas1){
atributos += aux + " ";
xi++;
if ((eval('$scope.caracteristicas1.'+ aux)!= "") && (aux != "PARTIDA")){
nodo_carac = nodo_carac + ' ' + aux + ': ' + eval('$scope.caracteristicas1.'+ aux) ;
if (xi<size1-1){
nodo_carac = nodo_carac + ',';
}
//console.log(aux, ' ' , eval('$scope.caracteristicas1.'+ aux));
}
if (aux == "PARTIDA"){
$scope.partida = eval('$scope.caracteristicas1.'+ aux);
//console.log(aux, ' ' , eval('$scope.caracteristicas1.'+ aux));
}
}
$scope.item.push({
id: DetItem[j].id,
partida: $scope.partida,
nombre: DetItem[j].nombre,
caracteristicas: nodo_carac,
caracteristicas1: $scope.caracteristicas1
});
}
//console.log($scope.item);
}
})
.error(function(data){
sweet.show('', 'Error al cargar la informacion del item: ', 'error');
});
};
// Add items
$scope.pushItem = function () {
var size = Object.keys($scope.caracteristicas).length;
var nodo_carac = Array();
var nodo_carac1 = Array();
var node = $('#tree1').tree('getSelectedNode');
if (node.children.length == 0){
nodo_carac = "";
nodo_carac1 = '{"PARTIDA":' + $scope.partida ;
for(var j=0; j<size; j++){
//console.log($scope.caracteristicas[j].name,':',$scope.caracteristicas[j].value);
nodo_carac = nodo_carac + ' ' + $scope.caracteristicas[j].name + ': ' + $scope.caracteristicas[j].value;
if (j<size-1) {
nodo_carac = nodo_carac + ', ';
}
nodo_carac1 = nodo_carac1 + ',' + JSON.stringify($scope.caracteristicas[j].name) + ':' + JSON.stringify($scope.caracteristicas[j].value);
}
nodo_carac1 = nodo_carac1 + '}';
//console.log(nodo_carac1);
//console.log(node.children.length);
$scope.item.push({
id: $scope.item.length + 1,
partida: $scope.partida,
nombre: node.name,
caracteristicas: nodo_carac,
caracteristicas1: nodo_carac1
});
console.log(nodo_carac1);
}
else
{
alert("No se puede agregar items en nodos que tienen hijos");
}
//console.log($scope.item);
}
// Remove the last item
$scope.dropItem = function () {
var size = Object.keys($scope.item).length;
//console.log($scope.NItems);
if(size > $scope.NItems ){
$scope.item.pop();
}
else
{
alert("No se puede eliminar mas");
console.log("No se puede eliminar mas");
}
}
// Form initialization
$scope.$on('api:ready',function(){
//alert("DAGG");
$scope.arbolNodos();
//$scope.datos.agregado = "false";
});
$scope.inicioDag = function () {
if(DreamFactory.api[CONFIG.SERVICE]){
//alert("DAGG");
$scope.arbolNodos();
//$scope.datos.agregado = "false";
}
};
}); | ode is null
// a node was deselected
// e.previous_node contains the deselected node
}
}
}
);
| conditional_block |
dag_abm_catalogo_ubm.js | app.controller('dag_abm_catalogo_ubmController', function ($scope, $route,$rootScope, DreamFactory, CONFIG,sessionService,ngTableParams,$filter,sweet,$timeout, $routeParams, $location, $http, Data, LogGuardarInfo, $sce, registroLog, FileUploader, $window) {
// display the image stored on the server
$scope.imprimirArchivo = function (fum) {
console.log("FUM",fum);
if(fum == null ||fum == ""){
sweet.show('', "No existe Imagen", 'warning');
}else{
$scope.varSpin = true;
$scope.RegistroFUM={
registrado:'OK',
mensaje:''
};
var cadena= fum;
if (cadena.indexOf('?') != -1){
separador = '?';
arreglodecadena = cadena.split(separador);
cadena = arreglodecadena[0];
console.log('arreglo de la cadena',arreglodecadena[0]);
}
var tipoarch=cadena.substr(-4);
console.log('substring: ',cadena.substr(-4));
var imagen = cadena.indexOf('.jpeg');
console.log(imagen);
if(tipoarch == '.pdf'){
$scope.archotro = false;
$scope.archpdf = true;
$('#visorFum object').attr('data',fum);
$timeout(function(){$scope.varSpin=false}, 1000);
}
else {
var tipoimg = tipoarch.toUpperCase();
console.log(tipoimg);
if(tipoimg == '.JPG' || tipoimg == '.PNG' || tipoimg == '.BMP' || tipoimg == '.GIF') {
$scope.archotro = true;
$scope.archpdf = false;
$scope.archivoP=fum;
$('#imgSalida').attr("src",fum);
}
else{ document.location = fum;}
}
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// CATALOGING TREE //////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// call to convert the tree into a file
$scope.actualizarArbol=function(){
$.blockUI();
var resRoles = {
"procedure_name":"dag.sp_dag_catalogo"
};
// service to list roles
var obj=DreamFactory.api[CONFIG.SERVICE].callStoredProcWithParams(resRoles);
obj.success(function (response) {
$scope.obtArbol = response;
var obtArbol=JSON.stringify($scope.obtArbol);
//console.log(obtArbol);
var parametros = {
"NODOS" : obtArbol,
"TARBOL" : "dataCatalogo.json"
};
$.ajax({
data: parametros,
//url: 'http://192.168.17.144:88/dreamfactory/dist/generaArbolAjaxDAG.php',
url: 'http://192.168.5.248/dreamfactory/dist/dag/generaArbolAjaxDAG.php',
//url: [CONFIG.DSP]+'192.168.17.144:88/dreamfactory/dist/generaArbolAjax.php',
type: 'POST',
error: function (response) {
$.unblockUI();
//sweet.show('Exito', 'Se realizó la actualización correctamente', 'success');
}
});
})
obj.error(function(error) {
$.unblockUI();
sweet.show('', 'Actualización no realizada, intentelo nuevamente', 'warning');
});
}
// load the tree file to display it on screen
$scope.jsonArbol = "";
$scope.arbolNodos = function () {
$.ajax({
data:{ } ,
//url: 'http://192.168.17.144:88/dreamfactory/dist/storeArbolAjaxDAG.php',
url: 'http://192.168.5.248/dreamfactory/dist/dag/storeArbolAjaxDAG.php',
type: 'post',
dataType: "json",
success: function (response) {
$timeout(function () {
var tempJsonArbol = JSON.stringify(response);
$scope.jsonArbol = JSON.parse(tempJsonArbol);
$('#tree1').tree({
data: $scope.jsonArbol,
closedIcon: $('<i class="fa fa-plus-circle"/>'),
openedIcon: $('<i class="fa fa-minus-circle"/>'),
autoOpen: 0
});
}, 1000);
}
});
}
// note: do not delete this function even though it appears to do nothing
$scope.alestra = function () {
//$scope.nodoAr
$scope.contadorentrada=0;
$('#tree1').bind(
'tree.select',
function(event) {
if ($scope.contadorentrada==0){
if (event.node) {
// node was selected
var node = event.node;
//alert(node.name);
$scope.contadorentrada++;
$scope.MostrarInformacion(node.id);
//console.log(node);
//$scope.datos.UBI_IT = node.name;
$scope.$apply();
}
else {
// event.node is null
// a node was deselected
// e.previous_node contains the deselected node
}
}
}
);
};
// add a node to the tree
$scope.addNode = function() {
if ($scope.agregado == "false") {
var nameNode = $scope.datos.DAG_CAT_PRESUP_ITEM;
var idNode = parseInt($scope.nodoAr.children.length) + 1;
var padreNode = parseInt($scope.nodoAr.id);
//console.log(nameNode, " ", idNode);
$('#tree1').tree(
'appendNode',
{
name: nameNode,
id: idNode,
padre : padreNode
},
$scope.nodoAr
);
$scope.agregado = "true";
alert("Item agregado como hijo de: " + padreNode);
}
else{
alert("Solo se puede agregar una vez");
};
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////// TREE //////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
$scope.guardarItem = function (){
var size = Object.keys($scope.item).length;
if(size > $scope.NItems ){
//console.log("Ok",size, ' ', $scope.NItems);
var node = $('#tree1').tree('getSelectedNode');
var nodo_carac = "";
for (var j=$scope.NItems;j<size;j++){
//console.log($scope.item[j].caracteristicas1);
var datosItem = {
"procedure_name":"dag.sp_dag_insitem",
"body":{
"params": [
{
"name": "pdag_cat_id",
"value": node.id
},
{
"name": "pdag_cat_nombre",
"value": node.name
},
{
"name": "pdag_cat_caracteristicas",
"value": $scope.item[j].caracteristicas1
}
]
}
};
DreamFactory.api[CONFIG.SERVICE].callStoredProcWithParams(datosItem).success(function (response){
$scope.impresion = response;
})
.error(function(data){
sweet.show('', 'Error al guardar la informacion: ', 'error');
});
}
}
else
{
alert("No existen items nuevos para guardar");
}
}
// Load the characteristics of the selected class
$scope.MostrarInformacion = function (id_item) {
var datosImpresion = {
"procedure_name":"dag.sp_dag_cat_desc_item",
"body":{
"params": [
{
"name": "id_item",
"value": id_item
}
]
}
};
DreamFactory.api[CONFIG.SERVICE].callStoredProcWithParams(datosImpresion).success(function (response){
var DetItem = response;
if (Object.keys(DetItem).length == 1){
$scope.DAG_CAT_DESC_ITEM = DetItem[0].descripcion;
$scope.partida = DetItem[0].partida1;
$scope.DAG_CAT_PART = DetItem[0].partida;
$scope.DAG_CAT_ITEM_IMG = DetItem[0].imagen;
$scope.caracteristicas1 = JSON.parse(DetItem[0].caracteristicas);
var size = Object.keys($scope.caracteristicas1).length;
$scope.caracteristicas = [];
// $scope.caracteristicas.pop();
for(var j=0; j<size; j++){
$scope.caracteristicas.push({
name: $scope.caracteristicas1[j],
value: "-----" //$scope.caracteristicas1[j]
});
}
$scope.MostrarInformacion1(id_item);
}
})
.error(function(data){
sweet.show('', 'Error al cargar la informacion del item: ', 'error');
});
};
// Fill the grid according to the selected class
$scope.partida = "";
$scope.NItems = 0;
$scope.MostrarInformacion1 = function (id_item) {
var datosConsulta = {
"procedure_name":"dag.sp_dag_cat_items",
"body":{
"params": [
{
"name": "id_item",
"value": id_item
}
]
}
};
DreamFactory.api[CONFIG.SERVICE].callStoredProcWithParams(datosConsulta).success(function (response){
var DetItem = response;
$scope.NItems = Object.keys(DetItem).length;
$scope.item = [];
if ($scope.NItems > 0){
for(var j=0; j<$scope.NItems; j++){
var nodo_carac = "";
$scope.caracteristicas1 = JSON.parse(DetItem[j].caracteristicas);
var size1 = Object.keys($scope.caracteristicas1).length;
//console.log($scope.caracteristicas1);
var atributos = "";
var xi=0;
for(var aux in $scope.caracteristicas1){
atributos += aux + " ";
xi++;
if ((eval('$scope.caracteristicas1.'+ aux)!= "") && (aux != "PARTIDA")){
nodo_carac = nodo_carac + ' ' + aux + ': ' + eval('$scope.caracteristicas1.'+ aux) ;
if (xi<size1-1){
nodo_carac = nodo_carac + ',';
}
//console.log(aux, ' ' , eval('$scope.caracteristicas1.'+ aux));
}
if (aux == "PARTIDA"){
$scope.partida = eval('$scope.caracteristicas1.'+ aux);
//console.log(aux, ' ' , eval('$scope.caracteristicas1.'+ aux));
}
} | caracteristicas1: $scope.caracteristicas1
});
}
//console.log($scope.item);
}
})
.error(function(data){
sweet.show('', 'Error al cargar la informacion del item: ', 'error');
});
};
// Add items
$scope.pushItem = function () {
var size = Object.keys($scope.caracteristicas).length;
var nodo_carac = Array();
var nodo_carac1 = Array();
var node = $('#tree1').tree('getSelectedNode');
if (node.children.length == 0){
nodo_carac = "";
nodo_carac1 = '{"PARTIDA":' + $scope.partida ;
for(var j=0; j<size; j++){
//console.log($scope.caracteristicas[j].name,':',$scope.caracteristicas[j].value);
nodo_carac = nodo_carac + ' ' + $scope.caracteristicas[j].name + ': ' + $scope.caracteristicas[j].value;
if (j<size-1) {
nodo_carac = nodo_carac + ', ';
}
nodo_carac1 = nodo_carac1 + ',' + JSON.stringify($scope.caracteristicas[j].name) + ':' + JSON.stringify($scope.caracteristicas[j].value);
}
nodo_carac1 = nodo_carac1 + '}';
//console.log(nodo_carac1);
//console.log(node.children.length);
$scope.item.push({
id: $scope.item.length + 1,
partida: $scope.partida,
nombre: node.name,
caracteristicas: nodo_carac,
caracteristicas1: nodo_carac1
});
console.log(nodo_carac1);
}
else
{
alert("No se puede agregar items en nodos que tienen hijos");
}
//console.log($scope.item);
}
// Remove the last item
$scope.dropItem = function () {
var size = Object.keys($scope.item).length;
//console.log($scope.NItems);
if(size > $scope.NItems ){
$scope.item.pop();
}
else
{
alert("No se puede eliminar mas");
console.log("No se puede eliminar mas");
}
}
// Form initialization
$scope.$on('api:ready',function(){
//alert("DAGG");
$scope.arbolNodos();
//$scope.datos.agregado = "false";
});
$scope.inicioDag = function () {
if(DreamFactory.api[CONFIG.SERVICE]){
//alert("DAGG");
$scope.arbolNodos();
//$scope.datos.agregado = "false";
}
};
}); | $scope.item.push({
id: DetItem[j].id,
partida: $scope.partida,
nombre: DetItem[j].nombre,
caracteristicas: nodo_carac, | random_line_split |
Buy.js | import React from "react";
import Header from "./Header";
import Footer from "./Footer";
import emailjs from "emailjs-com";
class Buy extends React.Component {
constructor(props) {
super(props);
this.state = {
productEmail: "",
};
}
obtainProductsLocalStorage() {
let productLS;
if (localStorage.getItem("productos") === null) {
productLS = [];
} else {
productLS = JSON.parse(localStorage.getItem("productos"));
}
return productLS;
}
totalCalculate() {
let productLS;
let total = 0,
subtotal = 0,
igv = 0;
productLS = this.obtainProductsLocalStorage();
for (let i = 0; i < productLS.length; i++) {
let element = Number(productLS[i].precio * productLS[i].cantidad);
total = total + element;
}
igv = parseFloat(total * 0.18).toFixed(2);
subtotal = parseFloat(total - igv).toFixed(2);
document.getElementById("subtotal").innerHTML = "$" + subtotal;
document.getElementById("igv").innerHTML = "$" + igv;
document.getElementById("total").value = "$" + total.toFixed(2);
}
readLocalStorageShop() {
let productsLS;
productsLS = this.obtainProductsLocalStorage();
productsLS.forEach(function (product) {
const row = document.createElement("tr");
row.innerHTML = `
<td>
<img src="${product.imagen}" width=100>
</td>
<td>${product.titulo}</td>
<td>${product.precio}</td>
<td>
<input type="number" class="form-control cantidad" min="1" value=${
product.cantidad
}>
</td>
<td id='subtotals'>${(product.precio * product.cantidad).toFixed(
2
)}</td>
<td>
<a href="#" class="delete-product bx bxs-x-circle" style="font-size:30px" data-id="${
product.id
}"}></a>
</td>
`;
document.querySelector("#buy-list tbody").appendChild(row);
});
}
eraseProduct(e) {
e.preventDefault();
let product, productID;
if (e.target.classList.contains("delete-product")) {
e.target.parentElement.parentElement.remove();
product = e.target.parentElement.parentElement;
productID = product.querySelector("a").getAttribute("data-id");
}
this.deleteProductLocalStorage(productID);
this.totalCalculate();
}
deleteProductLocalStorage(productID) {
this.totalCalculate();
let productsLS;
productsLS = this.obtainProductsLocalStorage();
productsLS.forEach(function (productLS, index) {
if (productLS.id === productID) {
productsLS.splice(index, 1);
}
});
localStorage.setItem("productos", JSON.stringify(productsLS));
}
componentDidMount() {
this.totalCalculate();
document.addEventListener("DOMContentLoaded", this.readLocalStorageShop());
}
emptyLocalStorage() {
localStorage.clear();
}
purchaseProcess(e) {
e.preventDefault();
if (this.obtainProductsLocalStorage().length === 0) {
window.alert(
"No se puede realizar la compra porque no hay productos seleccionados"
);
window.location.href = "/menu";
} else if (
document.getElementById("client").value === "" ||
document.getElementById("address").value === ""
) | else {
const loadingGif = document.querySelector("#load");
loadingGif.style.display = "block";
const send = document.createElement("img");
send.src = "../images/mail.gif";
send.id = "mailImage";
let productsLS, product;
productsLS = JSON.parse(localStorage.getItem("productos"));
productsLS.map((productLS, i) => {
product +=
"\n" +
JSON.stringify(
`Plato: ${productLS.titulo} Precio: ${productLS.precio} Cantidad: ${productLS.cantidad}`
);
});
product = product.replace("undefined", "");
emailjs
.send(
"service_dxswoo3",
"template_mlm662d",
{
addressee: document.getElementById("client").value,
products: product,
cc_to: document.getElementById("address").value,
total_value: document.getElementById("total").value,
},
"user_CWVJnQVkk2WBBvozaeuKP"
)
.then(
function () {
loadingGif.style.display = "none";
document.querySelector("#loaders").appendChild(send);
setTimeout(() => {
send.remove();
localStorage.clear();
alert(
"Pedido registrado exitosamente\n Revisa el correo diligenciado, por favor"
);
window.location = "/menu";
}, 2000);
},
function (err) {
alert(
"Falló el envío del email\r\n Respuesta:\n " + JSON.stringify(err)
);
}
);
}
}
obtainEvent(e) {
e.preventDefault();
this.totalCalculate();
let id, cant, product, productsLS;
if (e.target.classList.contains("cantidad")) {
product = e.target.parentElement.parentElement;
id = product.querySelector("a").getAttribute("data-id");
cant = product.querySelector("input").value;
let updateCant = document.querySelectorAll("#subtotals");
productsLS = this.obtainProductsLocalStorage();
productsLS.forEach(function (productLS, index) {
if (productLS.id === id) {
productLS.cantidad = cant;
updateCant[index].innerHTML = Number(cant * productsLS[index].precio);
}
});
localStorage.setItem("productos", JSON.stringify(productsLS));
} else {
console.log("click afuera");
}
}
twoActionsBuy(e) {
this.obtainEvent(e);
this.eraseProduct(e);
}
render() {
return (
<>
<Header
total={() => {
this.totalCalculate();
}}
/>
<section id="buy">
<div className="container">
<div className="row mt-3">
<div className="col">
<h2 className="d-flex justify-content-center mb-3">
Realizar Pedido
</h2>
<form id="buy-process" action="#" method="POST">
<div className="form-group row">
<label
for="client"
className="col-12 col-md-2 col-form-label h2"
>
Cliente :
</label>
<div className="col-12 col-md-10">
<input
type="text"
className="form-control"
id="client"
placeholder="Ingresa nombre cliente"
name="addressee"
/>
</div>
</div>
<div className="form-group row">
<label
for="email"
className="col-12 col-md-2 col-form-label h2"
>
Correo :
</label>
<div className="col-12 col-md-10">
<input
type="email"
className="form-control"
id="address"
placeholder="Ingresa tu correo"
name="cc_to"
/>
</div>
</div>
<div
id="buy-car"
className="table-responsive"
onClick={(e) => this.twoActionsBuy(e)}
onChange={(e) => this.obtainEvent(e)}
onKeyUp={(e) => this.obtainEvent(e)}
>
<table className="table" id="buy-list">
<thead>
<tr>
<th scope="col">Imagen</th>
<th scope="col">Nombre</th>
<th scope="col">Precio</th>
<th scope="col">Cantidad</th>
<th scope="col">Sub Total</th>
<th scope="col">Eliminar</th>
</tr>
</thead>
<tbody></tbody>
<tr>
<th colspan="4" scope="col" className="text-right">
SUB TOTAL :
</th>
<th scope="col">
<p id="subtotal"></p>
</th>
</tr>
<tr>
<th colspan="4" scope="col" className="text-right">
IGV :
</th>
<th scope="col">
<p id="igv"></p>
</th>
</tr>
<tr>
<th colspan="4" scope="col" className="text-right">
TOTAL :
</th>
<th scope="col">
<input
type="text"
id="total"
name="total_value"
readOnly
className="font-weight-bold border-0"
></input>
</th>
</tr>
</table>
</div>
<div className="row justify-content-center" id="loaders">
<img id="load" src="../images/load.gif" />
</div>
<div className="row justify-content-between">
<div className="col-md-4 mb-2">
<a href="/menu" className="btn btn-info btn-block">
Seguir comprando
</a>
</div>
<div className="col-xs-12 col-md-4">
<button
href=""
className="btn btn-success btn-block"
type="submit"
id="process"
onClick={(e) => this.purchaseProcess(e)}
>
Realizar compra
</button>
</div>
</div>
</form>
</div>
</div>
</div>
</section>
<Footer />
</>
);
}
}
export default Buy;
| {
window.alert("Por favor, diligencie todos los campos");
} | conditional_block |
Buy.js | import React from "react";
import Header from "./Header";
import Footer from "./Footer";
import emailjs from "emailjs-com";
class Buy extends React.Component {
constructor(props) {
super(props);
this.state = {
productEmail: "",
};
}
obtainProductsLocalStorage() {
let productLS;
if (localStorage.getItem("productos") === null) {
productLS = [];
} else {
productLS = JSON.parse(localStorage.getItem("productos"));
}
return productLS;
}
totalCalculate() {
let productLS;
let total = 0,
subtotal = 0,
igv = 0;
productLS = this.obtainProductsLocalStorage();
for (let i = 0; i < productLS.length; i++) {
let element = Number(productLS[i].precio * productLS[i].cantidad);
total = total + element;
}
igv = parseFloat(total * 0.18).toFixed(2);
subtotal = parseFloat(total - igv).toFixed(2);
document.getElementById("subtotal").innerHTML = "$" + subtotal;
document.getElementById("igv").innerHTML = "$" + igv;
document.getElementById("total").value = "$" + total.toFixed(2);
}
readLocalStorageShop() {
let productsLS;
productsLS = this.obtainProductsLocalStorage();
productsLS.forEach(function (product) {
const row = document.createElement("tr");
row.innerHTML = `
<td>
<img src="${product.imagen}" width=100>
</td>
<td>${product.titulo}</td>
<td>${product.precio}</td>
<td>
<input type="number" class="form-control cantidad" min="1" value=${
product.cantidad
}>
</td>
<td id='subtotals'>${(product.precio * product.cantidad).toFixed(
2
)}</td>
<td>
<a href="#" class="delete-product bx bxs-x-circle" style="font-size:30px" data-id="${
product.id
}"}></a>
</td>
`;
document.querySelector("#buy-list tbody").appendChild(row);
});
}
eraseProduct(e) {
e.preventDefault();
let product, productID;
if (e.target.classList.contains("delete-product")) {
e.target.parentElement.parentElement.remove();
product = e.target.parentElement.parentElement;
productID = product.querySelector("a").getAttribute("data-id");
}
this.deleteProductLocalStorage(productID);
this.totalCalculate();
}
deleteProductLocalStorage(productID) {
this.totalCalculate();
let productsLS;
productsLS = this.obtainProductsLocalStorage();
productsLS.forEach(function (productLS, index) {
if (productLS.id === productID) {
productsLS.splice(index, 1);
}
});
localStorage.setItem("productos", JSON.stringify(productsLS));
}
componentDidMount() {
this.totalCalculate();
document.addEventListener("DOMContentLoaded", this.readLocalStorageShop());
}
emptyLocalStorage() {
localStorage.clear();
}
purchaseProcess(e) | obtainEvent(e) {
e.preventDefault();
this.totalCalculate();
let id, cant, product, productsLS;
if (e.target.classList.contains("cantidad")) {
product = e.target.parentElement.parentElement;
id = product.querySelector("a").getAttribute("data-id");
cant = product.querySelector("input").value;
let updateCant = document.querySelectorAll("#subtotals");
productsLS = this.obtainProductsLocalStorage();
productsLS.forEach(function (productLS, index) {
if (productLS.id === id) {
productLS.cantidad = cant;
updateCant[index].innerHTML = Number(cant * productsLS[index].precio);
}
});
localStorage.setItem("productos", JSON.stringify(productsLS));
} else {
console.log("click afuera");
}
}
twoActionsBuy(e) {
this.obtainEvent(e);
this.eraseProduct(e);
}
render() {
return (
<>
<Header
total={() => {
this.totalCalculate();
}}
/>
<section id="buy">
<div className="container">
<div className="row mt-3">
<div className="col">
<h2 className="d-flex justify-content-center mb-3">
Realizar Pedido
</h2>
<form id="buy-process" action="#" method="POST">
<div className="form-group row">
<label
for="client"
className="col-12 col-md-2 col-form-label h2"
>
Cliente :
</label>
<div className="col-12 col-md-10">
<input
type="text"
className="form-control"
id="client"
placeholder="Ingresa nombre cliente"
name="addressee"
/>
</div>
</div>
<div className="form-group row">
<label
for="email"
className="col-12 col-md-2 col-form-label h2"
>
Correo :
</label>
<div className="col-12 col-md-10">
<input
type="email"
className="form-control"
id="address"
placeholder="Ingresa tu correo"
name="cc_to"
/>
</div>
</div>
<div
id="buy-car"
className="table-responsive"
onClick={(e) => this.twoActionsBuy(e)}
onChange={(e) => this.obtainEvent(e)}
onKeyUp={(e) => this.obtainEvent(e)}
>
<table className="table" id="buy-list">
<thead>
<tr>
<th scope="col">Imagen</th>
<th scope="col">Nombre</th>
<th scope="col">Precio</th>
<th scope="col">Cantidad</th>
<th scope="col">Sub Total</th>
<th scope="col">Eliminar</th>
</tr>
</thead>
<tbody></tbody>
<tr>
<th colspan="4" scope="col" className="text-right">
SUB TOTAL :
</th>
<th scope="col">
<p id="subtotal"></p>
</th>
</tr>
<tr>
<th colspan="4" scope="col" className="text-right">
IGV :
</th>
<th scope="col">
<p id="igv"></p>
</th>
</tr>
<tr>
<th colspan="4" scope="col" className="text-right">
TOTAL :
</th>
<th scope="col">
<input
type="text"
id="total"
name="total_value"
readOnly
className="font-weight-bold border-0"
></input>
</th>
</tr>
</table>
</div>
<div className="row justify-content-center" id="loaders">
<img id="load" src="../images/load.gif" />
</div>
<div className="row justify-content-between">
<div className="col-md-4 mb-2">
<a href="/menu" className="btn btn-info btn-block">
Seguir comprando
</a>
</div>
<div className="col-xs-12 col-md-4">
<button
href=""
className="btn btn-success btn-block"
type="submit"
id="process"
onClick={(e) => this.purchaseProcess(e)}
>
Realizar compra
</button>
</div>
</div>
</form>
</div>
</div>
</div>
</section>
<Footer />
</>
);
}
}
export default Buy;
| {
e.preventDefault();
if (this.obtainProductsLocalStorage().length === 0) {
window.alert(
"No se puede realizar la compra porque no hay productos seleccionados"
);
window.location.href = "/menu";
} else if (
document.getElementById("client").value === "" ||
document.getElementById("address").value === ""
) {
window.alert("Por favor, diligencie todos los campos");
} else {
const loadingGif = document.querySelector("#load");
loadingGif.style.display = "block";
const send = document.createElement("img");
send.src = "../images/mail.gif";
send.id = "mailImage";
let productsLS, product;
productsLS = JSON.parse(localStorage.getItem("productos"));
productsLS.map((productLS, i) => {
product +=
"\n" +
JSON.stringify(
`Plato: ${productLS.titulo} Precio: ${productLS.precio} Cantidad: ${productLS.cantidad}`
);
});
product = product.replace("undefined", "");
emailjs
.send(
"service_dxswoo3",
"template_mlm662d",
{
addressee: document.getElementById("client").value,
products: product,
cc_to: document.getElementById("address").value,
total_value: document.getElementById("total").value,
},
"user_CWVJnQVkk2WBBvozaeuKP"
)
.then(
function () {
loadingGif.style.display = "none";
document.querySelector("#loaders").appendChild(send);
setTimeout(() => {
send.remove();
localStorage.clear();
alert(
"Pedido registrado exitosamente\n Revisa el correo diligenciado, por favor"
);
window.location = "/menu";
}, 2000);
},
function (err) {
alert(
"Falló el envío del email\r\n Respuesta:\n " + JSON.stringify(err)
);
}
);
}
}
| identifier_body |
Buy.js | import React from "react";
import Header from "./Header";
import Footer from "./Footer";
import emailjs from "emailjs-com";
class Buy extends React.Component {
constructor(props) {
super(props);
this.state = {
productEmail: "",
};
}
obtainProductsLocalStorage() { | productLS = JSON.parse(localStorage.getItem("productos"));
}
return productLS;
}
totalCalculate() {
let productLS;
let total = 0,
subtotal = 0,
igv = 0;
productLS = this.obtainProductsLocalStorage();
for (let i = 0; i < productLS.length; i++) {
let element = Number(productLS[i].precio * productLS[i].cantidad);
total = total + element;
}
igv = parseFloat(total * 0.18).toFixed(2);
subtotal = parseFloat(total - igv).toFixed(2);
document.getElementById("subtotal").innerHTML = "$" + subtotal;
document.getElementById("igv").innerHTML = "$" + igv;
document.getElementById("total").value = "$" + total.toFixed(2);
}
readLocalStorageShop() {
let productsLS;
productsLS = this.obtainProductsLocalStorage();
productsLS.forEach(function (product) {
const row = document.createElement("tr");
row.innerHTML = `
<td>
<img src="${product.imagen}" width=100>
</td>
<td>${product.titulo}</td>
<td>${product.precio}</td>
<td>
<input type="number" class="form-control cantidad" min="1" value=${
product.cantidad
}>
</td>
<td id='subtotals'>${(product.precio * product.cantidad).toFixed(
2
)}</td>
<td>
<a href="#" class="delete-product bx bxs-x-circle" style="font-size:30px" data-id="${
product.id
}"}></a>
</td>
`;
document.querySelector("#buy-list tbody").appendChild(row);
});
}
eraseProduct(e) {
e.preventDefault();
let product, productID;
if (e.target.classList.contains("delete-product")) {
e.target.parentElement.parentElement.remove();
product = e.target.parentElement.parentElement;
productID = product.querySelector("a").getAttribute("data-id");
}
this.deleteProductLocalStorage(productID);
this.totalCalculate();
}
deleteProductLocalStorage(productID) {
this.totalCalculate();
let productsLS;
productsLS = this.obtainProductsLocalStorage();
productsLS.forEach(function (productLS, index) {
if (productLS.id === productID) {
productsLS.splice(index, 1);
}
});
localStorage.setItem("productos", JSON.stringify(productsLS));
}
componentDidMount() {
this.totalCalculate();
document.addEventListener("DOMContentLoaded", this.readLocalStorageShop());
}
emptyLocalStorage() {
localStorage.clear();
}
purchaseProcess(e) {
e.preventDefault();
if (this.obtainProductsLocalStorage().length === 0) {
window.alert(
"No se puede realizar la compra porque no hay productos seleccionados"
);
window.location.href = "/menu";
} else if (
document.getElementById("client").value === "" ||
document.getElementById("address").value === ""
) {
window.alert("Por favor, diligencie todos los campos");
} else {
const loadingGif = document.querySelector("#load");
loadingGif.style.display = "block";
const send = document.createElement("img");
send.src = "../images/mail.gif";
send.id = "mailImage";
let productsLS, product;
productsLS = JSON.parse(localStorage.getItem("productos"));
productsLS.map((productLS, i) => {
product +=
"\n" +
JSON.stringify(
`Plato: ${productLS.titulo} Precio: ${productLS.precio} Cantidad: ${productLS.cantidad}`
);
});
product = product.replace("undefined", "");
emailjs
.send(
"service_dxswoo3",
"template_mlm662d",
{
addressee: document.getElementById("client").value,
products: product,
cc_to: document.getElementById("address").value,
total_value: document.getElementById("total").value,
},
"user_CWVJnQVkk2WBBvozaeuKP"
)
.then(
function () {
loadingGif.style.display = "none";
document.querySelector("#loaders").appendChild(send);
setTimeout(() => {
send.remove();
localStorage.clear();
alert(
"Pedido registrado exitosamente\n Revisa el correo diligenciado, por favor"
);
window.location = "/menu";
}, 2000);
},
function (err) {
alert(
"Falló el envío del email\r\n Respuesta:\n " + JSON.stringify(err)
);
}
);
}
}
obtainEvent(e) {
e.preventDefault();
this.totalCalculate();
let id, cant, product, productsLS;
if (e.target.classList.contains("cantidad")) {
product = e.target.parentElement.parentElement;
id = product.querySelector("a").getAttribute("data-id");
cant = product.querySelector("input").value;
let updateCant = document.querySelectorAll("#subtotals");
productsLS = this.obtainProductsLocalStorage();
productsLS.forEach(function (productLS, index) {
if (productLS.id === id) {
productLS.cantidad = cant;
updateCant[index].innerHTML = Number(cant * productsLS[index].precio);
}
});
localStorage.setItem("productos", JSON.stringify(productsLS));
} else {
console.log("click afuera");
}
}
twoActionsBuy(e) {
this.obtainEvent(e);
this.eraseProduct(e);
}
render() {
return (
<>
<Header
total={() => {
this.totalCalculate();
}}
/>
<section id="buy">
<div className="container">
<div className="row mt-3">
<div className="col">
<h2 className="d-flex justify-content-center mb-3">
Realizar Pedido
</h2>
<form id="buy-process" action="#" method="POST">
<div className="form-group row">
<label
for="client"
className="col-12 col-md-2 col-form-label h2"
>
Cliente :
</label>
<div className="col-12 col-md-10">
<input
type="text"
className="form-control"
id="client"
placeholder="Ingresa nombre cliente"
name="addressee"
/>
</div>
</div>
<div className="form-group row">
<label
for="email"
className="col-12 col-md-2 col-form-label h2"
>
Correo :
</label>
<div className="col-12 col-md-10">
<input
type="email"
className="form-control"
id="address"
placeholder="Ingresa tu correo"
name="cc_to"
/>
</div>
</div>
<div
id="buy-car"
className="table-responsive"
onClick={(e) => this.twoActionsBuy(e)}
onChange={(e) => this.obtainEvent(e)}
onKeyUp={(e) => this.obtainEvent(e)}
>
<table className="table" id="buy-list">
<thead>
<tr>
<th scope="col">Imagen</th>
<th scope="col">Nombre</th>
<th scope="col">Precio</th>
<th scope="col">Cantidad</th>
<th scope="col">Sub Total</th>
<th scope="col">Eliminar</th>
</tr>
</thead>
<tbody></tbody>
<tr>
<th colspan="4" scope="col" className="text-right">
SUB TOTAL :
</th>
<th scope="col">
<p id="subtotal"></p>
</th>
</tr>
<tr>
<th colspan="4" scope="col" className="text-right">
IGV :
</th>
<th scope="col">
<p id="igv"></p>
</th>
</tr>
<tr>
<th colspan="4" scope="col" className="text-right">
TOTAL :
</th>
<th scope="col">
<input
type="text"
id="total"
name="total_value"
readOnly
className="font-weight-bold border-0"
></input>
</th>
</tr>
</table>
</div>
<div className="row justify-content-center" id="loaders">
<img id="load" src="../images/load.gif" />
</div>
<div className="row justify-content-between">
<div className="col-md-4 mb-2">
<a href="/menu" className="btn btn-info btn-block">
Seguir comprando
</a>
</div>
<div className="col-xs-12 col-md-4">
<button
href=""
className="btn btn-success btn-block"
type="submit"
id="process"
onClick={(e) => this.purchaseProcess(e)}
>
Realizar compra
</button>
</div>
</div>
</form>
</div>
</div>
</div>
</section>
<Footer />
</>
);
}
}
export default Buy; | let productLS;
if (localStorage.getItem("productos") === null) {
productLS = [];
} else { | random_line_split |
// Buy.js
import React from "react";
import Header from "./Header";
import Footer from "./Footer";
import emailjs from "emailjs-com";
class Buy extends React.Component {
constructor(props) {
super(props);
this.state = {
productEmail: "",
};
}
obtainProductsLocalStorage() {
let productLS;
if (localStorage.getItem("productos") === null) {
productLS = [];
} else {
productLS = JSON.parse(localStorage.getItem("productos"));
}
return productLS;
}
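  // Recomputes the order totals from localStorage: sums precio * cantidad for
  // every product, derives IGV (18%) and the subtotal from the total, and
  // writes the results into the #subtotal, #igv and #total elements.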
  totalCalculate() {
let productLS;
let total = 0,
subtotal = 0,
igv = 0;
productLS = this.obtainProductsLocalStorage();
for (let i = 0; i < productLS.length; i++) {
let element = Number(productLS[i].precio * productLS[i].cantidad);
total = total + element;
}
igv = parseFloat(total * 0.18).toFixed(2);
subtotal = parseFloat(total - igv).toFixed(2);
document.getElementById("subtotal").innerHTML = "$" + subtotal;
document.getElementById("igv").innerHTML = "$" + igv;
document.getElementById("total").value = "$" + total.toFixed(2);
}
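  // Renders one table row per product kept in localStorage, including an
  // editable quantity input and a delete icon that carries the product id.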
readLocalStorageShop() {
let productsLS;
productsLS = this.obtainProductsLocalStorage();
productsLS.forEach(function (product) {
const row = document.createElement("tr");
row.innerHTML = `
<td>
<img src="${product.imagen}" width=100>
</td>
<td>${product.titulo}</td>
<td>${product.precio}</td>
<td>
<input type="number" class="form-control cantidad" min="1" value=${
product.cantidad
}>
</td>
<td id='subtotals'>${(product.precio * product.cantidad).toFixed(
2
)}</td>
<td>
<a href="#" class="delete-product bx bxs-x-circle" style="font-size:30px" data-id="${
product.id
}"}></a>
</td>
`;
document.querySelector("#buy-list tbody").appendChild(row);
});
}
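  // Removes a product row when its delete icon is clicked, then drops the
  // product from localStorage and refreshes the totals.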
eraseProduct(e) {
e.preventDefault();
let product, productID;
if (e.target.classList.contains("delete-product")) {
e.target.parentElement.parentElement.remove();
product = e.target.parentElement.parentElement;
productID = product.querySelector("a").getAttribute("data-id");
}
this.deleteProductLocalStorage(productID);
this.totalCalculate();
}
deleteProductLocalStorage(productID) {
this.totalCalculate();
let productsLS;
productsLS = this.obtainProductsLocalStorage();
productsLS.forEach(function (productLS, index) {
if (productLS.id === productID) {
productsLS.splice(index, 1);
}
});
localStorage.setItem("productos", JSON.stringify(productsLS));
}
componentDidMount() {
this.totalCalculate();
    this.readLocalStorageShop();
}
emptyLocalStorage() {
localStorage.clear();
}
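  // Validates the form, serializes the cart into a plain-text product list and
  // sends it through EmailJS with the service/template/user ids hard-coded
  // below; on success the cart is cleared and the user is sent back to /menu.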
purchaseProcess(e) {
e.preventDefault();
if (this.obtainProductsLocalStorage().length === 0) {
window.alert(
"No se puede realizar la compra porque no hay productos seleccionados"
);
window.location.href = "/menu";
} else if (
document.getElementById("client").value === "" ||
document.getElementById("address").value === ""
) {
window.alert("Por favor, diligencie todos los campos");
} else {
const loadingGif = document.querySelector("#load");
loadingGif.style.display = "block";
const send = document.createElement("img");
send.src = "../images/mail.gif";
send.id = "mailImage";
      let productsLS;
      let product = "";
productsLS = JSON.parse(localStorage.getItem("productos"));
productsLS.map((productLS, i) => {
product +=
"\n" +
JSON.stringify(
`Plato: ${productLS.titulo} Precio: ${productLS.precio} Cantidad: ${productLS.cantidad}`
);
});
product = product.replace("undefined", "");
emailjs
.send(
"service_dxswoo3",
"template_mlm662d",
{
addressee: document.getElementById("client").value,
products: product,
cc_to: document.getElementById("address").value,
total_value: document.getElementById("total").value,
},
"user_CWVJnQVkk2WBBvozaeuKP"
)
.then(
function () {
loadingGif.style.display = "none";
document.querySelector("#loaders").appendChild(send);
setTimeout(() => {
send.remove();
localStorage.clear();
alert(
"Pedido registrado exitosamente\n Revisa el correo diligenciado, por favor"
);
window.location = "/menu";
}, 2000);
},
function (err) {
alert(
"Falló el envío del email\r\n Respuesta:\n " + JSON.stringify(err)
);
}
);
}
}
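  // Handles quantity edits: when a .cantidad input changes, the matching
  // product in localStorage is updated and its subtotal cell re-rendered.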
obtainEvent(e) {
e.preventDefault();
this.totalCalculate();
let id, cant, product, productsLS;
if (e.target.classList.contains("cantidad")) {
product = e.target.parentElement.parentElement;
id = product.querySelector("a").getAttribute("data-id");
cant = product.querySelector("input").value;
let updateCant = document.querySelectorAll("#subtotals");
productsLS = this.obtainProductsLocalStorage();
productsLS.forEach(function (productLS, index) {
if (productLS.id === id) {
productLS.cantidad = cant;
updateCant[index].innerHTML = Number(cant * productsLS[index].precio);
}
});
localStorage.setItem("productos", JSON.stringify(productsLS));
} else {
console.log("click afuera");
}
}
twoActionsBuy(e) {
this.obtainEvent(e);
this.eraseProduct(e);
}
render() {
return (
<>
<Header
total={() => {
this.totalCalculate();
}}
/>
<section id="buy">
<div className="container">
<div className="row mt-3">
<div className="col">
<h2 className="d-flex justify-content-center mb-3">
Realizar Pedido
</h2>
<form id="buy-process" action="#" method="POST">
<div className="form-group row">
<label
htmlFor="client"
className="col-12 col-md-2 col-form-label h2"
>
Cliente :
</label>
<div className="col-12 col-md-10">
<input
type="text"
className="form-control"
id="client"
placeholder="Ingresa nombre cliente"
name="addressee"
/>
</div>
</div>
<div className="form-group row">
<label
htmlFor="email"
className="col-12 col-md-2 col-form-label h2"
>
Correo :
</label>
<div className="col-12 col-md-10">
<input
type="email"
className="form-control"
id="address"
placeholder="Ingresa tu correo"
name="cc_to"
/>
</div>
</div>
<div
id="buy-car"
className="table-responsive"
onClick={(e) => this.twoActionsBuy(e)}
onChange={(e) => this.obtainEvent(e)}
onKeyUp={(e) => this.obtainEvent(e)}
>
<table className="table" id="buy-list">
<thead>
<tr>
<th scope="col">Imagen</th>
<th scope="col">Nombre</th>
<th scope="col">Precio</th>
<th scope="col">Cantidad</th>
<th scope="col">Sub Total</th>
<th scope="col">Eliminar</th>
</tr>
</thead>
<tbody></tbody>
<tr>
<th colSpan="4" scope="col" className="text-right">
SUB TOTAL :
</th>
<th scope="col">
<p id="subtotal"></p>
</th>
</tr>
<tr>
<th colSpan="4" scope="col" className="text-right">
IGV :
</th>
<th scope="col">
<p id="igv"></p>
</th>
</tr>
<tr>
<th colSpan="4" scope="col" className="text-right">
TOTAL :
</th>
<th scope="col">
<input
type="text"
id="total"
name="total_value"
readOnly
className="font-weight-bold border-0"
></input>
</th>
</tr>
</table>
</div>
<div className="row justify-content-center" id="loaders">
<img id="load" src="../images/load.gif" />
</div>
<div className="row justify-content-between">
<div className="col-md-4 mb-2">
<a href="/menu" className="btn btn-info btn-block">
Seguir comprando
</a>
</div>
<div className="col-xs-12 col-md-4">
<button
className="btn btn-success btn-block"
type="submit"
id="process"
onClick={(e) => this.purchaseProcess(e)}
>
Realizar compra
</button>
</div>
</div>
</form>
</div>
</div>
</div>
</section>
<Footer />
</>
);
}
}
export default Buy;
// message.pb.go
// The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: temporal/server/api/clock/v1/message.proto
package clock
import (
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type VectorClock struct {
ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"`
Clock int64 `protobuf:"varint,2,opt,name=clock,proto3" json:"clock,omitempty"`
ClusterId int64 `protobuf:"varint,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
}
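// Example (illustrative only, assuming the usual gogo/protobuf workflow): a
// populated value such as
//
//	vc := &VectorClock{ShardId: 1, Clock: 42, ClusterId: 7}
//	data, err := vc.Marshal()
//
// encodes each non-zero field as a varint with the tags 0x08, 0x10 and 0x18
// written by MarshalToSizedBuffer below, and Unmarshal rebuilds the struct
// from those bytes.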
func (m *VectorClock) Reset() { *m = VectorClock{} }
func (*VectorClock) ProtoMessage() {}
func (*VectorClock) Descriptor() ([]byte, []int) {
return fileDescriptor_86d20c4676353367, []int{0}
}
func (m *VectorClock) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *VectorClock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_VectorClock.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *VectorClock) XXX_Merge(src proto.Message) {
xxx_messageInfo_VectorClock.Merge(m, src)
}
func (m *VectorClock) XXX_Size() int {
return m.Size()
}
func (m *VectorClock) XXX_DiscardUnknown() {
xxx_messageInfo_VectorClock.DiscardUnknown(m)
}
var xxx_messageInfo_VectorClock proto.InternalMessageInfo
func (m *VectorClock) GetShardId() int32 {
if m != nil {
return m.ShardId
}
return 0
}
func (m *VectorClock) GetClock() int64 {
if m != nil {
return m.Clock
}
return 0
}
func (m *VectorClock) GetClusterId() int64 {
if m != nil {
return m.ClusterId
}
return 0
}
func init() {
proto.RegisterType((*VectorClock)(nil), "temporal.server.api.clock.v1.VectorClock")
}
func init() {
proto.RegisterFile("temporal/server/api/clock/v1/message.proto", fileDescriptor_86d20c4676353367)
}
var fileDescriptor_86d20c4676353367 = []byte{
// 234 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x2a, 0x49, 0xcd, 0x2d,
0xc8, 0x2f, 0x4a, 0xcc, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0xd2, 0x4f, 0x2c, 0xc8, 0xd4,
0x4f, 0xce, 0xc9, 0x4f, 0xce, 0xd6, 0x2f, 0x33, 0xd4, 0xcf, 0x4d, 0x2d, 0x2e, 0x4e, 0x4c, 0x4f,
0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x81, 0xa9, 0xd5, 0x83, 0xa8, 0xd5, 0x4b, 0x2c,
0xc8, 0xd4, 0x03, 0xab, 0xd5, 0x2b, 0x33, 0x54, 0x8a, 0xe5, 0xe2, 0x0e, 0x4b, 0x4d, 0x2e, 0xc9,
0x2f, 0x72, 0x06, 0x89, 0x08, 0x49, 0x72, 0x71, 0x14, 0x67, 0x24, 0x16, 0xa5, 0xc4, 0x67, 0xa6,
0x48, 0x30, 0x2a, 0x30, 0x6a, 0xb0, 0x06, 0xb1, 0x83, 0xf9, 0x9e, 0x29, 0x42, 0x22, 0x5c, 0xac,
0x60, 0x5d, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x10, 0x8e, 0x90, 0x2c, 0x17, 0x57, 0x72,
0x4e, 0x69, 0x71, 0x49, 0x6a, 0x11, 0x48, 0x0b, 0x33, 0x58, 0x8a, 0x13, 0x2a, 0xe2, 0x99, 0xe2,
0x14, 0x77, 0xe1, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0x36, 0x3c,
0x92, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f,
0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18,
0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x4a, 0x23, 0x3d, 0x5f, 0x0f, 0xee, 0xea,
0xcc, 0x7c, 0x6c, 0x9e, 0xb4, 0x06, 0x33, 0x92, 0xd8, 0xc0, 0x7e, 0x34, 0x06, 0x04, 0x00, 0x00,
0xff, 0xff, 0xc4, 0x11, 0xe0, 0xe1, 0x11, 0x01, 0x00, 0x00,
}
func (this *VectorClock) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*VectorClock)
if !ok {
that2, ok := that.(VectorClock)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.ShardId != that1.ShardId {
return false
}
if this.Clock != that1.Clock {
return false
}
if this.ClusterId != that1.ClusterId {
return false
}
return true
}
func (this *VectorClock) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&clock.VectorClock{")
s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n")
s = append(s, "Clock: "+fmt.Sprintf("%#v", this.Clock)+",\n")
s = append(s, "ClusterId: "+fmt.Sprintf("%#v", this.ClusterId)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringMessage(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *VectorClock) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *VectorClock) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *VectorClock) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.ClusterId != 0 {
i = encodeVarintMessage(dAtA, i, uint64(m.ClusterId))
i--
dAtA[i] = 0x18
}
if m.Clock != 0 {
i = encodeVarintMessage(dAtA, i, uint64(m.Clock))
i--
dAtA[i] = 0x10
}
if m.ShardId != 0 {
i = encodeVarintMessage(dAtA, i, uint64(m.ShardId))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func encodeVarintMessage(dAtA []byte, offset int, v uint64) int {
offset -= sovMessage(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *VectorClock) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.ShardId != 0 {
n += 1 + sovMessage(uint64(m.ShardId))
}
if m.Clock != 0 {
n += 1 + sovMessage(uint64(m.Clock))
}
if m.ClusterId != 0 {
n += 1 + sovMessage(uint64(m.ClusterId))
}
return n
}
func sovMessage(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozMessage(x uint64) (n int) {
return sovMessage(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *VectorClock) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&VectorClock{`,
`ShardId:` + fmt.Sprintf("%v", this.ShardId) + `,`,
`Clock:` + fmt.Sprintf("%v", this.Clock) + `,`,
`ClusterId:` + fmt.Sprintf("%v", this.ClusterId) + `,`,
`}`,
}, "")
return s
}
func valueToStringMessage(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *VectorClock) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMessage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: VectorClock: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: VectorClock: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ShardId", wireType)
}
m.ShardId = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMessage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ShardId |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Clock", wireType)
}
m.Clock = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMessage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Clock |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
}
m.ClusterId = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMessage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ClusterId |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipMessage(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMessage
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthMessage
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipMessage(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMessage
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMessage
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMessage
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthMessage
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupMessage
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthMessage
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupMessage = fmt.Errorf("proto: unexpected end of group")
)
# scripts.py
import os
import sys
import codecs
import re
from ansi2html import Ansi2HTMLConverter
from mtaac_package.CoNLL_file_parser import conll_file
from mtaac_package.common_functions import *
from cdliconll2conllu.converter import CdliCoNLLtoCoNLLUConverter
##from conllu.convert import convert as conllu2brat
from SPARQLTransformer import sparqlTransformer
'''
Not in use:
import rdflib
from SPARQLWrapper import SPARQLWrapper, JSON
'''
#
#---/ GENERAL COMMENTS /-------------------------------------------------------
#
'''
PIP DEPENDENCIES:
- mtaac_package (https://github.com/cdli-gh/mtaac-package)
- ansi2html
# - rdflib // not used
# - SPARQLWrapper // not used
OTHER DEPENDENCIES (Windows):
- http://www.oracle.com/technetwork/java/javase/downloads/
jdk8-downloads-2133151.html
WORKFLOW:
+ 1. CDLI-CoNLL (already there)
+ 2. CoNLL2RDF <https://github.com/acoli-repo/conll-rdf>
+ 3. RDF
+ 4. Syntactic Pre-annotator
+ 5. RDF2CoNLL
>? 6. CDLI-CoNLL2CoNLL-U
<https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-Converter)>
> 7. CoNLLU > Brat
8. Brat (push file to brat server)
(9. Editor corrects syntax)
10. Brat 2 CDLI-Conll
<https://github.com/cdli-gh/brat_to_cdli_CONLLconverter>
TODO:
+ check .sh scripts for missed steps
- columns should be adjusted for CDLI-CoNLL:
ID WORD MORPH2 POS IGNORE IGNORE IGNORE
- make sure columns are correctly designated for both formats
- make sure abbreviations are unified:
- either different rules for different abbreviations
OR
- better:
- apply your own abbr. unifier (lemmatization data scripts)
to make the data unified.
- then insure that the abbr. in SPARQL match
- Find a solution for rendering words in SPARQL.
Perhaps, FLASK templates would be the best solution also to corpus-specific
placeholders' rendering.
'''
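# A minimal sketch of the pipeline described above (file names are
# hypothetical; it assumes the CC2CU and CoNLL2RDF classes defined below):
#
#   cc2cu = CC2CU()
#   conllu_str = cc2cu.convert_from_str(open('P100149.conll', encoding='utf-8').read())
#   rdf = CoNLL2RDF()
#   rdf.conll2rdf('P100149.conll', 'cdli-conll')  # CDLI-CoNLL -> RDF for the pre-annotator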
#
#---/ ANSI 2 HTML /------------------------------------------------------------
#
a2h_conv = Ansi2HTMLConverter()
#
#---/ Variables /--------------------------------------------------------------
#
_path = os.path.dirname(os.path.abspath(__file__))
sp = subprocesses()
#
#---/ CDLI-CoNLL > CONLL-U /---------------------------------------------------
#
class CC2CU(common_functions, CdliCoNLLtoCoNLLUConverter):
'''
Wrapper around CDLI-CoNLL-to-CoNLLU-Converter:
https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-Converter
'''
GIT_CC2CU = 'https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-' \
'Converter.git'
def __init__(self):
self.cdliCoNLLInputFileName = 'CoNLL data'
## self.install_or_upgrade_CC2CU()
self.__reset__()
from cdliconll2conllu.mapping import Mapping
self.cl = Mapping()
self.header = '#%s' %'\t'.join(self.cl.conllUFields)
## print(self.cl.cdliConllFields, len(self.cl.cdliConllFields))
## ## TESTING ONLY:
## for f_name in ['P100149.conll', 'P100159.conll', 'P100188.conll']:
## f_path = os.path.join(_path, 'data', 'cdli-conll', f_name)
## self.convert_CC2CU(f_path)
def install_or_upgrade_CC2CU(self):
'''
Install CC2CU if missing or upgrade it.
'''
sp.run(['pip', 'install', 'git+'+self.GIT_CC2CU, '--upgrade'])
def convert_from_str(self, conll_str):
'''
Convert CDLI-CoNLL to CoNLL-U from CoNLL string.
'''
#print(conll_str)
lines_all = [l.strip() for l in conll_str.splitlines()]
    headerLines = [l for l in lines_all if l.startswith('#')]
inputLines = [l.split('\t') for l in lines_all if l not in headerLines+['']]
if '\t' in headerLines[-1]:
headerLines = headerLines[:-1]
headerLines.append(self.header)
## for l in inputLines:
## print([l])
self.convertCDLICoNLLtoCoNLLU(inputLines)
#print(self.outputLines, ['\t'.join(l) for l in self.outputLines])
conll_str = '\n'.join(headerLines+['\t'.join(l) for l in self.outputLines])
self.__reset__()
return conll_str
def convert_from_file(self, filename):
'''
Convert CDLI-CoNLL to CoNLL-U from file.
'''
sp.run(['cdliconll2conllu', '-i', filename, '-v'], print_stdout=False)
cdli_conll_u = CC2CU()
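# Hedged usage example: the module-level instance serves both entry points,
# e.g. cdli_conll_u.convert_from_file('P100149.conll') to convert a file via
# the cdliconll2conllu CLI, or cdli_conll_u.convert_from_str(conll_text) to
# get the CoNLL-U text back as a string (P-number mirrors the test calls above).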
#---/ CONLL-U <> CONLL-RDF /---------------------------------------------------
#
class CoNLL2RDF(common_functions):
'''
Wrapper around CoNLL-RDF:
https://github.com/acoli-repo/conll-rdf
'''
GIT_CONLLRDF = 'https://github.com/acoli-repo/conll-rdf.git'
CONLLRDF_PATH = os.path.join(_path, 'conll-rdf')
def __init__(self):
'''
'''
self.add_java_path()
if not os.path.exists(self.CONLLRDF_PATH):
self.install_CONLLRDF()
def add_java_path(self):
'''
Windows: Find and add Java/JDK/bin path to env.
'''
        self.JAVA_JDK_PATH = None  # avoid AttributeError below if no JDK is found
        self.JAVA_JRE_PATH = None
for b in ['', ' (x86)']:
pf = os.environ['ProgramFiles'].replace(b, '')
basic_java_path = os.path.join(pf, 'Java')
if os.path.exists(basic_java_path):
dirs_lst = os.listdir(basic_java_path)
jdk_lst = [jdk for jdk in dirs_lst if 'jdk' in jdk]
jre_lst = [jre for jre in dirs_lst if 'jre' in jre]
if jdk_lst!=[]:
self.JAVA_JDK_PATH = \
os.path.join(basic_java_path, jdk_lst[-1], 'bin')
self.JAVA_JRE_PATH = \
os.path.join(basic_java_path, jre_lst[-1], 'bin')
break
if not self.JAVA_JDK_PATH:
print(
'''No Java Development Kit installation found! '''
'''Download and install latest:\n'''
'''http://www.oracle.com/technetwork/'''
'''java/javase/downloads/index.html''')
return False
        if self.JAVA_JDK_PATH not in sp.env['PATH']:
            sp.env['PATH'] += os.pathsep + self.JAVA_JDK_PATH
        if self.JAVA_JRE_PATH not in sp.env['PATH']:
            sp.env['PATH'] += os.pathsep + self.JAVA_JRE_PATH
self.JAVAC_PATH = os.path.join(self.JAVA_JDK_PATH, 'javac.exe')
return True
def install_CONLLRDF(self):
'''
Install CoNLL-RDF:
1. Clone Github repo
2. Build Java libs
'''
sp.run(['git', 'clone', self.GIT_CONLLRDF])
self.compile_CONLLRDF()
def compile_CONLLRDF(self):
'''
Compile CoNLL-RDF Java libraries.
'''
dep_dict = {
'CoNLLStreamExtractor': 'CoNLL2RDF',
'CoNLLRDFAnnotator': 'CoNLLRDFFormatter',
'CoNLLRDFUpdater': 'CoNLLRDFViz'
}
src_path = os.path.join(
self.CONLLRDF_PATH, 'src', 'org', 'acoli', 'conll', 'rdf')
target_path = os.path.join(self.CONLLRDF_PATH, 'bin')
if not os.path.exists(target_path):
os.mkdir(target_path)
cp_vars = self.java_command(full_path=True, include_bin=True)[-1]
for f in os.listdir(src_path):
if '.java' in f and f.replace('.java', '') in dep_dict.keys():
src_files_path = os.path.join(src_path, f)
dep_src_file_path = os.path.join(src_path,
dep_dict[f.replace('.java', '')])
src_files_lst = [src_files_path, dep_src_file_path+'.java']
cp_path = cp_vars
self.compile_java(src_files_lst,
target_path,
cp_path)
def compile_java(self, src_files_lst, target_path, cp_path, cwd_path=None):
'''
Run Java compiler with command.
'''
self.run([r'%s' %self.JAVAC_PATH,
'-d', r'%s' %target_path,
'-g',
'-cp', r'%s' %cp_path,
]+[r'%s' %f for f in src_files_lst],
cwd_path=cwd_path)
def conll2rdf(self, f_path, columns_typ):
'''
        Run the Java CoNLL2RDF script (CoNLLStreamExtractor) to convert a CoNLL file to RDF.
'''
#self.define_columns(columns_typ)
command = self.CoNLLStreamExtractor_command() + ['../data/'] \
+ self.columns
        # run the extractor on the file and keep the Turtle output
        (rdf_str, errors) = self.run(command, stdin_path=f_path)
        self.dump_rdf(rdf_str, f_path)
def rdf2conll(self, columns, f_path=None, stdin_str=None,
decode_stdout=False, target_path=None):
'''
        Run the Java CoNLLRDFFormatter to convert RDF back to a CoNLL file.
'''
#self.define_columns(columns_type) | return None
command = self.CoNLLRDFFormatter_command() + ['-conll'] \
+ columns
(CONLLstr, errors) = self.run(
command,
cwd_path=f_path,
stdin_str=stdin_str,
decode_stdout=True)
CONLLstr = CONLLstr.replace(' #', ' \n#') \
.replace('\t#', '\n#').replace('\n\n', '\n')
if target_path:
self.dump(CONLLstr, target_path)
return CONLLstr
def get_stdin(self, stdin_path=None, stdin_str=None): #escape_unicode=False
'''
Get stdin from path or string to use with run.
'''
stdin = ''
if stdin_path==None and stdin_str==None:
return b''
if stdin_path:
with codecs.open(stdin_path, 'r', 'utf-8') as file:
stdin = file.read()
if 'etcsri' in stdin_path and '.conll' in stdin_path:
stdin = self.convert_ETCSRI(stdin)
elif stdin_str:
stdin = stdin_str
if type(stdin)!=bytes:
stdin = stdin.encode('utf-8')
## if escape_unicode==True:
## stdin = self.standardize_translit(stdin)
#print(stdin_str)
return stdin
def run(self, command, cwd_path=None, stdin_path=None, stdin_str=None,
decode_stdout=True):#, escape_unicode=False
'''
Open file, load it to stdin, run command, return stdout.
'''
stdin = self.get_stdin(
stdin_path, stdin_str)#, escape_unicode=escape_unicode)
if not cwd_path:
cwd_path=self.CONLLRDF_PATH
stdout = sp.run(
command,
cwd=cwd_path,
stdin=stdin,
print_stdout=False,
decode_stdout=decode_stdout
)
return self.filter_errors(stdout)
def filter_errors(self, stdout):
'''
Return (real_result, errors_or_warnings).
'''
shell_markers = [b'java.', b'log4j', b'org.apache', b'org.acoli']
typ = type(stdout)
if typ==str:
stdout = stdout.encode('utf-8')
shell_lst = []
for b in stdout.split(b'\n'):
for m in shell_markers:
if m in b:
shell_lst.append(b)
break
stdout_lst = [b for b in stdout.split(b'\n') if b not in shell_lst]
if typ==bytes:
errors = b'\n'.join(shell_lst)
stdout = b'\n'.join(stdout_lst)
## print(stdout.decode('utf-8'))
## print(errors.decode('utf-8'))
elif typ==str:
errors = b'\n'.join(shell_lst).decode('utf-8')
stdout = b'\n'.join(stdout_lst).decode('utf-8')
## print(stdout)
## print(errors)
return (stdout, errors)
def CoNLLStreamExtractor_command(self):
'''
Return a list containing basic command to run CoNLLStreamExtractor
with no additional arguments.
'''
# Make command to run CoNLL2RDF with java
return self.java_command()+['org.acoli.conll.rdf.CoNLLStreamExtractor']
def CoNLLRDFFormatter_command(self):
'''
Return a list containing basic command to run CoNLLRDFFormatter
with no additional arguments.
'''
# Make command to run CoNLL2RDF with java
return self.java_command()+['org.acoli.conll.rdf.CoNLLRDFFormatter']
def java_command(self, full_path=False, include_bin=True):
'''
Return a list containing basic java command to the library.
Set path to 'full' to get full path output.
'''
        # Prepare java variables
dest = 'bin'
lib_path = os.path.join(self.CONLLRDF_PATH, 'lib')
if full_path==False:
libs = ';'.join(
['lib/%s' %l for l in os.listdir(lib_path)
if '.jar' in l])
elif full_path==True:
dest = os.path.join(self.CONLLRDF_PATH, dest)
libs = ';'.join(
[os.path.join(lib_path, l) for l in os.listdir(lib_path)
if '.jar' in l])
# Make command to run CoNLL2RDF with java
cp = libs
if include_bin==True:
cp = ';'.join([dest, libs])
return ['java', '-cp', cp]
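    ## Example result (illustrative; assumes bin/ plus two jars in lib/):
    ##   java_command() -> ['java', '-cp', 'bin;lib/foo.jar;lib/bar.jar']
    ## The ';' classpath separator is Windows-specific; on Linux/macOS it would
    ## need to be ':' (os.pathsep).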
def dump_rdf(self, rdf_str, f_path):
'''
        Receive the original path and RDF string, dump to file.
'''
rdf_str = "#new_text" + rdf_str.split("#new_text")[1]
filename = f_path.split('/')[-1].split('.')[0]+'.ttl'
dump_path = os.path.join(_path, 'data', 'conll-rdf', filename)
self.dump(rdf_str, dump_path)
#---/ SYNTAX PREANNOTATION /---------------------------------------------------
#
class syntax_preannotation(CoNLL2RDF):
'''
Class to preannotate turtle files with SPARQL update queries.
Extends ´CoNLL2RDF´.
'''
REQUEST_SRC = [
('remove-IGNORE', 0),
('extract-feats', 1),
('remove-MORPH2', 0),
('init-SHIFT', 1),
## ('REDUCE-adjective', 3),
## ('REDUCE-math-operators', 1), # <- additional rules for admin -
## ('REDUCE-numerals-chain', 6),
## ('REDUCE-time-num', 1),
## ('REDUCE-measurements', 1), # -->
## ('REDUCE-compound-verbs', 1),
## ('REDUCE-adnominal', 3),
## ('REDUCE-appos', 1),
## ('REDUCE-absolutive', 1),
## ('REDUCE-appos', 1), # again?
## ('REDUCE-adjective', 1), # again?
## ('REDUCE-appos', 4), # again?
## ('REDUCE-preposed-genitive', 1),
## ('REDUCE-arguments', 5), # again?
## ('REDUCE-adjective', 1), # again?
## ('REDUCE-to-HEAD', 1),
## ('remove-feats', 1),
('create-ID-and-DEP', 1),
('create-_HEAD', 1)
]
#other possible rules:
# PN <-- N (as in & instead of PN lugal)
# reduce remaining nouns to first verb as nmod (?)
# mu <-- V.MID
# (NU)
# |
# (ADJ) NU
# \ /
# UNIT\
# (...)/ \ (NU)
# ____________BASE__/
# / | | | \
# u4 ki giri iti (us)
# | | | | |
# NU PN PN (diri) mu
# | | | \
# (...) (...) MN V.MID--...
#
#
REQUEST_REMOVE_IGNORE = [
('remove-IGNORE', 1)
]
SPARQL_PATH = os.path.join(_path, 'syntax-preannotation', 'sparql')
OUTPUT_PATH = os.path.join(_path, 'data', 'conll-preannotated')
def __init__(self):
'''
'''
CoNLL2RDF.__init__(self)
def load_requests(self, requests=[]):
'''
        Return the list of SPARQL request file paths for CoNLL-RDF.
        Append a repeat count in braces when ´r[1]´ is not ´None´.
'''
requests_lst = []
if requests==[]:
requests = self.REQUEST_SRC
for r in requests:
            repeat = ''  # no repeat suffix unless r[1] is set
if r[1]!=None:
repeat = '{%s}' %r[1]
requests_lst.append(
r'%s\%s.sparql%s' %(self.SPARQL_PATH, r[0], repeat))
return requests_lst
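    ## Illustrative call (hypothetical arguments):
    ##   self.load_requests([('remove-IGNORE', 0), ('init-SHIFT', 1)])
    ## would return paths such as r'<SPARQL_PATH>\remove-IGNORE.sparql{0}' and
    ## r'<SPARQL_PATH>\init-SHIFT.sparql{1}'; the braced number is the repeat
    ## count passed on to CoNLL-RDF for each SPARQL update.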
def preannotate(self, f_path):
'''
Run SPARQL with ´self.requests_full_lst´ from requests.
First command converts CoNLL to RDF and applies preannotation
rules to it. The second converts the file back to CoNLL.
'''
columns = [
'ID_NUM', 'FORM', 'BASE', 'MORPH2',
'POS', 'EPOS', 'HEAD', 'DEP', 'EDGE']
corpus = 'cdli'
override = {}
if 'etcsri' in f_path:
corpus = 'etcsri'
columns = [
'ID_NUM', 'FORM_ATF', 'BASE', 'MORPH2',
'POS', 'EPOS', 'HEAD', 'DEP', 'EDGE']
override = {
'FORM_ATF': 'FORM'}
c = conll_file(path=f_path, corpus=corpus)
c.configure_str_output(columns, override=override)
rdf_str = self.convert_to_conll_and_preannotate(c)
#print('zzzzzzzzzzzzzzzzz', rdf_str) #<-- PROBLEM HERE !!!! returns b''
filename, target_path, target_path_tree = self.get_path_data(f_path)
self.tree_output(rdf_str, target_path_tree)
conll_str = self.rdf2conll(columns=c.override_columns,
stdin_str=rdf_str, decode_stdout=False)
c.merge_columns_from_conll_str(conll_str, ['HEAD', ('EDGE', 'DEPREL')])
c.configure_str_output(['ID_NUM']+c.COLUMNS_CDLI[1:], override=override)
conll_u = cdli_conll_u.convert_from_str(str(c))+'\n' #<--convert to CoNLL-U
self.dump(conll_u, target_path)
def get_path_data(self, f_path):
'''
'''
filename = os.path.basename(f_path)
target_path = os.path.join(self.OUTPUT_PATH, filename)
target_path_tree = os.path.join(
self.OUTPUT_PATH, '%s_tree.html' %filename.split('.')[0])
return filename, target_path, target_path_tree
def convert_to_conll_and_preannotate(self, conll_obj):
'''
Convert CoNLL to RDF and preannotate with SPARQL.
'''
# !TODO!
# REPLACE ['http://oracc.museum.upenn.edu/etcsri/'] by context!
command = self.CoNLLStreamExtractor_command() \
+ ['http://oracc.museum.upenn.edu/etcsri/'] \
+ conll_obj.override_columns + ['-u'] \
+ self.load_requests()
run_dict={
'command': command, 'stdin_str': str(conll_obj),
'decode_stdout': False}
#, 'escape_unicode': True}
#print(run_dict) #<-- ALL GOOD
(rdf_str, errors) = self.run(**run_dict) #<-- PROBLEM SOMEWHERE HERE !!!! returns b''
print(errors) #Error in Parsing Data: Incorrect XPOSTAG at line:
return rdf_str
def tree_output(self, rdf_str, target_path=''):
'''
Return string with parsed RDF tree representation.
Dump to target_path when it is given.
'''
command = self.CoNLLRDFFormatter_command() + ['-grammar']
(tree_str, errors) = \
self.run(command, stdin_str=rdf_str, decode_stdout=True)
tree_html = a2h_conv.convert(tree_str)
tree_html = tree_html.replace('pre-wrap', 'pre')
if target_path!='':
self.dump(tree_html, target_path)
return tree_str
#---/ COMMANDS /---------------------------------------------------------------
#
'''
Preannotate all files in data/etcsri-conll-all, except those with errors:
'''
##f_path = os.path.join(_path, 'data', 'etcsri-conll-all')
##sx = syntax_preannotation()
##for f in os.listdir(f_path):
## try:
## sx.preannotate(os.path.join(f_path, f))
## except Exception as e:
## raise e
## pass
'''
Preannotate all files in data/cdli-conll-all, except all errors:
'''
#f_path = os.path.join(_path, 'data', 'etcsri-conll')
#f_path = os.path.join(_path, 'data', 'cdli-jinyan-non-admin') #'etcsri-conll-all')
f_path = os.path.join(_path, 'data', 'cdli-conll-all')
#f_path = os.path.join(_path, 'data', 'evaluate')
preannotated = os.listdir(os.path.join(_path, 'data', 'conll-preannotated'))
exclude = [pa.replace('_tree.html', '.conll') for pa in preannotated if '_tree.html' in pa]
from list_errors import list_files_with_errors
errors_list = list_files_with_errors() # files previously annotated with errors
sx = syntax_preannotation()
for f in os.listdir(f_path):
if f in errors_list: #f not in exclude
try:
sx.preannotate(os.path.join(f_path, f))
except Exception as e:
raise e
pass
errors_list_new = list_files_with_errors()
print('old_errors', errors_list)
print('new_errors', errors_list_new)
#CC2CU()
#CoNLL2RDF()
#syntax_preannotation()
##c = CoNLL2RDF()
##c.rdf2conll("data\conll-rdf\P100188.ttl") | if f_path==None and stdin_str==None:
print('rdf2conll wrapper: specify path OR string.') | random_line_split |
scripts.py | import os
import sys
import codecs
import re
from ansi2html import Ansi2HTMLConverter
from mtaac_package.CoNLL_file_parser import conll_file
from mtaac_package.common_functions import *
from cdliconll2conllu.converter import CdliCoNLLtoCoNLLUConverter
##from conllu.convert import convert as conllu2brat
from SPARQLTransformer import sparqlTransformer
'''
Not in use:
import rdflib
from SPARQLWrapper import SPARQLWrapper, JSON
'''
#
#---/ GENERAL COMMENTS /-------------------------------------------------------
#
'''
PIP DEPENDENCIES:
- mtaac_package (https://github.com/cdli-gh/mtaac-package)
- ansi2html
# - rdflib // not used
# - SPARQLWrapper // not used
OTHER DEPENDENCIES (Windows):
- http://www.oracle.com/technetwork/java/javase/downloads/
jdk8-downloads-2133151.html
WORKFLOW:
+ 1. CDLI-CoNLL (already there)
+ 2. CoNLL2RDF <https://github.com/acoli-repo/conll-rdf>
+ 3. RDF
+ 4. Syntactic Pre-annotator
+ 5. RDF2CoNLL
>? 6. CDLI-CoNLL2CoNLL-U
<https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-Converter)>
> 7. CoNLLU > Brat
8. Brat (push file to brat server)
(9. Editor corrects syntax)
10. Brat 2 CDLI-Conll
<https://github.com/cdli-gh/brat_to_cdli_CONLLconverter>
TODO:
+ check .sh scripts for missed steps
- columns should be adjusted for CDLI-CoNLL:
ID WORD MORPH2 POS IGNORE IGNORE IGNORE
- make sure columns are correctly designated for both formats
- make sure abbreviations are unified:
- either different rules for different abbreviations
OR
- better:
- apply your own abbr. unifier (lemmatization data scripts)
to make the data unified.
      - then ensure that the abbr. in SPARQL match
  - Find a solution for rendering words in SPARQL.
    Perhaps FLASK templates would also be the best solution for rendering
    corpus-specific placeholders.
'''
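#
# Illustrative end-to-end run of the workflow above (hypothetical usage of the
# classes defined further down; the file name is a placeholder):
#
## sx = syntax_preannotation()
## sx.preannotate(os.path.join(_path, 'data', 'cdli-conll-all', 'P100188.conll'))
## # preannotate() converts CDLI-CoNLL to RDF, applies the SPARQL pre-annotation
## # rules, converts back to CoNLL and writes a CoNLL-U file plus an HTML tree
## # to data/conll-preannotated/.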
#
#---/ ANSI 2 HTML /------------------------------------------------------------
#
a2h_conv = Ansi2HTMLConverter()
#
#---/ Variables /--------------------------------------------------------------
#
_path = os.path.dirname(os.path.abspath(__file__))
sp = subprocesses()
#
#---/ CDLI-CoNLL > CONLL-U /---------------------------------------------------
#
class CC2CU(common_functions, CdliCoNLLtoCoNLLUConverter):
'''
Wrapper around CDLI-CoNLL-to-CoNLLU-Converter:
https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-Converter
'''
GIT_CC2CU = 'https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-' \
'Converter.git'
def __init__(self):
self.cdliCoNLLInputFileName = 'CoNLL data'
## self.install_or_upgrade_CC2CU()
self.__reset__()
from cdliconll2conllu.mapping import Mapping
self.cl = Mapping()
self.header = '#%s' %'\t'.join(self.cl.conllUFields)
## print(self.cl.cdliConllFields, len(self.cl.cdliConllFields))
## ## TESTING ONLY:
## for f_name in ['P100149.conll', 'P100159.conll', 'P100188.conll']:
## f_path = os.path.join(_path, 'data', 'cdli-conll', f_name)
## self.convert_CC2CU(f_path)
def install_or_upgrade_CC2CU(self):
'''
Install CC2CU if missing or upgrade it.
'''
sp.run(['pip', 'install', 'git+'+self.GIT_CC2CU, '--upgrade'])
def convert_from_str(self, conll_str):
'''
Convert CDLI-CoNLL to CoNLL-U from CoNLL string.
'''
#print(conll_str)
lines_all = [l.strip() for l in conll_str.splitlines()]
        headerLines = [l for l in lines_all if l.startswith('#')]
inputLines = [l.split('\t') for l in lines_all if l not in headerLines+['']]
if '\t' in headerLines[-1]:
headerLines = headerLines[:-1]
headerLines.append(self.header)
## for l in inputLines:
## print([l])
self.convertCDLICoNLLtoCoNLLU(inputLines)
#print(self.outputLines, ['\t'.join(l) for l in self.outputLines])
conll_str = '\n'.join(headerLines+['\t'.join(l) for l in self.outputLines])
self.__reset__()
return conll_str
def | (self, filename):
'''
Convert CDLI-CoNLL to CoNLL-U from file.
'''
sp.run(['cdliconll2conllu', '-i', filename, '-v'], print_stdout=False)
cdli_conll_u = CC2CU()
#---/ CONLL-U <> CONLL-RDF /---------------------------------------------------
#
class CoNLL2RDF(common_functions):
'''
Wrapper around CoNLL-RDF:
https://github.com/acoli-repo/conll-rdf
'''
GIT_CONLLRDF = 'https://github.com/acoli-repo/conll-rdf.git'
CONLLRDF_PATH = os.path.join(_path, 'conll-rdf')
def __init__(self):
'''
'''
self.add_java_path()
if not os.path.exists(self.CONLLRDF_PATH):
self.install_CONLLRDF()
def add_java_path(self):
'''
Windows: Find and add Java/JDK/bin path to env.
'''
        self.JAVA_JDK_PATH = None  # avoid AttributeError below if no JDK is found
        self.JAVA_JRE_PATH = None
for b in ['', ' (x86)']:
pf = os.environ['ProgramFiles'].replace(b, '')
basic_java_path = os.path.join(pf, 'Java')
if os.path.exists(basic_java_path):
dirs_lst = os.listdir(basic_java_path)
jdk_lst = [jdk for jdk in dirs_lst if 'jdk' in jdk]
jre_lst = [jre for jre in dirs_lst if 'jre' in jre]
if jdk_lst!=[]:
self.JAVA_JDK_PATH = \
os.path.join(basic_java_path, jdk_lst[-1], 'bin')
self.JAVA_JRE_PATH = \
os.path.join(basic_java_path, jre_lst[-1], 'bin')
break
if not self.JAVA_JDK_PATH:
print(
'''No Java Development Kit installation found! '''
'''Download and install latest:\n'''
'''http://www.oracle.com/technetwork/'''
'''java/javase/downloads/index.html''')
return False
        if self.JAVA_JDK_PATH not in sp.env['PATH']:
            sp.env['PATH'] += os.pathsep + self.JAVA_JDK_PATH
        if self.JAVA_JRE_PATH not in sp.env['PATH']:
            sp.env['PATH'] += os.pathsep + self.JAVA_JRE_PATH
self.JAVAC_PATH = os.path.join(self.JAVA_JDK_PATH, 'javac.exe')
return True
def install_CONLLRDF(self):
'''
Install CoNLL-RDF:
1. Clone Github repo
2. Build Java libs
'''
sp.run(['git', 'clone', self.GIT_CONLLRDF])
self.compile_CONLLRDF()
def compile_CONLLRDF(self):
'''
Compile CoNLL-RDF Java libraries.
'''
dep_dict = {
'CoNLLStreamExtractor': 'CoNLL2RDF',
'CoNLLRDFAnnotator': 'CoNLLRDFFormatter',
'CoNLLRDFUpdater': 'CoNLLRDFViz'
}
src_path = os.path.join(
self.CONLLRDF_PATH, 'src', 'org', 'acoli', 'conll', 'rdf')
target_path = os.path.join(self.CONLLRDF_PATH, 'bin')
if not os.path.exists(target_path):
os.mkdir(target_path)
cp_vars = self.java_command(full_path=True, include_bin=True)[-1]
for f in os.listdir(src_path):
if '.java' in f and f.replace('.java', '') in dep_dict.keys():
src_files_path = os.path.join(src_path, f)
dep_src_file_path = os.path.join(src_path,
dep_dict[f.replace('.java', '')])
src_files_lst = [src_files_path, dep_src_file_path+'.java']
cp_path = cp_vars
self.compile_java(src_files_lst,
target_path,
cp_path)
def compile_java(self, src_files_lst, target_path, cp_path, cwd_path=None):
'''
Run Java compiler with command.
'''
self.run([r'%s' %self.JAVAC_PATH,
'-d', r'%s' %target_path,
'-g',
'-cp', r'%s' %cp_path,
]+[r'%s' %f for f in src_files_lst],
cwd_path=cwd_path)
def conll2rdf(self, f_path, columns_typ):
'''
        Run the Java CoNLL2RDF script (CoNLLStreamExtractor) to convert a CoNLL file to RDF.
'''
#self.define_columns(columns_typ)
command = self.CoNLLStreamExtractor_command() + ['../data/'] \
+ self.columns
        # run the extractor on the file and keep the Turtle output
        (rdf_str, errors) = self.run(command, stdin_path=f_path)
        self.dump_rdf(rdf_str, f_path)
def rdf2conll(self, columns, f_path=None, stdin_str=None,
decode_stdout=False, target_path=None):
'''
        Run the Java CoNLLRDFFormatter to convert RDF back to a CoNLL file.
'''
#self.define_columns(columns_type)
if f_path==None and stdin_str==None:
print('rdf2conll wrapper: specify path OR string.')
return None
command = self.CoNLLRDFFormatter_command() + ['-conll'] \
+ columns
(CONLLstr, errors) = self.run(
command,
cwd_path=f_path,
stdin_str=stdin_str,
decode_stdout=True)
CONLLstr = CONLLstr.replace(' #', ' \n#') \
.replace('\t#', '\n#').replace('\n\n', '\n')
if target_path:
self.dump(CONLLstr, target_path)
return CONLLstr
def get_stdin(self, stdin_path=None, stdin_str=None): #escape_unicode=False
'''
Get stdin from path or string to use with run.
'''
stdin = ''
if stdin_path==None and stdin_str==None:
return b''
if stdin_path:
with codecs.open(stdin_path, 'r', 'utf-8') as file:
stdin = file.read()
if 'etcsri' in stdin_path and '.conll' in stdin_path:
stdin = self.convert_ETCSRI(stdin)
elif stdin_str:
stdin = stdin_str
if type(stdin)!=bytes:
stdin = stdin.encode('utf-8')
## if escape_unicode==True:
## stdin = self.standardize_translit(stdin)
#print(stdin_str)
return stdin
def run(self, command, cwd_path=None, stdin_path=None, stdin_str=None,
decode_stdout=True):#, escape_unicode=False
'''
Open file, load it to stdin, run command, return stdout.
'''
stdin = self.get_stdin(
stdin_path, stdin_str)#, escape_unicode=escape_unicode)
if not cwd_path:
cwd_path=self.CONLLRDF_PATH
stdout = sp.run(
command,
cwd=cwd_path,
stdin=stdin,
print_stdout=False,
decode_stdout=decode_stdout
)
return self.filter_errors(stdout)
def filter_errors(self, stdout):
'''
Return (real_result, errors_or_warnings).
'''
shell_markers = [b'java.', b'log4j', b'org.apache', b'org.acoli']
typ = type(stdout)
if typ==str:
stdout = stdout.encode('utf-8')
shell_lst = []
for b in stdout.split(b'\n'):
for m in shell_markers:
if m in b:
shell_lst.append(b)
break
stdout_lst = [b for b in stdout.split(b'\n') if b not in shell_lst]
if typ==bytes:
errors = b'\n'.join(shell_lst)
stdout = b'\n'.join(stdout_lst)
## print(stdout.decode('utf-8'))
## print(errors.decode('utf-8'))
elif typ==str:
errors = b'\n'.join(shell_lst).decode('utf-8')
stdout = b'\n'.join(stdout_lst).decode('utf-8')
## print(stdout)
## print(errors)
return (stdout, errors)
def CoNLLStreamExtractor_command(self):
'''
Return a list containing basic command to run CoNLLStreamExtractor
with no additional arguments.
'''
# Make command to run CoNLL2RDF with java
return self.java_command()+['org.acoli.conll.rdf.CoNLLStreamExtractor']
def CoNLLRDFFormatter_command(self):
'''
Return a list containing basic command to run CoNLLRDFFormatter
with no additional arguments.
'''
# Make command to run CoNLL2RDF with java
return self.java_command()+['org.acoli.conll.rdf.CoNLLRDFFormatter']
def java_command(self, full_path=False, include_bin=True):
'''
Return a list containing basic java command to the library.
Set path to 'full' to get full path output.
'''
        # Prepare java variables
dest = 'bin'
lib_path = os.path.join(self.CONLLRDF_PATH, 'lib')
if full_path==False:
libs = ';'.join(
['lib/%s' %l for l in os.listdir(lib_path)
if '.jar' in l])
elif full_path==True:
dest = os.path.join(self.CONLLRDF_PATH, dest)
libs = ';'.join(
[os.path.join(lib_path, l) for l in os.listdir(lib_path)
if '.jar' in l])
# Make command to run CoNLL2RDF with java
cp = libs
if include_bin==True:
cp = ';'.join([dest, libs])
return ['java', '-cp', cp]
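    ## Example result (illustrative; assumes bin/ plus two jars in lib/):
    ##   java_command() -> ['java', '-cp', 'bin;lib/foo.jar;lib/bar.jar']
    ## The ';' classpath separator is Windows-specific; on Linux/macOS it would
    ## need to be ':' (os.pathsep).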
def dump_rdf(self, rdf_str, f_path):
'''
        Receive the original path and RDF string, dump to file.
'''
rdf_str = "#new_text" + rdf_str.split("#new_text")[1]
filename = f_path.split('/')[-1].split('.')[0]+'.ttl'
dump_path = os.path.join(_path, 'data', 'conll-rdf', filename)
self.dump(rdf_str, dump_path)
#---/ SYNTAX PREANNOTATION /---------------------------------------------------
#
class syntax_preannotation(CoNLL2RDF):
'''
Class to preannotate turtle files with SPARQL update queries.
Extends ´CoNLL2RDF´.
'''
REQUEST_SRC = [
('remove-IGNORE', 0),
('extract-feats', 1),
('remove-MORPH2', 0),
('init-SHIFT', 1),
## ('REDUCE-adjective', 3),
## ('REDUCE-math-operators', 1), # <- additional rules for admin -
## ('REDUCE-numerals-chain', 6),
## ('REDUCE-time-num', 1),
## ('REDUCE-measurements', 1), # -->
## ('REDUCE-compound-verbs', 1),
## ('REDUCE-adnominal', 3),
## ('REDUCE-appos', 1),
## ('REDUCE-absolutive', 1),
## ('REDUCE-appos', 1), # again?
## ('REDUCE-adjective', 1), # again?
## ('REDUCE-appos', 4), # again?
## ('REDUCE-preposed-genitive', 1),
## ('REDUCE-arguments', 5), # again?
## ('REDUCE-adjective', 1), # again?
## ('REDUCE-to-HEAD', 1),
## ('remove-feats', 1),
('create-ID-and-DEP', 1),
('create-_HEAD', 1)
]
#other possible rules:
# PN <-- N (as in & instead of PN lugal)
# reduce remaining nouns to first verb as nmod (?)
# mu <-- V.MID
# (NU)
# |
# (ADJ) NU
# \ /
# UNIT\
# (...)/ \ (NU)
# ____________BASE__/
# / | | | \
# u4 ki giri iti (us)
# | | | | |
# NU PN PN (diri) mu
# | | | \
# (...) (...) MN V.MID--...
#
#
REQUEST_REMOVE_IGNORE = [
('remove-IGNORE', 1)
]
SPARQL_PATH = os.path.join(_path, 'syntax-preannotation', 'sparql')
OUTPUT_PATH = os.path.join(_path, 'data', 'conll-preannotated')
def __init__(self):
'''
'''
CoNLL2RDF.__init__(self)
def load_requests(self, requests=[]):
'''
        Return the list of SPARQL request file paths for CoNLL-RDF.
        Append a repeat count in braces when ´r[1]´ is not ´None´.
'''
requests_lst = []
if requests==[]:
requests = self.REQUEST_SRC
for r in requests:
            repeat = ''  # no repeat suffix unless r[1] is set
if r[1]!=None:
repeat = '{%s}' %r[1]
requests_lst.append(
r'%s\%s.sparql%s' %(self.SPARQL_PATH, r[0], repeat))
return requests_lst
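    ## Illustrative call (hypothetical arguments):
    ##   self.load_requests([('remove-IGNORE', 0), ('init-SHIFT', 1)])
    ## would return paths such as r'<SPARQL_PATH>\remove-IGNORE.sparql{0}' and
    ## r'<SPARQL_PATH>\init-SHIFT.sparql{1}'; the braced number is the repeat
    ## count passed on to CoNLL-RDF for each SPARQL update.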
def preannotate(self, f_path):
'''
Run SPARQL with ´self.requests_full_lst´ from requests.
First command converts CoNLL to RDF and applies preannotation
rules to it. The second converts the file back to CoNLL.
'''
columns = [
'ID_NUM', 'FORM', 'BASE', 'MORPH2',
'POS', 'EPOS', 'HEAD', 'DEP', 'EDGE']
corpus = 'cdli'
override = {}
if 'etcsri' in f_path:
corpus = 'etcsri'
columns = [
'ID_NUM', 'FORM_ATF', 'BASE', 'MORPH2',
'POS', 'EPOS', 'HEAD', 'DEP', 'EDGE']
override = {
'FORM_ATF': 'FORM'}
c = conll_file(path=f_path, corpus=corpus)
c.configure_str_output(columns, override=override)
rdf_str = self.convert_to_conll_and_preannotate(c)
#print('zzzzzzzzzzzzzzzzz', rdf_str) #<-- PROBLEM HERE !!!! returns b''
filename, target_path, target_path_tree = self.get_path_data(f_path)
self.tree_output(rdf_str, target_path_tree)
conll_str = self.rdf2conll(columns=c.override_columns,
stdin_str=rdf_str, decode_stdout=False)
c.merge_columns_from_conll_str(conll_str, ['HEAD', ('EDGE', 'DEPREL')])
c.configure_str_output(['ID_NUM']+c.COLUMNS_CDLI[1:], override=override)
conll_u = cdli_conll_u.convert_from_str(str(c))+'\n' #<--convert to CoNLL-U
self.dump(conll_u, target_path)
def get_path_data(self, f_path):
'''
'''
filename = os.path.basename(f_path)
target_path = os.path.join(self.OUTPUT_PATH, filename)
target_path_tree = os.path.join(
self.OUTPUT_PATH, '%s_tree.html' %filename.split('.')[0])
return filename, target_path, target_path_tree
def convert_to_conll_and_preannotate(self, conll_obj):
'''
Convert CoNLL to RDF and preannotate with SPARQL.
'''
# !TODO!
# REPLACE ['http://oracc.museum.upenn.edu/etcsri/'] by context!
command = self.CoNLLStreamExtractor_command() \
+ ['http://oracc.museum.upenn.edu/etcsri/'] \
+ conll_obj.override_columns + ['-u'] \
+ self.load_requests()
run_dict={
'command': command, 'stdin_str': str(conll_obj),
'decode_stdout': False}
#, 'escape_unicode': True}
#print(run_dict) #<-- ALL GOOD
(rdf_str, errors) = self.run(**run_dict) #<-- PROBLEM SOMEWHERE HERE !!!! returns b''
print(errors) #Error in Parsing Data: Incorrect XPOSTAG at line:
return rdf_str
def tree_output(self, rdf_str, target_path=''):
'''
Return string with parsed RDF tree representation.
Dump to target_path when it is given.
'''
command = self.CoNLLRDFFormatter_command() + ['-grammar']
(tree_str, errors) = \
self.run(command, stdin_str=rdf_str, decode_stdout=True)
tree_html = a2h_conv.convert(tree_str)
tree_html = tree_html.replace('pre-wrap', 'pre')
if target_path!='':
self.dump(tree_html, target_path)
return tree_str
#---/ COMMANDS /---------------------------------------------------------------
#
'''
Preannotate all files in data/etcsri-conll-all, except those with errors:
'''
##f_path = os.path.join(_path, 'data', 'etcsri-conll-all')
##sx = syntax_preannotation()
##for f in os.listdir(f_path):
## try:
## sx.preannotate(os.path.join(f_path, f))
## except Exception as e:
## raise e
## pass
'''
Preannotate all files in data/cdli-conll-all, except all errors:
'''
#f_path = os.path.join(_path, 'data', 'etcsri-conll')
#f_path = os.path.join(_path, 'data', 'cdli-jinyan-non-admin') #'etcsri-conll-all')
f_path = os.path.join(_path, 'data', 'cdli-conll-all')
#f_path = os.path.join(_path, 'data', 'evaluate')
preannotated = os.listdir(os.path.join(_path, 'data', 'conll-preannotated'))
exclude = [pa.replace('_tree.html', '.conll') for pa in preannotated if '_tree.html' in pa]
from list_errors import list_files_with_errors
errors_list = list_files_with_errors() # files previously annotated with errors
sx = syntax_preannotation()
for f in os.listdir(f_path):
if f in errors_list: #f not in exclude
try:
sx.preannotate(os.path.join(f_path, f))
except Exception as e:
raise e
pass
errors_list_new = list_files_with_errors()
print('old_errors', errors_list)
print('new_errors', errors_list_new)
#CC2CU()
#CoNLL2RDF()
#syntax_preannotation()
##c = CoNLL2RDF()
##c.rdf2conll("data\conll-rdf\P100188.ttl")
| convert_from_file | identifier_name |
scripts.py | import os
import sys
import codecs
import re
from ansi2html import Ansi2HTMLConverter
from mtaac_package.CoNLL_file_parser import conll_file
from mtaac_package.common_functions import *
from cdliconll2conllu.converter import CdliCoNLLtoCoNLLUConverter
##from conllu.convert import convert as conllu2brat
from SPARQLTransformer import sparqlTransformer
'''
Not in use:
import rdflib
from SPARQLWrapper import SPARQLWrapper, JSON
'''
#
#---/ GENERAL COMMENTS /-------------------------------------------------------
#
'''
PIP DEPENDENCIES:
- mtaac_package (https://github.com/cdli-gh/mtaac-package)
- ansi2html
# - rdflib // not used
# - SPARQLWrapper // not used
OTHER DEPENDENCIES (Windows):
- http://www.oracle.com/technetwork/java/javase/downloads/
jdk8-downloads-2133151.html
WORKFLOW:
+ 1. CDLI-CoNLL (already there)
+ 2. CoNLL2RDF <https://github.com/acoli-repo/conll-rdf>
+ 3. RDF
+ 4. Syntactic Pre-annotator
+ 5. RDF2CoNLL
>? 6. CDLI-CoNLL2CoNLL-U
<https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-Converter)>
> 7. CoNLLU > Brat
8. Brat (push file to brat server)
(9. Editor corrects syntax)
10. Brat 2 CDLI-Conll
<https://github.com/cdli-gh/brat_to_cdli_CONLLconverter>
TODO:
+ check .sh scripts for missed steps
- columns should be adjusted for CDLI-CoNLL:
ID WORD MORPH2 POS IGNORE IGNORE IGNORE
- make sure columns are correctly designated for both formats
- make sure abbreviations are unified:
- either different rules for different abbreviations
OR
- better:
- apply your own abbr. unifier (lemmatization data scripts)
to make the data unified.
      - then ensure that the abbr. in SPARQL match
  - Find a solution for rendering words in SPARQL.
    Perhaps FLASK templates would also be the best solution for rendering
    corpus-specific placeholders.
'''
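#
# Illustrative end-to-end run of the workflow above (hypothetical usage of the
# classes defined further down; the file name is a placeholder):
#
## sx = syntax_preannotation()
## sx.preannotate(os.path.join(_path, 'data', 'cdli-conll-all', 'P100188.conll'))
## # preannotate() converts CDLI-CoNLL to RDF, applies the SPARQL pre-annotation
## # rules, converts back to CoNLL and writes a CoNLL-U file plus an HTML tree
## # to data/conll-preannotated/.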
#
#---/ ANSI 2 HTML /------------------------------------------------------------
#
a2h_conv = Ansi2HTMLConverter()
#
#---/ Variables /--------------------------------------------------------------
#
_path = os.path.dirname(os.path.abspath(__file__))
sp = subprocesses()
#
#---/ CDLI-CoNLL > CONLL-U /---------------------------------------------------
#
class CC2CU(common_functions, CdliCoNLLtoCoNLLUConverter):
'''
Wrapper around CDLI-CoNLL-to-CoNLLU-Converter:
https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-Converter
'''
GIT_CC2CU = 'https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-' \
'Converter.git'
def __init__(self):
self.cdliCoNLLInputFileName = 'CoNLL data'
## self.install_or_upgrade_CC2CU()
self.__reset__()
from cdliconll2conllu.mapping import Mapping
self.cl = Mapping()
self.header = '#%s' %'\t'.join(self.cl.conllUFields)
## print(self.cl.cdliConllFields, len(self.cl.cdliConllFields))
## ## TESTING ONLY:
## for f_name in ['P100149.conll', 'P100159.conll', 'P100188.conll']:
## f_path = os.path.join(_path, 'data', 'cdli-conll', f_name)
## self.convert_CC2CU(f_path)
def install_or_upgrade_CC2CU(self):
'''
Install CC2CU if missing or upgrade it.
'''
sp.run(['pip', 'install', 'git+'+self.GIT_CC2CU, '--upgrade'])
def convert_from_str(self, conll_str):
'''
Convert CDLI-CoNLL to CoNLL-U from CoNLL string.
'''
#print(conll_str)
lines_all = [l.strip() for l in conll_str.splitlines()]
        headerLines = [l for l in lines_all if l.startswith('#')]
inputLines = [l.split('\t') for l in lines_all if l not in headerLines+['']]
if '\t' in headerLines[-1]:
headerLines = headerLines[:-1]
headerLines.append(self.header)
## for l in inputLines:
## print([l])
self.convertCDLICoNLLtoCoNLLU(inputLines)
#print(self.outputLines, ['\t'.join(l) for l in self.outputLines])
conll_str = '\n'.join(headerLines+['\t'.join(l) for l in self.outputLines])
self.__reset__()
return conll_str
def convert_from_file(self, filename):
'''
Convert CDLI-CoNLL to CoNLL-U from file.
'''
sp.run(['cdliconll2conllu', '-i', filename, '-v'], print_stdout=False)
cdli_conll_u = CC2CU()
#---/ CONLL-U <> CONLL-RDF /---------------------------------------------------
#
class CoNLL2RDF(common_functions):
'''
Wrapper around CoNLL-RDF:
https://github.com/acoli-repo/conll-rdf
'''
GIT_CONLLRDF = 'https://github.com/acoli-repo/conll-rdf.git'
CONLLRDF_PATH = os.path.join(_path, 'conll-rdf')
def __init__(self):
'''
'''
self.add_java_path()
if not os.path.exists(self.CONLLRDF_PATH):
self.install_CONLLRDF()
def add_java_path(self):
'''
Windows: Find and add Java/JDK/bin path to env.
'''
        self.JAVA_JDK_PATH = None  # avoid AttributeError below if no JDK is found
        self.JAVA_JRE_PATH = None
for b in ['', ' (x86)']:
pf = os.environ['ProgramFiles'].replace(b, '')
basic_java_path = os.path.join(pf, 'Java')
if os.path.exists(basic_java_path):
dirs_lst = os.listdir(basic_java_path)
jdk_lst = [jdk for jdk in dirs_lst if 'jdk' in jdk]
jre_lst = [jre for jre in dirs_lst if 'jre' in jre]
if jdk_lst!=[]:
self.JAVA_JDK_PATH = \
os.path.join(basic_java_path, jdk_lst[-1], 'bin')
self.JAVA_JRE_PATH = \
os.path.join(basic_java_path, jre_lst[-1], 'bin')
break
if not self.JAVA_JDK_PATH:
print(
'''No Java Development Kit installation found! '''
'''Download and install latest:\n'''
'''http://www.oracle.com/technetwork/'''
'''java/javase/downloads/index.html''')
return False
        if self.JAVA_JDK_PATH not in sp.env['PATH']:
            sp.env['PATH'] += os.pathsep + self.JAVA_JDK_PATH
        if self.JAVA_JRE_PATH not in sp.env['PATH']:
            sp.env['PATH'] += os.pathsep + self.JAVA_JRE_PATH
self.JAVAC_PATH = os.path.join(self.JAVA_JDK_PATH, 'javac.exe')
return True
def install_CONLLRDF(self):
'''
Install CoNLL-RDF:
1. Clone Github repo
2. Build Java libs
'''
sp.run(['git', 'clone', self.GIT_CONLLRDF])
self.compile_CONLLRDF()
def compile_CONLLRDF(self):
'''
Compile CoNLL-RDF Java libraries.
'''
dep_dict = {
'CoNLLStreamExtractor': 'CoNLL2RDF',
'CoNLLRDFAnnotator': 'CoNLLRDFFormatter',
'CoNLLRDFUpdater': 'CoNLLRDFViz'
}
src_path = os.path.join(
self.CONLLRDF_PATH, 'src', 'org', 'acoli', 'conll', 'rdf')
target_path = os.path.join(self.CONLLRDF_PATH, 'bin')
if not os.path.exists(target_path):
os.mkdir(target_path)
cp_vars = self.java_command(full_path=True, include_bin=True)[-1]
for f in os.listdir(src_path):
if '.java' in f and f.replace('.java', '') in dep_dict.keys():
src_files_path = os.path.join(src_path, f)
dep_src_file_path = os.path.join(src_path,
dep_dict[f.replace('.java', '')])
src_files_lst = [src_files_path, dep_src_file_path+'.java']
cp_path = cp_vars
self.compile_java(src_files_lst,
target_path,
cp_path)
def compile_java(self, src_files_lst, target_path, cp_path, cwd_path=None):
|
def conll2rdf(self, f_path, columns_typ):
'''
        Run the Java CoNLL2RDF script (CoNLLStreamExtractor) to convert a CoNLL file to RDF.
'''
#self.define_columns(columns_typ)
command = self.CoNLLStreamExtractor_command() + ['../data/'] \
+ self.columns
        # run the extractor on the file and keep the Turtle output
        (rdf_str, errors) = self.run(command, stdin_path=f_path)
        self.dump_rdf(rdf_str, f_path)
def rdf2conll(self, columns, f_path=None, stdin_str=None,
decode_stdout=False, target_path=None):
'''
        Run the Java CoNLLRDFFormatter to convert RDF back to a CoNLL file.
'''
#self.define_columns(columns_type)
if f_path==None and stdin_str==None:
print('rdf2conll wrapper: specify path OR string.')
return None
command = self.CoNLLRDFFormatter_command() + ['-conll'] \
+ columns
(CONLLstr, errors) = self.run(
command,
cwd_path=f_path,
stdin_str=stdin_str,
decode_stdout=True)
CONLLstr = CONLLstr.replace(' #', ' \n#') \
.replace('\t#', '\n#').replace('\n\n', '\n')
if target_path:
self.dump(CONLLstr, target_path)
return CONLLstr
def get_stdin(self, stdin_path=None, stdin_str=None): #escape_unicode=False
'''
Get stdin from path or string to use with run.
'''
stdin = ''
if stdin_path==None and stdin_str==None:
return b''
if stdin_path:
with codecs.open(stdin_path, 'r', 'utf-8') as file:
stdin = file.read()
if 'etcsri' in stdin_path and '.conll' in stdin_path:
stdin = self.convert_ETCSRI(stdin)
elif stdin_str:
stdin = stdin_str
if type(stdin)!=bytes:
stdin = stdin.encode('utf-8')
## if escape_unicode==True:
## stdin = self.standardize_translit(stdin)
#print(stdin_str)
return stdin
def run(self, command, cwd_path=None, stdin_path=None, stdin_str=None,
decode_stdout=True):#, escape_unicode=False
'''
Open file, load it to stdin, run command, return stdout.
'''
stdin = self.get_stdin(
stdin_path, stdin_str)#, escape_unicode=escape_unicode)
if not cwd_path:
cwd_path=self.CONLLRDF_PATH
stdout = sp.run(
command,
cwd=cwd_path,
stdin=stdin,
print_stdout=False,
decode_stdout=decode_stdout
)
return self.filter_errors(stdout)
def filter_errors(self, stdout):
'''
Return (real_result, errors_or_warnings).
'''
shell_markers = [b'java.', b'log4j', b'org.apache', b'org.acoli']
typ = type(stdout)
if typ==str:
stdout = stdout.encode('utf-8')
shell_lst = []
for b in stdout.split(b'\n'):
for m in shell_markers:
if m in b:
shell_lst.append(b)
break
stdout_lst = [b for b in stdout.split(b'\n') if b not in shell_lst]
if typ==bytes:
errors = b'\n'.join(shell_lst)
stdout = b'\n'.join(stdout_lst)
## print(stdout.decode('utf-8'))
## print(errors.decode('utf-8'))
elif typ==str:
errors = b'\n'.join(shell_lst).decode('utf-8')
stdout = b'\n'.join(stdout_lst).decode('utf-8')
## print(stdout)
## print(errors)
return (stdout, errors)
def CoNLLStreamExtractor_command(self):
'''
Return a list containing basic command to run CoNLLStreamExtractor
with no additional arguments.
'''
# Make command to run CoNLL2RDF with java
return self.java_command()+['org.acoli.conll.rdf.CoNLLStreamExtractor']
def CoNLLRDFFormatter_command(self):
'''
Return a list containing basic command to run CoNLLRDFFormatter
with no additional arguments.
'''
# Make command to run CoNLL2RDF with java
return self.java_command()+['org.acoli.conll.rdf.CoNLLRDFFormatter']
def java_command(self, full_path=False, include_bin=True):
'''
Return a list containing basic java command to the library.
Set path to 'full' to get full path output.
'''
        # Prepare java variables
dest = 'bin'
lib_path = os.path.join(self.CONLLRDF_PATH, 'lib')
if full_path==False:
libs = ';'.join(
['lib/%s' %l for l in os.listdir(lib_path)
if '.jar' in l])
elif full_path==True:
dest = os.path.join(self.CONLLRDF_PATH, dest)
libs = ';'.join(
[os.path.join(lib_path, l) for l in os.listdir(lib_path)
if '.jar' in l])
# Make command to run CoNLL2RDF with java
cp = libs
if include_bin==True:
cp = ';'.join([dest, libs])
return ['java', '-cp', cp]
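    ## Example result (illustrative; assumes bin/ plus two jars in lib/):
    ##   java_command() -> ['java', '-cp', 'bin;lib/foo.jar;lib/bar.jar']
    ## The ';' classpath separator is Windows-specific; on Linux/macOS it would
    ## need to be ':' (os.pathsep).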
def dump_rdf(self, rdf_str, f_path):
'''
        Receive the original path and RDF string, dump to file.
'''
rdf_str = "#new_text" + rdf_str.split("#new_text")[1]
filename = f_path.split('/')[-1].split('.')[0]+'.ttl'
dump_path = os.path.join(_path, 'data', 'conll-rdf', filename)
self.dump(rdf_str, dump_path)
#---/ SYNTAX PREANNOTATION /---------------------------------------------------
#
class syntax_preannotation(CoNLL2RDF):
'''
Class to preannotate turtle files with SPARQL update queries.
Extends ´CoNLL2RDF´.
'''
REQUEST_SRC = [
('remove-IGNORE', 0),
('extract-feats', 1),
('remove-MORPH2', 0),
('init-SHIFT', 1),
## ('REDUCE-adjective', 3),
## ('REDUCE-math-operators', 1), # <- additional rules for admin -
## ('REDUCE-numerals-chain', 6),
## ('REDUCE-time-num', 1),
## ('REDUCE-measurements', 1), # -->
## ('REDUCE-compound-verbs', 1),
## ('REDUCE-adnominal', 3),
## ('REDUCE-appos', 1),
## ('REDUCE-absolutive', 1),
## ('REDUCE-appos', 1), # again?
## ('REDUCE-adjective', 1), # again?
## ('REDUCE-appos', 4), # again?
## ('REDUCE-preposed-genitive', 1),
## ('REDUCE-arguments', 5), # again?
## ('REDUCE-adjective', 1), # again?
## ('REDUCE-to-HEAD', 1),
## ('remove-feats', 1),
('create-ID-and-DEP', 1),
('create-_HEAD', 1)
]
#other possible rules:
# PN <-- N (as in & instead of PN lugal)
# reduce remaining nouns to first verb as nmod (?)
# mu <-- V.MID
# (NU)
# |
# (ADJ) NU
# \ /
# UNIT\
# (...)/ \ (NU)
# ____________BASE__/
# / | | | \
# u4 ki giri iti (us)
# | | | | |
# NU PN PN (diri) mu
# | | | \
# (...) (...) MN V.MID--...
#
#
REQUEST_REMOVE_IGNORE = [
('remove-IGNORE', 1)
]
SPARQL_PATH = os.path.join(_path, 'syntax-preannotation', 'sparql')
OUTPUT_PATH = os.path.join(_path, 'data', 'conll-preannotated')
def __init__(self):
'''
'''
CoNLL2RDF.__init__(self)
def load_requests(self, requests=[]):
'''
        Return the list of SPARQL request file paths for CoNLL-RDF.
        Append a repeat count in braces when ´r[1]´ is not ´None´.
'''
requests_lst = []
if requests==[]:
requests = self.REQUEST_SRC
for r in requests:
            repeat = ''  # no repeat suffix unless r[1] is set
if r[1]!=None:
repeat = '{%s}' %r[1]
requests_lst.append(
r'%s\%s.sparql%s' %(self.SPARQL_PATH, r[0], repeat))
return requests_lst
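    ## Illustrative call (hypothetical arguments):
    ##   self.load_requests([('remove-IGNORE', 0), ('init-SHIFT', 1)])
    ## would return paths such as r'<SPARQL_PATH>\remove-IGNORE.sparql{0}' and
    ## r'<SPARQL_PATH>\init-SHIFT.sparql{1}'; the braced number is the repeat
    ## count passed on to CoNLL-RDF for each SPARQL update.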
def preannotate(self, f_path):
'''
Run SPARQL with ´self.requests_full_lst´ from requests.
First command converts CoNLL to RDF and applies preannotation
rules to it. The second converts the file back to CoNLL.
'''
columns = [
'ID_NUM', 'FORM', 'BASE', 'MORPH2',
'POS', 'EPOS', 'HEAD', 'DEP', 'EDGE']
corpus = 'cdli'
override = {}
if 'etcsri' in f_path:
corpus = 'etcsri'
columns = [
'ID_NUM', 'FORM_ATF', 'BASE', 'MORPH2',
'POS', 'EPOS', 'HEAD', 'DEP', 'EDGE']
override = {
'FORM_ATF': 'FORM'}
c = conll_file(path=f_path, corpus=corpus)
c.configure_str_output(columns, override=override)
rdf_str = self.convert_to_conll_and_preannotate(c)
#print('zzzzzzzzzzzzzzzzz', rdf_str) #<-- PROBLEM HERE !!!! returns b''
filename, target_path, target_path_tree = self.get_path_data(f_path)
self.tree_output(rdf_str, target_path_tree)
conll_str = self.rdf2conll(columns=c.override_columns,
stdin_str=rdf_str, decode_stdout=False)
c.merge_columns_from_conll_str(conll_str, ['HEAD', ('EDGE', 'DEPREL')])
c.configure_str_output(['ID_NUM']+c.COLUMNS_CDLI[1:], override=override)
conll_u = cdli_conll_u.convert_from_str(str(c))+'\n' #<--convert to CoNLL-U
self.dump(conll_u, target_path)
def get_path_data(self, f_path):
'''
'''
filename = os.path.basename(f_path)
target_path = os.path.join(self.OUTPUT_PATH, filename)
target_path_tree = os.path.join(
self.OUTPUT_PATH, '%s_tree.html' %filename.split('.')[0])
return filename, target_path, target_path_tree
def convert_to_conll_and_preannotate(self, conll_obj):
'''
Convert CoNLL to RDF and preannotate with SPARQL.
'''
# !TODO!
# REPLACE ['http://oracc.museum.upenn.edu/etcsri/'] by context!
command = self.CoNLLStreamExtractor_command() \
+ ['http://oracc.museum.upenn.edu/etcsri/'] \
+ conll_obj.override_columns + ['-u'] \
+ self.load_requests()
run_dict={
'command': command, 'stdin_str': str(conll_obj),
'decode_stdout': False}
#, 'escape_unicode': True}
#print(run_dict) #<-- ALL GOOD
(rdf_str, errors) = self.run(**run_dict) #<-- PROBLEM SOMEWHERE HERE !!!! returns b''
print(errors) #Error in Parsing Data: Incorrect XPOSTAG at line:
return rdf_str
def tree_output(self, rdf_str, target_path=''):
'''
Return string with parsed RDF tree representation.
Dump to target_path when it is given.
'''
command = self.CoNLLRDFFormatter_command() + ['-grammar']
(tree_str, errors) = \
self.run(command, stdin_str=rdf_str, decode_stdout=True)
tree_html = a2h_conv.convert(tree_str)
tree_html = tree_html.replace('pre-wrap', 'pre')
if target_path!='':
self.dump(tree_html, target_path)
return tree_str
#---/ COMMANDS /---------------------------------------------------------------
#
'''
Preannotate all files in data/etcsri-conll-all, except those with errors:
'''
##f_path = os.path.join(_path, 'data', 'etcsri-conll-all')
##sx = syntax_preannotation()
##for f in os.listdir(f_path):
## try:
## sx.preannotate(os.path.join(f_path, f))
## except Exception as e:
## raise e
## pass
'''
Preannotate all files in data/cdli-conll-all, except all errors:
'''
#f_path = os.path.join(_path, 'data', 'etcsri-conll')
#f_path = os.path.join(_path, 'data', 'cdli-jinyan-non-admin') #'etcsri-conll-all')
f_path = os.path.join(_path, 'data', 'cdli-conll-all')
#f_path = os.path.join(_path, 'data', 'evaluate')
preannotated = os.listdir(os.path.join(_path, 'data', 'conll-preannotated'))
exclude = [pa.replace('_tree.html', '.conll') for pa in preannotated if '_tree.html' in pa]
from list_errors import list_files_with_errors
errors_list = list_files_with_errors() # files previously annotated with errors
sx = syntax_preannotation()
for f in os.listdir(f_path):
if f in errors_list: #f not in exclude
try:
sx.preannotate(os.path.join(f_path, f))
except Exception as e:
raise e
pass
errors_list_new = list_files_with_errors()
print('old_errors', errors_list)
print('new_errors', errors_list_new)
#CC2CU()
#CoNLL2RDF()
#syntax_preannotation()
##c = CoNLL2RDF()
##c.rdf2conll("data\conll-rdf\P100188.ttl")
| '''
Run Java compiler with command.
'''
self.run([r'%s' %self.JAVAC_PATH,
'-d', r'%s' %target_path,
'-g',
'-cp', r'%s' %cp_path,
]+[r'%s' %f for f in src_files_lst],
cwd_path=cwd_path) | identifier_body |
scripts.py | import os
import sys
import codecs
import re
from ansi2html import Ansi2HTMLConverter
from mtaac_package.CoNLL_file_parser import conll_file
from mtaac_package.common_functions import *
from cdliconll2conllu.converter import CdliCoNLLtoCoNLLUConverter
##from conllu.convert import convert as conllu2brat
from SPARQLTransformer import sparqlTransformer
'''
Not in use:
import rdflib
from SPARQLWrapper import SPARQLWrapper, JSON
'''
#
#---/ GENERAL COMMENTS /-------------------------------------------------------
#
'''
PIP DEPENDENCIES:
- mtaac_package (https://github.com/cdli-gh/mtaac-package)
- ansi2html
# - rdflib // not used
# - SPARQLWrapper // not used
OTHER DEPENDENCIES (Windows):
- http://www.oracle.com/technetwork/java/javase/downloads/
jdk8-downloads-2133151.html
WORKFLOW:
+ 1. CDLI-CoNLL (already there)
+ 2. CoNLL2RDF <https://github.com/acoli-repo/conll-rdf>
+ 3. RDF
+ 4. Syntactic Pre-annotator
+ 5. RDF2CoNLL
>? 6. CDLI-CoNLL2CoNLL-U
<https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-Converter)>
> 7. CoNLLU > Brat
8. Brat (push file to brat server)
(9. Editor corrects syntax)
10. Brat 2 CDLI-Conll
<https://github.com/cdli-gh/brat_to_cdli_CONLLconverter>
TODO:
+ check .sh scripts for missed steps
- columns should be adjusted for CDLI-CoNLL:
ID WORD MORPH2 POS IGNORE IGNORE IGNORE
- make sure columns are correctly designated for both formats
- make sure abbreviations are unified:
- either different rules for different abbreviations
OR
- better:
- apply your own abbr. unifier (lemmatization data scripts)
to make the data unified.
      - then ensure that the abbr. in SPARQL match
  - Find a solution for rendering words in SPARQL.
    Perhaps FLASK templates would also be the best solution for rendering
    corpus-specific placeholders.
'''
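#
# Illustrative end-to-end run of the workflow above (hypothetical usage of the
# classes defined further down; the file name is a placeholder):
#
## sx = syntax_preannotation()
## sx.preannotate(os.path.join(_path, 'data', 'cdli-conll-all', 'P100188.conll'))
## # preannotate() converts CDLI-CoNLL to RDF, applies the SPARQL pre-annotation
## # rules, converts back to CoNLL and writes a CoNLL-U file plus an HTML tree
## # to data/conll-preannotated/.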
#
#---/ ANSI 2 HTML /------------------------------------------------------------
#
a2h_conv = Ansi2HTMLConverter()
#
#---/ Variables /--------------------------------------------------------------
#
_path = os.path.dirname(os.path.abspath(__file__))
sp = subprocesses()
#
#---/ CDLI-CoNLL > CONLL-U /---------------------------------------------------
#
class CC2CU(common_functions, CdliCoNLLtoCoNLLUConverter):
'''
Wrapper around CDLI-CoNLL-to-CoNLLU-Converter:
https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-Converter
'''
GIT_CC2CU = 'https://github.com/cdli-gh/CDLI-CoNLL-to-CoNLLU-' \
'Converter.git'
def __init__(self):
self.cdliCoNLLInputFileName = 'CoNLL data'
## self.install_or_upgrade_CC2CU()
self.__reset__()
from cdliconll2conllu.mapping import Mapping
self.cl = Mapping()
self.header = '#%s' %'\t'.join(self.cl.conllUFields)
## print(self.cl.cdliConllFields, len(self.cl.cdliConllFields))
## ## TESTING ONLY:
## for f_name in ['P100149.conll', 'P100159.conll', 'P100188.conll']:
## f_path = os.path.join(_path, 'data', 'cdli-conll', f_name)
## self.convert_CC2CU(f_path)
def install_or_upgrade_CC2CU(self):
'''
Install CC2CU if missing or upgrade it.
'''
sp.run(['pip', 'install', 'git+'+self.GIT_CC2CU, '--upgrade'])
def convert_from_str(self, conll_str):
'''
Convert CDLI-CoNLL to CoNLL-U from CoNLL string.
'''
#print(conll_str)
lines_all = [l.strip() for l in conll_str.splitlines()]
        headerLines = [l for l in lines_all if l.startswith('#')]
inputLines = [l.split('\t') for l in lines_all if l not in headerLines+['']]
if '\t' in headerLines[-1]:
headerLines = headerLines[:-1]
headerLines.append(self.header)
## for l in inputLines:
## print([l])
self.convertCDLICoNLLtoCoNLLU(inputLines)
#print(self.outputLines, ['\t'.join(l) for l in self.outputLines])
conll_str = '\n'.join(headerLines+['\t'.join(l) for l in self.outputLines])
self.__reset__()
return conll_str
def convert_from_file(self, filename):
'''
Convert CDLI-CoNLL to CoNLL-U from file.
'''
sp.run(['cdliconll2conllu', '-i', filename, '-v'], print_stdout=False)
cdli_conll_u = CC2CU()
#---/ CONLL-U <> CONLL-RDF /---------------------------------------------------
#
class CoNLL2RDF(common_functions):
'''
Wrapper around CoNLL-RDF:
https://github.com/acoli-repo/conll-rdf
'''
GIT_CONLLRDF = 'https://github.com/acoli-repo/conll-rdf.git'
CONLLRDF_PATH = os.path.join(_path, 'conll-rdf')
def __init__(self):
'''
'''
self.add_java_path()
if not os.path.exists(self.CONLLRDF_PATH):
self.install_CONLLRDF()
def add_java_path(self):
'''
Windows: Find and add Java/JDK/bin path to env.
'''
        self.JAVA_JDK_PATH = None  # avoid AttributeError below if no JDK is found
        self.JAVA_JRE_PATH = None
for b in ['', ' (x86)']:
pf = os.environ['ProgramFiles'].replace(b, '')
basic_java_path = os.path.join(pf, 'Java')
if os.path.exists(basic_java_path):
dirs_lst = os.listdir(basic_java_path)
jdk_lst = [jdk for jdk in dirs_lst if 'jdk' in jdk]
jre_lst = [jre for jre in dirs_lst if 'jre' in jre]
if jdk_lst!=[]:
self.JAVA_JDK_PATH = \
os.path.join(basic_java_path, jdk_lst[-1], 'bin')
self.JAVA_JRE_PATH = \
os.path.join(basic_java_path, jre_lst[-1], 'bin')
break
if not self.JAVA_JDK_PATH:
print(
'''No Java Development Kit installation found! '''
'''Download and install latest:\n'''
'''http://www.oracle.com/technetwork/'''
'''java/javase/downloads/index.html''')
return False
        if self.JAVA_JDK_PATH not in sp.env['PATH']:
            sp.env['PATH'] += os.pathsep + self.JAVA_JDK_PATH
        if self.JAVA_JRE_PATH not in sp.env['PATH']:
            sp.env['PATH'] += os.pathsep + self.JAVA_JRE_PATH
self.JAVAC_PATH = os.path.join(self.JAVA_JDK_PATH, 'javac.exe')
return True
def install_CONLLRDF(self):
'''
Install CoNLL-RDF:
1. Clone Github repo
2. Build Java libs
'''
sp.run(['git', 'clone', self.GIT_CONLLRDF])
self.compile_CONLLRDF()
def compile_CONLLRDF(self):
'''
Compile CoNLL-RDF Java libraries.
'''
dep_dict = {
'CoNLLStreamExtractor': 'CoNLL2RDF',
'CoNLLRDFAnnotator': 'CoNLLRDFFormatter',
'CoNLLRDFUpdater': 'CoNLLRDFViz'
}
src_path = os.path.join(
self.CONLLRDF_PATH, 'src', 'org', 'acoli', 'conll', 'rdf')
target_path = os.path.join(self.CONLLRDF_PATH, 'bin')
if not os.path.exists(target_path):
os.mkdir(target_path)
cp_vars = self.java_command(full_path=True, include_bin=True)[-1]
for f in os.listdir(src_path):
if '.java' in f and f.replace('.java', '') in dep_dict.keys():
src_files_path = os.path.join(src_path, f)
dep_src_file_path = os.path.join(src_path,
dep_dict[f.replace('.java', '')])
src_files_lst = [src_files_path, dep_src_file_path+'.java']
cp_path = cp_vars
self.compile_java(src_files_lst,
target_path,
cp_path)
def compile_java(self, src_files_lst, target_path, cp_path, cwd_path=None):
'''
Run Java compiler with command.
'''
self.run([r'%s' %self.JAVAC_PATH,
'-d', r'%s' %target_path,
'-g',
'-cp', r'%s' %cp_path,
]+[r'%s' %f for f in src_files_lst],
cwd_path=cwd_path)
def conll2rdf(self, f_path, columns_typ):
'''
        Run the Java CoNLL2RDF script (CoNLLStreamExtractor) to convert a CoNLL file to RDF.
'''
#self.define_columns(columns_typ)
command = self.CoNLLStreamExtractor_command() + ['../data/'] \
+ self.columns
        # run the extractor on the file and keep the Turtle output
        (rdf_str, errors) = self.run(command, stdin_path=f_path)
        self.dump_rdf(rdf_str, f_path)
def rdf2conll(self, columns, f_path=None, stdin_str=None,
decode_stdout=False, target_path=None):
'''
        Run the Java CoNLLRDFFormatter to convert RDF back to a CoNLL file.
'''
#self.define_columns(columns_type)
if f_path==None and stdin_str==None:
print('rdf2conll wrapper: specify path OR string.')
return None
command = self.CoNLLRDFFormatter_command() + ['-conll'] \
+ columns
(CONLLstr, errors) = self.run(
command,
cwd_path=f_path,
stdin_str=stdin_str,
decode_stdout=True)
CONLLstr = CONLLstr.replace(' #', ' \n#') \
.replace('\t#', '\n#').replace('\n\n', '\n')
if target_path:
self.dump(CONLLstr, target_path)
return CONLLstr
def get_stdin(self, stdin_path=None, stdin_str=None): #escape_unicode=False
'''
Get stdin from path or string to use with run.
'''
stdin = ''
if stdin_path==None and stdin_str==None:
return b''
if stdin_path:
with codecs.open(stdin_path, 'r', 'utf-8') as file:
stdin = file.read()
if 'etcsri' in stdin_path and '.conll' in stdin_path:
stdin = self.convert_ETCSRI(stdin)
elif stdin_str:
stdin = stdin_str
if type(stdin)!=bytes:
stdin = stdin.encode('utf-8')
## if escape_unicode==True:
## stdin = self.standardize_translit(stdin)
#print(stdin_str)
return stdin
def run(self, command, cwd_path=None, stdin_path=None, stdin_str=None,
decode_stdout=True):#, escape_unicode=False
'''
Open file, load it to stdin, run command, return stdout.
'''
stdin = self.get_stdin(
stdin_path, stdin_str)#, escape_unicode=escape_unicode)
if not cwd_path:
cwd_path=self.CONLLRDF_PATH
stdout = sp.run(
command,
cwd=cwd_path,
stdin=stdin,
print_stdout=False,
decode_stdout=decode_stdout
)
return self.filter_errors(stdout)
def filter_errors(self, stdout):
'''
Return (real_result, errors_or_warnings).
'''
shell_markers = [b'java.', b'log4j', b'org.apache', b'org.acoli']
typ = type(stdout)
if typ==str:
stdout = stdout.encode('utf-8')
shell_lst = []
for b in stdout.split(b'\n'):
|
stdout_lst = [b for b in stdout.split(b'\n') if b not in shell_lst]
if typ==bytes:
errors = b'\n'.join(shell_lst)
stdout = b'\n'.join(stdout_lst)
## print(stdout.decode('utf-8'))
## print(errors.decode('utf-8'))
elif typ==str:
errors = b'\n'.join(shell_lst).decode('utf-8')
stdout = b'\n'.join(stdout_lst).decode('utf-8')
## print(stdout)
## print(errors)
return (stdout, errors)
def CoNLLStreamExtractor_command(self):
'''
Return a list containing basic command to run CoNLLStreamExtractor
with no additional arguments.
'''
# Make command to run CoNLL2RDF with java
return self.java_command()+['org.acoli.conll.rdf.CoNLLStreamExtractor']
def CoNLLRDFFormatter_command(self):
'''
Return a list containing basic command to run CoNLLRDFFormatter
with no additional arguments.
'''
# Make command to run CoNLL2RDF with java
return self.java_command()+['org.acoli.conll.rdf.CoNLLRDFFormatter']
def java_command(self, full_path=False, include_bin=True):
'''
Return a list containing basic java command to the library.
Set path to 'full' to get full path output.
'''
        # Prepare java variables
dest = 'bin'
lib_path = os.path.join(self.CONLLRDF_PATH, 'lib')
if full_path==False:
libs = ';'.join(
['lib/%s' %l for l in os.listdir(lib_path)
if '.jar' in l])
elif full_path==True:
dest = os.path.join(self.CONLLRDF_PATH, dest)
libs = ';'.join(
[os.path.join(lib_path, l) for l in os.listdir(lib_path)
if '.jar' in l])
# Make command to run CoNLL2RDF with java
cp = libs
if include_bin==True:
cp = ';'.join([dest, libs])
return ['java', '-cp', cp]
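    ## Example result (illustrative; assumes bin/ plus two jars in lib/):
    ##   java_command() -> ['java', '-cp', 'bin;lib/foo.jar;lib/bar.jar']
    ## The ';' classpath separator is Windows-specific; on Linux/macOS it would
    ## need to be ':' (os.pathsep).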
def dump_rdf(self, rdf_str, f_path):
'''
        Receive the original path and RDF string, dump to file.
'''
rdf_str = "#new_text" + rdf_str.split("#new_text")[1]
filename = f_path.split('/')[-1].split('.')[0]+'.ttl'
dump_path = os.path.join(_path, 'data', 'conll-rdf', filename)
self.dump(rdf_str, dump_path)
#---/ SYNTAX PREANNOTATION /---------------------------------------------------
#
class syntax_preannotation(CoNLL2RDF):
'''
Class to preannotate turtle files with SPARQL update queries.
Extends ´CoNLL2RDF´.
'''
REQUEST_SRC = [
('remove-IGNORE', 0),
('extract-feats', 1),
('remove-MORPH2', 0),
('init-SHIFT', 1),
## ('REDUCE-adjective', 3),
## ('REDUCE-math-operators', 1), # <- additional rules for admin -
## ('REDUCE-numerals-chain', 6),
## ('REDUCE-time-num', 1),
## ('REDUCE-measurements', 1), # -->
## ('REDUCE-compound-verbs', 1),
## ('REDUCE-adnominal', 3),
## ('REDUCE-appos', 1),
## ('REDUCE-absolutive', 1),
## ('REDUCE-appos', 1), # again?
## ('REDUCE-adjective', 1), # again?
## ('REDUCE-appos', 4), # again?
## ('REDUCE-preposed-genitive', 1),
## ('REDUCE-arguments', 5), # again?
## ('REDUCE-adjective', 1), # again?
## ('REDUCE-to-HEAD', 1),
## ('remove-feats', 1),
('create-ID-and-DEP', 1),
('create-_HEAD', 1)
]
#other possible rules:
# PN <-- N (as in & instead of PN lugal)
# reduce remaining nouns to first verb as nmod (?)
# mu <-- V.MID
# (NU)
# |
# (ADJ) NU
# \ /
# UNIT\
# (...)/ \ (NU)
# ____________BASE__/
# / | | | \
# u4 ki giri iti (us)
# | | | | |
# NU PN PN (diri) mu
# | | | \
# (...) (...) MN V.MID--...
#
#
REQUEST_REMOVE_IGNORE = [
('remove-IGNORE', 1)
]
SPARQL_PATH = os.path.join(_path, 'syntax-preannotation', 'sparql')
OUTPUT_PATH = os.path.join(_path, 'data', 'conll-preannotated')
def __init__(self):
'''
'''
CoNLL2RDF.__init__(self)
    def load_requests(self, requests=[]):
        '''
        Return a list of SPARQL request paths built from ´requests´
        (defaults to ´self.REQUEST_SRC´).
        Specify repeats from int in ´r[1]´ when it is not ´None´.
        '''
        requests_lst = []
        if requests==[]:
            requests = self.REQUEST_SRC
        for r in requests:
            # Default to an empty repeat suffix so ´repeat´ is always defined.
            repeat = ''
            if r[1]!=None:
                repeat = '{%s}' %r[1]
            requests_lst.append(
                r'%s\%s.sparql%s' %(self.SPARQL_PATH, r[0], repeat))
        return requests_lst
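    # Example entry produced with the default REQUEST_SRC:
    #   SPARQL_PATH + r'\remove-IGNORE.sparql{0}'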
def preannotate(self, f_path):
'''
        Run SPARQL updates loaded via ´self.load_requests()´.
First command converts CoNLL to RDF and applies preannotation
rules to it. The second converts the file back to CoNLL.
'''
columns = [
'ID_NUM', 'FORM', 'BASE', 'MORPH2',
'POS', 'EPOS', 'HEAD', 'DEP', 'EDGE']
corpus = 'cdli'
override = {}
if 'etcsri' in f_path:
corpus = 'etcsri'
columns = [
'ID_NUM', 'FORM_ATF', 'BASE', 'MORPH2',
'POS', 'EPOS', 'HEAD', 'DEP', 'EDGE']
override = {
'FORM_ATF': 'FORM'}
c = conll_file(path=f_path, corpus=corpus)
c.configure_str_output(columns, override=override)
rdf_str = self.convert_to_conll_and_preannotate(c)
#print('zzzzzzzzzzzzzzzzz', rdf_str) #<-- PROBLEM HERE !!!! returns b''
filename, target_path, target_path_tree = self.get_path_data(f_path)
self.tree_output(rdf_str, target_path_tree)
conll_str = self.rdf2conll(columns=c.override_columns,
stdin_str=rdf_str, decode_stdout=False)
c.merge_columns_from_conll_str(conll_str, ['HEAD', ('EDGE', 'DEPREL')])
c.configure_str_output(['ID_NUM']+c.COLUMNS_CDLI[1:], override=override)
conll_u = cdli_conll_u.convert_from_str(str(c))+'\n' #<--convert to CoNLL-U
self.dump(conll_u, target_path)
def get_path_data(self, f_path):
        '''
        Return (filename, target_path, target_path_tree) for ´f_path´,
        with both target paths located under ´self.OUTPUT_PATH´.
        '''
filename = os.path.basename(f_path)
target_path = os.path.join(self.OUTPUT_PATH, filename)
target_path_tree = os.path.join(
self.OUTPUT_PATH, '%s_tree.html' %filename.split('.')[0])
return filename, target_path, target_path_tree
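    # Example: for a hypothetical f_path ending in 'P123456.conll' this returns
    #   ('P123456.conll',
    #    os.path.join(OUTPUT_PATH, 'P123456.conll'),
    #    os.path.join(OUTPUT_PATH, 'P123456_tree.html'))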
def convert_to_conll_and_preannotate(self, conll_obj):
'''
Convert CoNLL to RDF and preannotate with SPARQL.
'''
# !TODO!
# REPLACE ['http://oracc.museum.upenn.edu/etcsri/'] by context!
command = self.CoNLLStreamExtractor_command() \
+ ['http://oracc.museum.upenn.edu/etcsri/'] \
+ conll_obj.override_columns + ['-u'] \
+ self.load_requests()
run_dict={
'command': command, 'stdin_str': str(conll_obj),
'decode_stdout': False}
#, 'escape_unicode': True}
#print(run_dict) #<-- ALL GOOD
(rdf_str, errors) = self.run(**run_dict) #<-- PROBLEM SOMEWHERE HERE !!!! returns b''
print(errors) #Error in Parsing Data: Incorrect XPOSTAG at line:
return rdf_str
def tree_output(self, rdf_str, target_path=''):
'''
Return string with parsed RDF tree representation.
Dump to target_path when it is given.
'''
command = self.CoNLLRDFFormatter_command() + ['-grammar']
(tree_str, errors) = \
self.run(command, stdin_str=rdf_str, decode_stdout=True)
tree_html = a2h_conv.convert(tree_str)
tree_html = tree_html.replace('pre-wrap', 'pre')
if target_path!='':
self.dump(tree_html, target_path)
return tree_str
#---/ COMMANDS /---------------------------------------------------------------
#
'''
Preannotate all files in data/etcsri-conll-all, except all errors:
'''
##f_path = os.path.join(_path, 'data', 'etcsri-conll-all')
##sx = syntax_preannotation()
##for f in os.listdir(f_path):
## try:
## sx.preannotate(os.path.join(f_path, f))
## except Exception as e:
## raise e
## pass
'''
Preannotate all files in data/cdli-conll-all, except all errors:
'''
#f_path = os.path.join(_path, 'data', 'etcsri-conll')
#f_path = os.path.join(_path, 'data', 'cdli-jinyan-non-admin') #'etcsri-conll-all')
f_path = os.path.join(_path, 'data', 'cdli-conll-all')
#f_path = os.path.join(_path, 'data', 'evaluate')
preannotated = os.listdir(os.path.join(_path, 'data', 'conll-preannotated'))
exclude = [pa.replace('_tree.html', '.conll') for pa in preannotated if '_tree.html' in pa]
from list_errors import list_files_with_errors
errors_list = list_files_with_errors() # files previously annotated with errors
sx = syntax_preannotation()
for f in os.listdir(f_path):
if f in errors_list: #f not in exclude
try:
sx.preannotate(os.path.join(f_path, f))
except Exception as e:
raise e
pass
errors_list_new = list_files_with_errors()
print('old_errors', errors_list)
print('new_errors', errors_list_new)
#CC2CU()
#CoNLL2RDF()
#syntax_preannotation()
##c = CoNLL2RDF()
##c.rdf2conll("data\conll-rdf\P100188.ttl")
| for m in shell_markers:
if m in b:
shell_lst.append(b)
break | conditional_block |
imp.rs | use std::mem;
use aho_corasick::{self, packed, AhoCorasick, AhoCorasickBuilder};
use memchr::{memchr, memchr2, memchr3, memmem};
use regex_syntax::hir::literal::{Literal, Literals};
/// A prefix extracted from a compiled regular expression.
///
/// A regex prefix is a set of literal strings that *must* be matched at the
/// beginning of a regex in order for the entire regex to match. Similarly
/// for a regex suffix.
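///
/// For example, every match of the regex `foo(bar|baz)` must begin with the
/// literal `foo`, so a literal searcher can skip ahead in a haystack before
/// handing control to the full regex engine.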
#[derive(Clone, Debug)]
pub struct LiteralSearcher {
complete: bool,
lcp: Memmem,
lcs: Memmem,
matcher: Matcher,
}
#[derive(Clone, Debug)]
enum Matcher {
/// No literals. (Never advances through the input.)
Empty,
/// A set of four or more single byte literals.
Bytes(SingleByteSet),
/// A single substring, using vector accelerated routines when available.
Memmem(Memmem),
/// An Aho-Corasick automaton.
AC { ac: AhoCorasick<u32>, lits: Vec<Literal> },
/// A packed multiple substring searcher, using SIMD.
///
/// Note that Aho-Corasick will actually use this packed searcher
/// internally automatically, however, there is some overhead associated
/// with going through the Aho-Corasick machinery. So using the packed
/// searcher directly results in some gains.
Packed { s: packed::Searcher, lits: Vec<Literal> },
}
impl LiteralSearcher {
/// Returns a matcher that never matches and never advances the input.
pub fn empty() -> Self {
Self::new(Literals::empty(), Matcher::Empty)
}
/// Returns a matcher for literal prefixes from the given set.
pub fn prefixes(lits: Literals) -> Self {
let matcher = Matcher::prefixes(&lits);
Self::new(lits, matcher)
}
/// Returns a matcher for literal suffixes from the given set.
pub fn suffixes(lits: Literals) -> Self {
let matcher = Matcher::suffixes(&lits);
Self::new(lits, matcher)
}
fn new(lits: Literals, matcher: Matcher) -> Self {
let complete = lits.all_complete();
LiteralSearcher {
complete,
lcp: Memmem::new(lits.longest_common_prefix()),
lcs: Memmem::new(lits.longest_common_suffix()),
matcher,
}
}
/// Returns true if all matches comprise the entire regular expression.
///
/// This does not necessarily mean that a literal match implies a match
/// of the regular expression. For example, the regular expression `^a`
/// is comprised of a single complete literal `a`, but the regular
/// expression demands that it only match at the beginning of a string.
pub fn complete(&self) -> bool {
self.complete && !self.is_empty()
}
/// Find the position of a literal in `haystack` if it exists.
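    ///
    /// For instance, with prefix literals `foo` and `bar`, calling `find` on
    /// `b"xxbarzz"` yields the match range `(2, 5)`.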
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn find(&self, haystack: &[u8]) -> Option<(usize, usize)> {
use self::Matcher::*;
match self.matcher {
Empty => Some((0, 0)),
Bytes(ref sset) => sset.find(haystack).map(|i| (i, i + 1)),
Memmem(ref s) => s.find(haystack).map(|i| (i, i + s.len())),
AC { ref ac, .. } => {
ac.find(haystack).map(|m| (m.start(), m.end()))
}
Packed { ref s, .. } => {
s.find(haystack).map(|m| (m.start(), m.end()))
}
}
}
/// Like find, except matches must start at index `0`.
pub fn find_start(&self, haystack: &[u8]) -> Option<(usize, usize)> {
for lit in self.iter() {
if lit.len() > haystack.len() {
continue;
}
if lit == &haystack[0..lit.len()] {
return Some((0, lit.len()));
}
}
None
}
/// Like find, except matches must end at index `haystack.len()`.
pub fn find_end(&self, haystack: &[u8]) -> Option<(usize, usize)> {
for lit in self.iter() {
if lit.len() > haystack.len() {
continue;
}
if lit == &haystack[haystack.len() - lit.len()..] {
return Some((haystack.len() - lit.len(), haystack.len()));
}
}
None
}
/// Returns an iterator over all literals to be matched.
pub fn iter(&self) -> LiteralIter<'_> {
match self.matcher {
Matcher::Empty => LiteralIter::Empty,
Matcher::Bytes(ref sset) => LiteralIter::Bytes(&sset.dense),
Matcher::Memmem(ref s) => LiteralIter::Single(&s.finder.needle()),
Matcher::AC { ref lits, .. } => LiteralIter::AC(lits),
Matcher::Packed { ref lits, .. } => LiteralIter::Packed(lits),
}
}
/// Returns a matcher for the longest common prefix of this matcher.
pub fn lcp(&self) -> &Memmem {
&self.lcp
}
/// Returns a matcher for the longest common suffix of this matcher.
pub fn lcs(&self) -> &Memmem {
&self.lcs
}
/// Returns true iff this prefix is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the number of prefixes in this machine.
pub fn len(&self) -> usize {
use self::Matcher::*;
match self.matcher {
Empty => 0,
Bytes(ref sset) => sset.dense.len(),
Memmem(_) => 1,
AC { ref ac, .. } => ac.pattern_count(),
Packed { ref lits, .. } => lits.len(),
}
}
/// Return the approximate heap usage of literals in bytes.
pub fn approximate_size(&self) -> usize {
use self::Matcher::*;
match self.matcher {
Empty => 0,
Bytes(ref sset) => sset.approximate_size(),
Memmem(ref single) => single.approximate_size(),
AC { ref ac, .. } => ac.heap_bytes(),
Packed { ref s, .. } => s.heap_bytes(),
}
}
}
impl Matcher {
fn prefixes(lits: &Literals) -> Self {
let sset = SingleByteSet::prefixes(lits);
Matcher::new(lits, sset)
}
fn suffixes(lits: &Literals) -> Self {
let sset = SingleByteSet::suffixes(lits);
Matcher::new(lits, sset)
}
fn new(lits: &Literals, sset: SingleByteSet) -> Self |
}
#[derive(Debug)]
pub enum LiteralIter<'a> {
Empty,
Bytes(&'a [u8]),
Single(&'a [u8]),
AC(&'a [Literal]),
Packed(&'a [Literal]),
}
impl<'a> Iterator for LiteralIter<'a> {
type Item = &'a [u8];
fn next(&mut self) -> Option<Self::Item> {
match *self {
LiteralIter::Empty => None,
LiteralIter::Bytes(ref mut many) => {
if many.is_empty() {
None
} else {
let next = &many[0..1];
*many = &many[1..];
Some(next)
}
}
LiteralIter::Single(ref mut one) => {
if one.is_empty() {
None
} else {
let next = &one[..];
*one = &[];
Some(next)
}
}
LiteralIter::AC(ref mut lits) => {
if lits.is_empty() {
None
} else {
let next = &lits[0];
*lits = &lits[1..];
Some(&**next)
}
}
LiteralIter::Packed(ref mut lits) => {
if lits.is_empty() {
None
} else {
let next = &lits[0];
*lits = &lits[1..];
Some(&**next)
}
}
}
}
}
#[derive(Clone, Debug)]
struct SingleByteSet {
sparse: Vec<bool>,
dense: Vec<u8>,
complete: bool,
all_ascii: bool,
}
impl SingleByteSet {
fn new() -> SingleByteSet {
SingleByteSet {
sparse: vec![false; 256],
dense: vec![],
complete: true,
all_ascii: true,
}
}
fn prefixes(lits: &Literals) -> SingleByteSet {
let mut sset = SingleByteSet::new();
for lit in lits.literals() {
sset.complete = sset.complete && lit.len() == 1;
if let Some(&b) = lit.get(0) {
if !sset.sparse[b as usize] {
if b > 0x7F {
sset.all_ascii = false;
}
sset.dense.push(b);
sset.sparse[b as usize] = true;
}
}
}
sset
}
fn suffixes(lits: &Literals) -> SingleByteSet {
let mut sset = SingleByteSet::new();
for lit in lits.literals() {
sset.complete = sset.complete && lit.len() == 1;
if let Some(&b) = lit.get(lit.len().checked_sub(1).unwrap()) {
if !sset.sparse[b as usize] {
if b > 0x7F {
sset.all_ascii = false;
}
sset.dense.push(b);
sset.sparse[b as usize] = true;
}
}
}
sset
}
/// Faster find that special cases certain sizes to use memchr.
#[cfg_attr(feature = "perf-inline", inline(always))]
fn find(&self, text: &[u8]) -> Option<usize> {
match self.dense.len() {
0 => None,
1 => memchr(self.dense[0], text),
2 => memchr2(self.dense[0], self.dense[1], text),
3 => memchr3(self.dense[0], self.dense[1], self.dense[2], text),
_ => self._find(text),
}
}
/// Generic find that works on any sized set.
fn _find(&self, haystack: &[u8]) -> Option<usize> {
for (i, &b) in haystack.iter().enumerate() {
if self.sparse[b as usize] {
return Some(i);
}
}
None
}
fn approximate_size(&self) -> usize {
(self.dense.len() * mem::size_of::<u8>())
+ (self.sparse.len() * mem::size_of::<bool>())
}
}
/// A simple wrapper around the memchr crate's memmem implementation.
///
/// The API this exposes mirrors the API of previous substring searchers that
/// this supplanted.
#[derive(Clone, Debug)]
pub struct Memmem {
finder: memmem::Finder<'static>,
char_len: usize,
}
impl Memmem {
fn new(pat: &[u8]) -> Memmem {
Memmem {
finder: memmem::Finder::new(pat).into_owned(),
char_len: char_len_lossy(pat),
}
}
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn find(&self, haystack: &[u8]) -> Option<usize> {
self.finder.find(haystack)
}
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn is_suffix(&self, text: &[u8]) -> bool {
if text.len() < self.len() {
return false;
}
&text[text.len() - self.len()..] == self.finder.needle()
}
pub fn len(&self) -> usize {
self.finder.needle().len()
}
pub fn char_len(&self) -> usize {
self.char_len
}
fn approximate_size(&self) -> usize {
self.finder.needle().len() * mem::size_of::<u8>()
}
}
fn char_len_lossy(bytes: &[u8]) -> usize {
String::from_utf8_lossy(bytes).chars().count()
}
| {
if lits.literals().is_empty() {
return Matcher::Empty;
}
if sset.dense.len() >= 26 {
// Avoid trying to match a large number of single bytes.
// This is *very* sensitive to a frequency analysis comparison
// between the bytes in sset and the composition of the haystack.
// No matter the size of sset, if its members all are rare in the
// haystack, then it'd be worth using it. How to tune this... IDK.
// ---AG
return Matcher::Empty;
}
if sset.complete {
return Matcher::Bytes(sset);
}
if lits.literals().len() == 1 {
return Matcher::Memmem(Memmem::new(&lits.literals()[0]));
}
let pats = lits.literals().to_owned();
let is_aho_corasick_fast = sset.dense.len() <= 1 && sset.all_ascii;
if lits.literals().len() <= 100 && !is_aho_corasick_fast {
let mut builder = packed::Config::new()
.match_kind(packed::MatchKind::LeftmostFirst)
.builder();
if let Some(s) = builder.extend(&pats).build() {
return Matcher::Packed { s, lits: pats };
}
}
let ac = AhoCorasickBuilder::new()
.match_kind(aho_corasick::MatchKind::LeftmostFirst)
.dfa(true)
.build_with_size::<u32, _, _>(&pats)
.unwrap();
Matcher::AC { ac, lits: pats }
} | identifier_body |
imp.rs | use std::mem;
use aho_corasick::{self, packed, AhoCorasick, AhoCorasickBuilder};
use memchr::{memchr, memchr2, memchr3, memmem};
use regex_syntax::hir::literal::{Literal, Literals};
/// A prefix extracted from a compiled regular expression.
///
/// A regex prefix is a set of literal strings that *must* be matched at the
/// beginning of a regex in order for the entire regex to match. Similarly
/// for a regex suffix.
#[derive(Clone, Debug)]
pub struct LiteralSearcher {
complete: bool,
lcp: Memmem,
lcs: Memmem,
matcher: Matcher,
}
#[derive(Clone, Debug)]
enum Matcher {
/// No literals. (Never advances through the input.)
Empty,
/// A set of four or more single byte literals.
Bytes(SingleByteSet),
/// A single substring, using vector accelerated routines when available.
Memmem(Memmem),
/// An Aho-Corasick automaton.
AC { ac: AhoCorasick<u32>, lits: Vec<Literal> },
/// A packed multiple substring searcher, using SIMD.
///
/// Note that Aho-Corasick will actually use this packed searcher
/// internally automatically, however, there is some overhead associated
/// with going through the Aho-Corasick machinery. So using the packed
/// searcher directly results in some gains.
Packed { s: packed::Searcher, lits: Vec<Literal> },
}
impl LiteralSearcher {
/// Returns a matcher that never matches and never advances the input.
pub fn empty() -> Self {
Self::new(Literals::empty(), Matcher::Empty)
}
/// Returns a matcher for literal prefixes from the given set.
pub fn prefixes(lits: Literals) -> Self {
let matcher = Matcher::prefixes(&lits);
Self::new(lits, matcher)
}
/// Returns a matcher for literal suffixes from the given set.
pub fn suffixes(lits: Literals) -> Self {
let matcher = Matcher::suffixes(&lits);
Self::new(lits, matcher)
}
fn new(lits: Literals, matcher: Matcher) -> Self {
let complete = lits.all_complete();
LiteralSearcher {
complete,
lcp: Memmem::new(lits.longest_common_prefix()),
lcs: Memmem::new(lits.longest_common_suffix()),
matcher,
}
}
/// Returns true if all matches comprise the entire regular expression.
///
/// This does not necessarily mean that a literal match implies a match
/// of the regular expression. For example, the regular expression `^a`
/// is comprised of a single complete literal `a`, but the regular
/// expression demands that it only match at the beginning of a string.
pub fn complete(&self) -> bool {
self.complete && !self.is_empty()
}
/// Find the position of a literal in `haystack` if it exists.
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn find(&self, haystack: &[u8]) -> Option<(usize, usize)> {
use self::Matcher::*;
match self.matcher {
Empty => Some((0, 0)),
Bytes(ref sset) => sset.find(haystack).map(|i| (i, i + 1)),
Memmem(ref s) => s.find(haystack).map(|i| (i, i + s.len())),
AC { ref ac, .. } => {
ac.find(haystack).map(|m| (m.start(), m.end()))
}
Packed { ref s, .. } => {
s.find(haystack).map(|m| (m.start(), m.end()))
}
}
}
/// Like find, except matches must start at index `0`.
pub fn | (&self, haystack: &[u8]) -> Option<(usize, usize)> {
for lit in self.iter() {
if lit.len() > haystack.len() {
continue;
}
if lit == &haystack[0..lit.len()] {
return Some((0, lit.len()));
}
}
None
}
/// Like find, except matches must end at index `haystack.len()`.
pub fn find_end(&self, haystack: &[u8]) -> Option<(usize, usize)> {
for lit in self.iter() {
if lit.len() > haystack.len() {
continue;
}
if lit == &haystack[haystack.len() - lit.len()..] {
return Some((haystack.len() - lit.len(), haystack.len()));
}
}
None
}
/// Returns an iterator over all literals to be matched.
pub fn iter(&self) -> LiteralIter<'_> {
match self.matcher {
Matcher::Empty => LiteralIter::Empty,
Matcher::Bytes(ref sset) => LiteralIter::Bytes(&sset.dense),
Matcher::Memmem(ref s) => LiteralIter::Single(&s.finder.needle()),
Matcher::AC { ref lits, .. } => LiteralIter::AC(lits),
Matcher::Packed { ref lits, .. } => LiteralIter::Packed(lits),
}
}
/// Returns a matcher for the longest common prefix of this matcher.
pub fn lcp(&self) -> &Memmem {
&self.lcp
}
/// Returns a matcher for the longest common suffix of this matcher.
pub fn lcs(&self) -> &Memmem {
&self.lcs
}
/// Returns true iff this prefix is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the number of prefixes in this machine.
pub fn len(&self) -> usize {
use self::Matcher::*;
match self.matcher {
Empty => 0,
Bytes(ref sset) => sset.dense.len(),
Memmem(_) => 1,
AC { ref ac, .. } => ac.pattern_count(),
Packed { ref lits, .. } => lits.len(),
}
}
/// Return the approximate heap usage of literals in bytes.
pub fn approximate_size(&self) -> usize {
use self::Matcher::*;
match self.matcher {
Empty => 0,
Bytes(ref sset) => sset.approximate_size(),
Memmem(ref single) => single.approximate_size(),
AC { ref ac, .. } => ac.heap_bytes(),
Packed { ref s, .. } => s.heap_bytes(),
}
}
}
impl Matcher {
fn prefixes(lits: &Literals) -> Self {
let sset = SingleByteSet::prefixes(lits);
Matcher::new(lits, sset)
}
fn suffixes(lits: &Literals) -> Self {
let sset = SingleByteSet::suffixes(lits);
Matcher::new(lits, sset)
}
fn new(lits: &Literals, sset: SingleByteSet) -> Self {
if lits.literals().is_empty() {
return Matcher::Empty;
}
if sset.dense.len() >= 26 {
// Avoid trying to match a large number of single bytes.
// This is *very* sensitive to a frequency analysis comparison
// between the bytes in sset and the composition of the haystack.
// No matter the size of sset, if its members all are rare in the
// haystack, then it'd be worth using it. How to tune this... IDK.
// ---AG
return Matcher::Empty;
}
if sset.complete {
return Matcher::Bytes(sset);
}
if lits.literals().len() == 1 {
return Matcher::Memmem(Memmem::new(&lits.literals()[0]));
}
let pats = lits.literals().to_owned();
let is_aho_corasick_fast = sset.dense.len() <= 1 && sset.all_ascii;
if lits.literals().len() <= 100 && !is_aho_corasick_fast {
let mut builder = packed::Config::new()
.match_kind(packed::MatchKind::LeftmostFirst)
.builder();
if let Some(s) = builder.extend(&pats).build() {
return Matcher::Packed { s, lits: pats };
}
}
let ac = AhoCorasickBuilder::new()
.match_kind(aho_corasick::MatchKind::LeftmostFirst)
.dfa(true)
.build_with_size::<u32, _, _>(&pats)
.unwrap();
Matcher::AC { ac, lits: pats }
}
}
#[derive(Debug)]
pub enum LiteralIter<'a> {
Empty,
Bytes(&'a [u8]),
Single(&'a [u8]),
AC(&'a [Literal]),
Packed(&'a [Literal]),
}
impl<'a> Iterator for LiteralIter<'a> {
type Item = &'a [u8];
fn next(&mut self) -> Option<Self::Item> {
match *self {
LiteralIter::Empty => None,
LiteralIter::Bytes(ref mut many) => {
if many.is_empty() {
None
} else {
let next = &many[0..1];
*many = &many[1..];
Some(next)
}
}
LiteralIter::Single(ref mut one) => {
if one.is_empty() {
None
} else {
let next = &one[..];
*one = &[];
Some(next)
}
}
LiteralIter::AC(ref mut lits) => {
if lits.is_empty() {
None
} else {
let next = &lits[0];
*lits = &lits[1..];
Some(&**next)
}
}
LiteralIter::Packed(ref mut lits) => {
if lits.is_empty() {
None
} else {
let next = &lits[0];
*lits = &lits[1..];
Some(&**next)
}
}
}
}
}
#[derive(Clone, Debug)]
struct SingleByteSet {
sparse: Vec<bool>,
dense: Vec<u8>,
complete: bool,
all_ascii: bool,
}
impl SingleByteSet {
fn new() -> SingleByteSet {
SingleByteSet {
sparse: vec![false; 256],
dense: vec![],
complete: true,
all_ascii: true,
}
}
fn prefixes(lits: &Literals) -> SingleByteSet {
let mut sset = SingleByteSet::new();
for lit in lits.literals() {
sset.complete = sset.complete && lit.len() == 1;
if let Some(&b) = lit.get(0) {
if !sset.sparse[b as usize] {
if b > 0x7F {
sset.all_ascii = false;
}
sset.dense.push(b);
sset.sparse[b as usize] = true;
}
}
}
sset
}
fn suffixes(lits: &Literals) -> SingleByteSet {
let mut sset = SingleByteSet::new();
for lit in lits.literals() {
sset.complete = sset.complete && lit.len() == 1;
if let Some(&b) = lit.get(lit.len().checked_sub(1).unwrap()) {
if !sset.sparse[b as usize] {
if b > 0x7F {
sset.all_ascii = false;
}
sset.dense.push(b);
sset.sparse[b as usize] = true;
}
}
}
sset
}
/// Faster find that special cases certain sizes to use memchr.
#[cfg_attr(feature = "perf-inline", inline(always))]
fn find(&self, text: &[u8]) -> Option<usize> {
match self.dense.len() {
0 => None,
1 => memchr(self.dense[0], text),
2 => memchr2(self.dense[0], self.dense[1], text),
3 => memchr3(self.dense[0], self.dense[1], self.dense[2], text),
_ => self._find(text),
}
}
/// Generic find that works on any sized set.
fn _find(&self, haystack: &[u8]) -> Option<usize> {
for (i, &b) in haystack.iter().enumerate() {
if self.sparse[b as usize] {
return Some(i);
}
}
None
}
fn approximate_size(&self) -> usize {
(self.dense.len() * mem::size_of::<u8>())
+ (self.sparse.len() * mem::size_of::<bool>())
}
}
/// A simple wrapper around the memchr crate's memmem implementation.
///
/// The API this exposes mirrors the API of previous substring searchers that
/// this supplanted.
#[derive(Clone, Debug)]
pub struct Memmem {
finder: memmem::Finder<'static>,
char_len: usize,
}
impl Memmem {
fn new(pat: &[u8]) -> Memmem {
Memmem {
finder: memmem::Finder::new(pat).into_owned(),
char_len: char_len_lossy(pat),
}
}
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn find(&self, haystack: &[u8]) -> Option<usize> {
self.finder.find(haystack)
}
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn is_suffix(&self, text: &[u8]) -> bool {
if text.len() < self.len() {
return false;
}
&text[text.len() - self.len()..] == self.finder.needle()
}
pub fn len(&self) -> usize {
self.finder.needle().len()
}
pub fn char_len(&self) -> usize {
self.char_len
}
fn approximate_size(&self) -> usize {
self.finder.needle().len() * mem::size_of::<u8>()
}
}
fn char_len_lossy(bytes: &[u8]) -> usize {
String::from_utf8_lossy(bytes).chars().count()
}
| find_start | identifier_name |
imp.rs | use std::mem;
use aho_corasick::{self, packed, AhoCorasick, AhoCorasickBuilder};
use memchr::{memchr, memchr2, memchr3, memmem};
use regex_syntax::hir::literal::{Literal, Literals};
/// A prefix extracted from a compiled regular expression.
///
/// A regex prefix is a set of literal strings that *must* be matched at the
/// beginning of a regex in order for the entire regex to match. Similarly
/// for a regex suffix.
#[derive(Clone, Debug)]
pub struct LiteralSearcher {
complete: bool,
lcp: Memmem,
lcs: Memmem,
matcher: Matcher,
}
#[derive(Clone, Debug)]
enum Matcher {
/// No literals. (Never advances through the input.)
Empty,
/// A set of four or more single byte literals.
Bytes(SingleByteSet),
/// A single substring, using vector accelerated routines when available.
Memmem(Memmem),
/// An Aho-Corasick automaton.
AC { ac: AhoCorasick<u32>, lits: Vec<Literal> },
/// A packed multiple substring searcher, using SIMD.
///
/// Note that Aho-Corasick will actually use this packed searcher
/// internally automatically, however, there is some overhead associated
/// with going through the Aho-Corasick machinery. So using the packed
/// searcher directly results in some gains.
Packed { s: packed::Searcher, lits: Vec<Literal> },
}
impl LiteralSearcher {
/// Returns a matcher that never matches and never advances the input.
pub fn empty() -> Self {
Self::new(Literals::empty(), Matcher::Empty)
}
/// Returns a matcher for literal prefixes from the given set.
pub fn prefixes(lits: Literals) -> Self {
let matcher = Matcher::prefixes(&lits);
Self::new(lits, matcher)
}
/// Returns a matcher for literal suffixes from the given set.
pub fn suffixes(lits: Literals) -> Self {
let matcher = Matcher::suffixes(&lits);
Self::new(lits, matcher)
}
fn new(lits: Literals, matcher: Matcher) -> Self {
let complete = lits.all_complete();
LiteralSearcher {
complete,
lcp: Memmem::new(lits.longest_common_prefix()),
lcs: Memmem::new(lits.longest_common_suffix()),
matcher,
}
}
/// Returns true if all matches comprise the entire regular expression.
///
/// This does not necessarily mean that a literal match implies a match
/// of the regular expression. For example, the regular expression `^a`
/// is comprised of a single complete literal `a`, but the regular
/// expression demands that it only match at the beginning of a string.
pub fn complete(&self) -> bool {
self.complete && !self.is_empty()
}
/// Find the position of a literal in `haystack` if it exists.
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn find(&self, haystack: &[u8]) -> Option<(usize, usize)> {
use self::Matcher::*;
match self.matcher {
Empty => Some((0, 0)),
Bytes(ref sset) => sset.find(haystack).map(|i| (i, i + 1)),
Memmem(ref s) => s.find(haystack).map(|i| (i, i + s.len())),
AC { ref ac, .. } => {
ac.find(haystack).map(|m| (m.start(), m.end()))
}
Packed { ref s, .. } => {
s.find(haystack).map(|m| (m.start(), m.end()))
}
}
}
/// Like find, except matches must start at index `0`.
pub fn find_start(&self, haystack: &[u8]) -> Option<(usize, usize)> {
for lit in self.iter() {
if lit.len() > haystack.len() {
continue;
}
if lit == &haystack[0..lit.len()] {
return Some((0, lit.len()));
}
}
None
}
/// Like find, except matches must end at index `haystack.len()`.
pub fn find_end(&self, haystack: &[u8]) -> Option<(usize, usize)> {
for lit in self.iter() {
if lit.len() > haystack.len() {
continue;
}
if lit == &haystack[haystack.len() - lit.len()..] {
return Some((haystack.len() - lit.len(), haystack.len()));
}
}
None
}
/// Returns an iterator over all literals to be matched.
pub fn iter(&self) -> LiteralIter<'_> {
match self.matcher {
Matcher::Empty => LiteralIter::Empty,
Matcher::Bytes(ref sset) => LiteralIter::Bytes(&sset.dense),
Matcher::Memmem(ref s) => LiteralIter::Single(&s.finder.needle()),
Matcher::AC { ref lits, .. } => LiteralIter::AC(lits),
Matcher::Packed { ref lits, .. } => LiteralIter::Packed(lits),
}
}
/// Returns a matcher for the longest common prefix of this matcher.
pub fn lcp(&self) -> &Memmem {
&self.lcp
}
/// Returns a matcher for the longest common suffix of this matcher.
pub fn lcs(&self) -> &Memmem {
&self.lcs
}
/// Returns true iff this prefix is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the number of prefixes in this machine.
pub fn len(&self) -> usize {
use self::Matcher::*;
match self.matcher {
Empty => 0,
Bytes(ref sset) => sset.dense.len(),
Memmem(_) => 1,
AC { ref ac, .. } => ac.pattern_count(),
Packed { ref lits, .. } => lits.len(),
}
}
/// Return the approximate heap usage of literals in bytes.
pub fn approximate_size(&self) -> usize {
use self::Matcher::*;
match self.matcher {
Empty => 0,
Bytes(ref sset) => sset.approximate_size(),
Memmem(ref single) => single.approximate_size(),
AC { ref ac, .. } => ac.heap_bytes(),
Packed { ref s, .. } => s.heap_bytes(),
}
}
}
impl Matcher {
fn prefixes(lits: &Literals) -> Self {
let sset = SingleByteSet::prefixes(lits);
Matcher::new(lits, sset)
}
fn suffixes(lits: &Literals) -> Self {
let sset = SingleByteSet::suffixes(lits);
Matcher::new(lits, sset)
}
fn new(lits: &Literals, sset: SingleByteSet) -> Self {
if lits.literals().is_empty() {
return Matcher::Empty;
}
if sset.dense.len() >= 26 {
// Avoid trying to match a large number of single bytes.
// This is *very* sensitive to a frequency analysis comparison
// between the bytes in sset and the composition of the haystack.
// No matter the size of sset, if its members all are rare in the
// haystack, then it'd be worth using it. How to tune this... IDK.
// ---AG
return Matcher::Empty;
}
if sset.complete {
return Matcher::Bytes(sset);
}
if lits.literals().len() == 1 {
return Matcher::Memmem(Memmem::new(&lits.literals()[0]));
}
let pats = lits.literals().to_owned();
let is_aho_corasick_fast = sset.dense.len() <= 1 && sset.all_ascii;
if lits.literals().len() <= 100 && !is_aho_corasick_fast {
let mut builder = packed::Config::new()
.match_kind(packed::MatchKind::LeftmostFirst)
.builder();
if let Some(s) = builder.extend(&pats).build() {
return Matcher::Packed { s, lits: pats };
}
}
let ac = AhoCorasickBuilder::new()
.match_kind(aho_corasick::MatchKind::LeftmostFirst)
.dfa(true)
.build_with_size::<u32, _, _>(&pats)
.unwrap();
Matcher::AC { ac, lits: pats }
}
}
#[derive(Debug)]
pub enum LiteralIter<'a> {
Empty,
Bytes(&'a [u8]),
Single(&'a [u8]),
AC(&'a [Literal]),
Packed(&'a [Literal]),
}
impl<'a> Iterator for LiteralIter<'a> {
type Item = &'a [u8];
fn next(&mut self) -> Option<Self::Item> {
match *self {
LiteralIter::Empty => None,
LiteralIter::Bytes(ref mut many) => {
if many.is_empty() {
None
} else {
let next = &many[0..1];
*many = &many[1..];
Some(next)
}
}
LiteralIter::Single(ref mut one) => {
if one.is_empty() {
None
} else {
let next = &one[..];
*one = &[];
Some(next)
}
}
LiteralIter::AC(ref mut lits) => {
if lits.is_empty() {
None
} else {
let next = &lits[0];
*lits = &lits[1..];
Some(&**next)
}
}
LiteralIter::Packed(ref mut lits) => {
if lits.is_empty() {
None
} else {
let next = &lits[0];
*lits = &lits[1..];
Some(&**next)
}
}
}
}
}
#[derive(Clone, Debug)]
struct SingleByteSet {
sparse: Vec<bool>,
dense: Vec<u8>,
complete: bool,
all_ascii: bool,
}
impl SingleByteSet {
fn new() -> SingleByteSet {
SingleByteSet {
sparse: vec![false; 256],
dense: vec![], | }
}
fn prefixes(lits: &Literals) -> SingleByteSet {
let mut sset = SingleByteSet::new();
for lit in lits.literals() {
sset.complete = sset.complete && lit.len() == 1;
if let Some(&b) = lit.get(0) {
if !sset.sparse[b as usize] {
if b > 0x7F {
sset.all_ascii = false;
}
sset.dense.push(b);
sset.sparse[b as usize] = true;
}
}
}
sset
}
fn suffixes(lits: &Literals) -> SingleByteSet {
let mut sset = SingleByteSet::new();
for lit in lits.literals() {
sset.complete = sset.complete && lit.len() == 1;
if let Some(&b) = lit.get(lit.len().checked_sub(1).unwrap()) {
if !sset.sparse[b as usize] {
if b > 0x7F {
sset.all_ascii = false;
}
sset.dense.push(b);
sset.sparse[b as usize] = true;
}
}
}
sset
}
/// Faster find that special cases certain sizes to use memchr.
#[cfg_attr(feature = "perf-inline", inline(always))]
fn find(&self, text: &[u8]) -> Option<usize> {
match self.dense.len() {
0 => None,
1 => memchr(self.dense[0], text),
2 => memchr2(self.dense[0], self.dense[1], text),
3 => memchr3(self.dense[0], self.dense[1], self.dense[2], text),
_ => self._find(text),
}
}
/// Generic find that works on any sized set.
fn _find(&self, haystack: &[u8]) -> Option<usize> {
for (i, &b) in haystack.iter().enumerate() {
if self.sparse[b as usize] {
return Some(i);
}
}
None
}
fn approximate_size(&self) -> usize {
(self.dense.len() * mem::size_of::<u8>())
+ (self.sparse.len() * mem::size_of::<bool>())
}
}
/// A simple wrapper around the memchr crate's memmem implementation.
///
/// The API this exposes mirrors the API of previous substring searchers that
/// this supplanted.
#[derive(Clone, Debug)]
pub struct Memmem {
finder: memmem::Finder<'static>,
char_len: usize,
}
impl Memmem {
fn new(pat: &[u8]) -> Memmem {
Memmem {
finder: memmem::Finder::new(pat).into_owned(),
char_len: char_len_lossy(pat),
}
}
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn find(&self, haystack: &[u8]) -> Option<usize> {
self.finder.find(haystack)
}
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn is_suffix(&self, text: &[u8]) -> bool {
if text.len() < self.len() {
return false;
}
&text[text.len() - self.len()..] == self.finder.needle()
}
pub fn len(&self) -> usize {
self.finder.needle().len()
}
pub fn char_len(&self) -> usize {
self.char_len
}
fn approximate_size(&self) -> usize {
self.finder.needle().len() * mem::size_of::<u8>()
}
}
fn char_len_lossy(bytes: &[u8]) -> usize {
String::from_utf8_lossy(bytes).chars().count()
} | complete: true,
all_ascii: true, | random_line_split |
imp.rs | use std::mem;
use aho_corasick::{self, packed, AhoCorasick, AhoCorasickBuilder};
use memchr::{memchr, memchr2, memchr3, memmem};
use regex_syntax::hir::literal::{Literal, Literals};
/// A prefix extracted from a compiled regular expression.
///
/// A regex prefix is a set of literal strings that *must* be matched at the
/// beginning of a regex in order for the entire regex to match. Similarly
/// for a regex suffix.
#[derive(Clone, Debug)]
pub struct LiteralSearcher {
complete: bool,
lcp: Memmem,
lcs: Memmem,
matcher: Matcher,
}
#[derive(Clone, Debug)]
enum Matcher {
/// No literals. (Never advances through the input.)
Empty,
/// A set of four or more single byte literals.
Bytes(SingleByteSet),
/// A single substring, using vector accelerated routines when available.
Memmem(Memmem),
/// An Aho-Corasick automaton.
AC { ac: AhoCorasick<u32>, lits: Vec<Literal> },
/// A packed multiple substring searcher, using SIMD.
///
/// Note that Aho-Corasick will actually use this packed searcher
/// internally automatically, however, there is some overhead associated
/// with going through the Aho-Corasick machinery. So using the packed
/// searcher directly results in some gains.
Packed { s: packed::Searcher, lits: Vec<Literal> },
}
impl LiteralSearcher {
/// Returns a matcher that never matches and never advances the input.
pub fn empty() -> Self {
Self::new(Literals::empty(), Matcher::Empty)
}
/// Returns a matcher for literal prefixes from the given set.
pub fn prefixes(lits: Literals) -> Self {
let matcher = Matcher::prefixes(&lits);
Self::new(lits, matcher)
}
/// Returns a matcher for literal suffixes from the given set.
pub fn suffixes(lits: Literals) -> Self {
let matcher = Matcher::suffixes(&lits);
Self::new(lits, matcher)
}
fn new(lits: Literals, matcher: Matcher) -> Self {
let complete = lits.all_complete();
LiteralSearcher {
complete,
lcp: Memmem::new(lits.longest_common_prefix()),
lcs: Memmem::new(lits.longest_common_suffix()),
matcher,
}
}
/// Returns true if all matches comprise the entire regular expression.
///
/// This does not necessarily mean that a literal match implies a match
/// of the regular expression. For example, the regular expression `^a`
/// is comprised of a single complete literal `a`, but the regular
/// expression demands that it only match at the beginning of a string.
pub fn complete(&self) -> bool {
self.complete && !self.is_empty()
}
/// Find the position of a literal in `haystack` if it exists.
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn find(&self, haystack: &[u8]) -> Option<(usize, usize)> {
use self::Matcher::*;
match self.matcher {
Empty => Some((0, 0)),
Bytes(ref sset) => sset.find(haystack).map(|i| (i, i + 1)),
Memmem(ref s) => s.find(haystack).map(|i| (i, i + s.len())),
AC { ref ac, .. } => {
ac.find(haystack).map(|m| (m.start(), m.end()))
}
Packed { ref s, .. } => {
s.find(haystack).map(|m| (m.start(), m.end()))
}
}
}
/// Like find, except matches must start at index `0`.
pub fn find_start(&self, haystack: &[u8]) -> Option<(usize, usize)> {
for lit in self.iter() {
if lit.len() > haystack.len() {
continue;
}
if lit == &haystack[0..lit.len()] {
return Some((0, lit.len()));
}
}
None
}
/// Like find, except matches must end at index `haystack.len()`.
pub fn find_end(&self, haystack: &[u8]) -> Option<(usize, usize)> {
for lit in self.iter() {
if lit.len() > haystack.len() {
continue;
}
if lit == &haystack[haystack.len() - lit.len()..] {
return Some((haystack.len() - lit.len(), haystack.len()));
}
}
None
}
/// Returns an iterator over all literals to be matched.
pub fn iter(&self) -> LiteralIter<'_> {
match self.matcher {
Matcher::Empty => LiteralIter::Empty,
Matcher::Bytes(ref sset) => LiteralIter::Bytes(&sset.dense),
Matcher::Memmem(ref s) => LiteralIter::Single(&s.finder.needle()),
Matcher::AC { ref lits, .. } => LiteralIter::AC(lits),
Matcher::Packed { ref lits, .. } => LiteralIter::Packed(lits),
}
}
/// Returns a matcher for the longest common prefix of this matcher.
pub fn lcp(&self) -> &Memmem {
&self.lcp
}
/// Returns a matcher for the longest common suffix of this matcher.
pub fn lcs(&self) -> &Memmem {
&self.lcs
}
/// Returns true iff this prefix is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the number of prefixes in this machine.
pub fn len(&self) -> usize {
use self::Matcher::*;
match self.matcher {
Empty => 0,
Bytes(ref sset) => sset.dense.len(),
Memmem(_) => 1,
AC { ref ac, .. } => ac.pattern_count(),
Packed { ref lits, .. } => lits.len(),
}
}
/// Return the approximate heap usage of literals in bytes.
pub fn approximate_size(&self) -> usize {
use self::Matcher::*;
match self.matcher {
Empty => 0,
Bytes(ref sset) => sset.approximate_size(),
Memmem(ref single) => single.approximate_size(),
AC { ref ac, .. } => ac.heap_bytes(),
Packed { ref s, .. } => s.heap_bytes(),
}
}
}
impl Matcher {
fn prefixes(lits: &Literals) -> Self {
let sset = SingleByteSet::prefixes(lits);
Matcher::new(lits, sset)
}
fn suffixes(lits: &Literals) -> Self {
let sset = SingleByteSet::suffixes(lits);
Matcher::new(lits, sset)
}
fn new(lits: &Literals, sset: SingleByteSet) -> Self {
if lits.literals().is_empty() {
return Matcher::Empty;
}
if sset.dense.len() >= 26 {
// Avoid trying to match a large number of single bytes.
// This is *very* sensitive to a frequency analysis comparison
// between the bytes in sset and the composition of the haystack.
// No matter the size of sset, if its members all are rare in the
// haystack, then it'd be worth using it. How to tune this... IDK.
// ---AG
return Matcher::Empty;
}
if sset.complete {
return Matcher::Bytes(sset);
}
if lits.literals().len() == 1 {
return Matcher::Memmem(Memmem::new(&lits.literals()[0]));
}
let pats = lits.literals().to_owned();
let is_aho_corasick_fast = sset.dense.len() <= 1 && sset.all_ascii;
if lits.literals().len() <= 100 && !is_aho_corasick_fast {
let mut builder = packed::Config::new()
.match_kind(packed::MatchKind::LeftmostFirst)
.builder();
if let Some(s) = builder.extend(&pats).build() {
return Matcher::Packed { s, lits: pats };
}
}
let ac = AhoCorasickBuilder::new()
.match_kind(aho_corasick::MatchKind::LeftmostFirst)
.dfa(true)
.build_with_size::<u32, _, _>(&pats)
.unwrap();
Matcher::AC { ac, lits: pats }
}
}
#[derive(Debug)]
pub enum LiteralIter<'a> {
Empty,
Bytes(&'a [u8]),
Single(&'a [u8]),
AC(&'a [Literal]),
Packed(&'a [Literal]),
}
impl<'a> Iterator for LiteralIter<'a> {
type Item = &'a [u8];
fn next(&mut self) -> Option<Self::Item> {
match *self {
LiteralIter::Empty => None,
LiteralIter::Bytes(ref mut many) => {
if many.is_empty() {
None
} else {
let next = &many[0..1];
*many = &many[1..];
Some(next)
}
}
LiteralIter::Single(ref mut one) => {
if one.is_empty() | else {
let next = &one[..];
*one = &[];
Some(next)
}
}
LiteralIter::AC(ref mut lits) => {
if lits.is_empty() {
None
} else {
let next = &lits[0];
*lits = &lits[1..];
Some(&**next)
}
}
LiteralIter::Packed(ref mut lits) => {
if lits.is_empty() {
None
} else {
let next = &lits[0];
*lits = &lits[1..];
Some(&**next)
}
}
}
}
}
#[derive(Clone, Debug)]
struct SingleByteSet {
sparse: Vec<bool>,
dense: Vec<u8>,
complete: bool,
all_ascii: bool,
}
impl SingleByteSet {
fn new() -> SingleByteSet {
SingleByteSet {
sparse: vec![false; 256],
dense: vec![],
complete: true,
all_ascii: true,
}
}
fn prefixes(lits: &Literals) -> SingleByteSet {
let mut sset = SingleByteSet::new();
for lit in lits.literals() {
sset.complete = sset.complete && lit.len() == 1;
if let Some(&b) = lit.get(0) {
if !sset.sparse[b as usize] {
if b > 0x7F {
sset.all_ascii = false;
}
sset.dense.push(b);
sset.sparse[b as usize] = true;
}
}
}
sset
}
fn suffixes(lits: &Literals) -> SingleByteSet {
let mut sset = SingleByteSet::new();
for lit in lits.literals() {
sset.complete = sset.complete && lit.len() == 1;
if let Some(&b) = lit.get(lit.len().checked_sub(1).unwrap()) {
if !sset.sparse[b as usize] {
if b > 0x7F {
sset.all_ascii = false;
}
sset.dense.push(b);
sset.sparse[b as usize] = true;
}
}
}
sset
}
/// Faster find that special cases certain sizes to use memchr.
#[cfg_attr(feature = "perf-inline", inline(always))]
fn find(&self, text: &[u8]) -> Option<usize> {
match self.dense.len() {
0 => None,
1 => memchr(self.dense[0], text),
2 => memchr2(self.dense[0], self.dense[1], text),
3 => memchr3(self.dense[0], self.dense[1], self.dense[2], text),
_ => self._find(text),
}
}
/// Generic find that works on any sized set.
fn _find(&self, haystack: &[u8]) -> Option<usize> {
for (i, &b) in haystack.iter().enumerate() {
if self.sparse[b as usize] {
return Some(i);
}
}
None
}
fn approximate_size(&self) -> usize {
(self.dense.len() * mem::size_of::<u8>())
+ (self.sparse.len() * mem::size_of::<bool>())
}
}
/// A simple wrapper around the memchr crate's memmem implementation.
///
/// The API this exposes mirrors the API of previous substring searchers that
/// this supplanted.
#[derive(Clone, Debug)]
pub struct Memmem {
finder: memmem::Finder<'static>,
char_len: usize,
}
impl Memmem {
fn new(pat: &[u8]) -> Memmem {
Memmem {
finder: memmem::Finder::new(pat).into_owned(),
char_len: char_len_lossy(pat),
}
}
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn find(&self, haystack: &[u8]) -> Option<usize> {
self.finder.find(haystack)
}
#[cfg_attr(feature = "perf-inline", inline(always))]
pub fn is_suffix(&self, text: &[u8]) -> bool {
if text.len() < self.len() {
return false;
}
&text[text.len() - self.len()..] == self.finder.needle()
}
pub fn len(&self) -> usize {
self.finder.needle().len()
}
pub fn char_len(&self) -> usize {
self.char_len
}
fn approximate_size(&self) -> usize {
self.finder.needle().len() * mem::size_of::<u8>()
}
}
fn char_len_lossy(bytes: &[u8]) -> usize {
String::from_utf8_lossy(bytes).chars().count()
}
| {
None
} | conditional_block |
GetOutput.py | from Actor import *
import common
import string, os, time
from crab_util import *
class GetOutput(Actor):
def __init__(self, *args):
self.cfg_params = args[0]
self.jobs = args[1]
self.log=0
dir = os.getcwd()+'/'
self.outDir = self.cfg_params.get('USER.outputdir' ,common.work_space.resDir())
if ( self.outDir[-1] != '/' ) : self.outDir = self.outDir + '/'
if ( self.outDir[0] != '/') : self.outDir = dir + self.outDir
self.logDir = self.cfg_params.get('USER.logdir' ,common.work_space.resDir())
if ( self.logDir[-1] != '/' ) : self.logDir = self.logDir + '/'
if ( self.logDir[0] != '/') : self.logDir = dir +self.logDir
if self.logDir != self.outDir:
self.log=1
self.return_data = self.cfg_params.get('USER.return_data',0)
self.dontCheckSpaceLeft = int(self.cfg_params.get('USER.dontCheckSpaceLeft' ,0))
return
def run(self):
"""
The main method of the class: Check destination dirs and
perform the get output
"""
common.logger.debug( "GetOutput::run() called")
start = time.time()
self.getOutput()
stop = time.time()
common.logger.debug( "GetOutput Time: "+str(stop - start))
pass
def checkBeforeGet(self):
# should be in this way... but a core dump appear... waiting for solution
#self.up_task = common.scheduler.queryEverything(1)
self.up_task = common._db.getTask()
list_id_done=[]
list_id_done_not_term=[]
self.list_id=[]
self.all_id =[]
for job in self.up_task.jobs:
if (job.runningJob['state'] == 'Terminated'):
list_id_done.append(job['jobId'])
elif job.runningJob['status'] in ['Done', 'Done (Failed)']:
list_id_done_not_term.append(job['jobId'])
self.all_id.append(job['jobId'])
check = -1
if self.jobs != 'all': check = len( set(self.jobs).intersection(set(list_id_done)) )
if len(list_id_done)==0 or ( check == 0 ) :
msg=''
list_jobs=self.jobs
if self.jobs == 'all': list_jobs=self.all_id
msg += 'Jobs %s are not in Done status. It is not possible yet to retrieve the output. \n'% readableList(self,list_jobs)
if len(list_id_done) > 0:
                msg += ' Retrieve the jobs once they are in Done status and Terminated action. \n'
msg += ' To know the action of a job run: "crab -status v " \n'
raise CrabException(msg)
else:
if self.jobs == 'all':
self.list_id= list_id_done
if len(self.up_task.jobs)>len(self.list_id):
msg = 'Only %d jobs will be retrieved '% (len(self.list_id))
msg += ' from %d requested.\n'%(len(self.up_task.jobs))
msg += '\t(for details: crab -status)'
common.logger.info(msg)
else:
for id in self.jobs:
if id in list_id_done: self.list_id.append(id)
if len(self.jobs) > len(self.list_id):
msg = 'Only %d jobs will be retrieved '% (len(self.list_id))
msg += ' from %d requested.\n'%(len(self.jobs))
msg += '\t(for details: crab -status)'
common.logger.info(msg)
if not os.path.isdir(self.logDir) or not os.path.isdir(self.outDir):
msg = ' Output or Log dir not found!! check '+self.logDir+' and '+self.outDir
raise CrabException(msg)
return
def getOutput(self):
"""
Get output for a finished job with id.
"""
self.checkBeforeGet()
# Get first job of the list
if not self.dontCheckSpaceLeft and not has_freespace(self.outDir, 10*1024): # First check for more than 10 Mb
msg = "You have LESS than 10 MB of free space on your working dir\n"
msg +="Please make some room before retrying\n\n"
msg +="To bypass this check, run \n"
msg +="crab -get -USER.dontCheckSpaceLeft=1 \n"
raise CrabException(msg)
list_first=self.list_id[0:1]
task= common.scheduler.getOutput(1, list_first, self.outDir)
lastSize = self.organizeOutput( task, list_first )
# here check disk space for first job
if not self.dontCheckSpaceLeft and not has_freespace(self.outDir, lastSize*len(self.list_id)*1.2) : # add a 20% overhead
msg = "Estimated space needed for getOutput is "+str(lastSize*len(self.list_id)*1.2)
msg +=" which is LESS than available space on disk\n"
msg +="Please make some room before retrying\n"
msg +="To bypass this check, run \n"
msg +="crab -get -USER.dontCheckSpaceLeft=1 \n"
raise CrabException(msg)
# get the size of the actual OSB of first job
if (len(self.list_id)>1) :
# check disk space for other N jobs using estimate from the first
list_other=self.list_id[1:]
task= common.scheduler.getOutput(1, list_other, self.outDir)
self.organizeOutput( task, list_other )
return
def organizeOutput(self, task, list_id):
|
def parseFinalReport(self, input):
"""
Parses the FJR produced by job in order to retrieve
the WrapperExitCode and ExeExitCode.
Updates the BossDB with these values.
"""
from ProdCommon.FwkJobRep.ReportParser import readJobReport
codeValue = {}
jreports = readJobReport(input)
if len(jreports) <= 0 :
codeValue["applicationReturnCode"] = str(50115)
codeValue["wrapperReturnCode"] = str(50115)
common.logger.debug("Empty FWkobreport: error code assigned is 50115 ")
return codeValue
jobReport = jreports[0]
exit_status = ''
##### temporary fix for FJR incomplete ####
fjr = open (input)
len_fjr = len(fjr.readlines())
if (len_fjr <= 6):
### 50115 - cmsRun did not produce a valid/readable job report at runtime
codeValue["applicationReturnCode"] = str(50115)
codeValue["wrapperReturnCode"] = str(50115)
if len(jobReport.errors) != 0 :
for error in jobReport.errors:
if error['Type'] == 'WrapperExitCode':
codeValue["wrapperReturnCode"] = error['ExitStatus']
elif error['Type'] == 'ExeExitCode':
codeValue["applicationReturnCode"] = error['ExitStatus']
if error['Type'] == 'CMSException':
codeValue["applicationReturnCodeOrig"] = error['ExitStatus']
else:
continue
if not codeValue.has_key('wrapperReturnCode'):
codeValue["wrapperReturnCode"] = ''
if not codeValue.has_key('applicationReturnCode'):
if codeValue.has_key('applicationReturnCodeOrig'):
codeValue["applicationReturnCode"] = \
codeValue["applicationReturnCodeOrig"]
codeValue.pop("applicationReturnCodeOrig")
else:
codeValue["applicationReturnCode"] = ''
else:
if codeValue.has_key('applicationReturnCodeOrig'):
codeValue.pop("applicationReturnCodeOrig")
#### Filling BOSS DB with SE name and LFN, for edm and not_edm files ####
lfns=[]
pfns=[]
if (len(jobReport.files) != 0):
for f in jobReport.files:
if f['LFN']:
lfns.append(f['LFN'])
if f['PFN']:
                    #### FEDE to have the correct endpoint to use in the copyData (we modify the bossDB value and not the fjr )
if common.scheduler.name().upper() not in ['LSF', 'CAF', 'PBS', 'PBSV2'] and codeValue["wrapperReturnCode"] == 60308:
pfns.append(os.path.dirname(f['SurlForGrid'])+'/')
else:
pfns.append(os.path.dirname(f['PFN'])+'/')
##########
if (len(jobReport.analysisFiles) != 0):
for aFile in jobReport.analysisFiles:
if aFile['LFN']:
lfns.append(aFile['LFN'])
if aFile['PFN']:
                    #### FEDE to have the correct endpoint to use in the copyData (we modify the bossDB value and not the fjr )
if common.scheduler.name().upper() not in ['LSF', 'CAF', 'PBS', 'PBSV2'] and codeValue["wrapperReturnCode"] == 60308:
pfns.append(os.path.dirname(aFile['SurlForGrid'])+'/')
else:
pfns.append(os.path.dirname(aFile['PFN'])+'/')
#########
codeValue["storage"] = pfns
codeValue["lfn"] = lfns
return codeValue
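    # Example of the dictionary returned for a successful job (illustrative values):
    #   {'applicationReturnCode': '0', 'wrapperReturnCode': '0',
    #    'storage': ['srm://se.example.org/store/user/out/'],
    #    'lfn': ['/store/user/out/output_1.root']}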
def moveOutput(self,id, max_id,path,file):
"""
Move output of job already retrieved
into the correct backup directory
"""
Dir_Base=path +'Submission_'
for i in range(1, max_id):
if not os.path.isdir( Dir_Base + str(i) + '/'):
cmd=('mkdir '+ Dir_Base + str(i) + '/ >& /dev/null')
cmd_out = runCommand(cmd)
common.logger.debug(str(cmd_out))
cmd='mv '+ path + file + ' ' + Dir_Base + str(max_id -1) + '/ >& /dev/null'
try:
cmd_out = runCommand(cmd)
common.logger.debug(cmd_out)
except:
msg = 'no output to move for job '+str(id)
common.logger.debug(msg)
pass
return
| """
Untar Output
"""
listCode = []
job_id = []
success_ret = 0
size = 0 # in kB
for id in list_id:
runningJob = task.getJob( id ).runningJob
if runningJob.isError() :
continue
file = 'out_files_'+ str(id)+'.tgz'
if os.path.exists(self.outDir + file):
self.max_id = runningJob['submission']
if self.max_id > 1:
for f in os.listdir(self.outDir):
if (f.find('_'+str(id)+'.') != -1 ) and (f != file) and f.find('Submission_'+str(id)) == -1:
self.moveOutput(id, self.max_id, self.outDir, f)
if self.log==1:
for f in os.listdir(self.logDir):
if f.find('_'+str(id)+'.') != -1 and f.find('Submission_'+str(id)) == -1:
self.moveOutput(id, self.max_id, self.logDir, f)
pass
pass
pass
try:
size = getGZSize(self.outDir + file)/1024 # in kB
cmd = 'tar zxf ' + self.outDir + file + ' ' + '-C ' + self.outDir
cmd_out = runCommand(cmd)
cmd_2 ='rm ' + self.outDir + 'out_files_'+ str(id)+'.tgz'
cmd_out2 = runCommand(cmd_2)
msg = 'Results of Jobs # '+str(id)+' are in '+self.outDir
common.logger.info(msg)
except IOError, eio:
common.logger.info("Output files for job "+ str(id) +" seems corrupted.\n")
continue
else:
msg ="Output files for job "+ str(id) +" not available.\n"
common.logger.info(msg)
FieldToUpdate={}
FieldToUpdate['state']= 'Cleared'
FieldToUpdate["applicationReturnCode"] = str(50700)
FieldToUpdate["wrapperReturnCode"] = str(50700)
job_id.append(id)
listCode.append(FieldToUpdate)
#continue
input = 'crab_fjr_' + str(id) + '.xml'
if os.path.exists(self.outDir + input):
FiledToUpdate = self.parseFinalReport(self.outDir + input)
FiledToUpdate['state']= 'Cleared'
job_id.append(id)
listCode.append(FiledToUpdate)
else:
msg = "Problems with "+str(input)+". File not available.\n"
common.logger.info(msg)
success_ret +=1
common._db.updateRunJob_(job_id , listCode)
if self.logDir != self.outDir:
for i_id in list_id:
try:
cmd = 'mv '+str(self.outDir)+'/*'+str(i_id)+'.std* '+str(self.outDir)+'/.BrokerInfo '+str(self.logDir)
cmd_out =os.system(cmd)
except:
msg = 'Problem with copy of job results'
common.logger.info(msg)
msg = 'Results of Jobs # '+str(list_id)+' are in '+self.outDir+' (log files are in '+self.logDir+')'
common.logger.info(msg)
return size | identifier_body |
GetOutput.py | from Actor import *
import common
import string, os, time
from crab_util import *
class GetOutput(Actor):
def __init__(self, *args):
self.cfg_params = args[0]
self.jobs = args[1]
self.log=0
dir = os.getcwd()+'/'
self.outDir = self.cfg_params.get('USER.outputdir' ,common.work_space.resDir())
if ( self.outDir[-1] != '/' ) : self.outDir = self.outDir + '/'
if ( self.outDir[0] != '/') : self.outDir = dir + self.outDir
self.logDir = self.cfg_params.get('USER.logdir' ,common.work_space.resDir())
if ( self.logDir[-1] != '/' ) : self.logDir = self.logDir + '/'
if ( self.logDir[0] != '/') : self.logDir = dir +self.logDir
if self.logDir != self.outDir:
self.log=1
self.return_data = self.cfg_params.get('USER.return_data',0)
self.dontCheckSpaceLeft = int(self.cfg_params.get('USER.dontCheckSpaceLeft' ,0))
return
def run(self):
"""
The main method of the class: Check destination dirs and
perform the get output
"""
common.logger.debug( "GetOutput::run() called")
start = time.time()
self.getOutput()
stop = time.time()
common.logger.debug( "GetOutput Time: "+str(stop - start))
pass
def checkBeforeGet(self):
# should be in this way... but a core dump appear... waiting for solution
#self.up_task = common.scheduler.queryEverything(1)
self.up_task = common._db.getTask()
list_id_done=[]
list_id_done_not_term=[]
self.list_id=[]
self.all_id =[]
for job in self.up_task.jobs:
if (job.runningJob['state'] == 'Terminated'):
list_id_done.append(job['jobId'])
elif job.runningJob['status'] in ['Done', 'Done (Failed)']:
list_id_done_not_term.append(job['jobId'])
self.all_id.append(job['jobId'])
check = -1
if self.jobs != 'all': check = len( set(self.jobs).intersection(set(list_id_done)) )
if len(list_id_done)==0 or ( check == 0 ) :
msg=''
list_jobs=self.jobs
if self.jobs == 'all': list_jobs=self.all_id
msg += 'Jobs %s are not in Done status. It is not possible yet to retrieve the output. \n'% readableList(self,list_jobs)
if len(list_id_done) > 0:
                msg += ' Retrieve the jobs once they are in Done status and Terminated action. \n'
msg += ' To know the action of a job run: "crab -status v " \n'
raise CrabException(msg)
else:
if self.jobs == 'all':
self.list_id= list_id_done
if len(self.up_task.jobs)>len(self.list_id):
msg = 'Only %d jobs will be retrieved '% (len(self.list_id))
msg += ' from %d requested.\n'%(len(self.up_task.jobs))
msg += '\t(for details: crab -status)'
common.logger.info(msg)
else:
for id in self.jobs:
if id in list_id_done: self.list_id.append(id)
if len(self.jobs) > len(self.list_id):
msg = 'Only %d jobs will be retrieved '% (len(self.list_id))
msg += ' from %d requested.\n'%(len(self.jobs))
msg += '\t(for details: crab -status)'
common.logger.info(msg)
if not os.path.isdir(self.logDir) or not os.path.isdir(self.outDir):
msg = ' Output or Log dir not found!! check '+self.logDir+' and '+self.outDir
raise CrabException(msg)
return
def getOutput(self):
"""
Get output for a finished job with id.
"""
self.checkBeforeGet()
# Get first job of the list
if not self.dontCheckSpaceLeft and not has_freespace(self.outDir, 10*1024): # First check for more than 10 Mb
msg = "You have LESS than 10 MB of free space on your working dir\n"
msg +="Please make some room before retrying\n\n"
msg +="To bypass this check, run \n"
msg +="crab -get -USER.dontCheckSpaceLeft=1 \n"
raise CrabException(msg)
list_first=self.list_id[0:1]
task= common.scheduler.getOutput(1, list_first, self.outDir)
lastSize = self.organizeOutput( task, list_first )
# here check disk space for first job
if not self.dontCheckSpaceLeft and not has_freespace(self.outDir, lastSize*len(self.list_id)*1.2) : # add a 20% overhead
msg = "Estimated space needed for getOutput is "+str(lastSize*len(self.list_id)*1.2)
msg +=" which is LESS than available space on disk\n"
msg +="Please make some room before retrying\n"
msg +="To bypass this check, run \n"
msg +="crab -get -USER.dontCheckSpaceLeft=1 \n"
raise CrabException(msg)
# get the size of the actual OSB of first job
if (len(self.list_id)>1) :
# check disk space for other N jobs using estimate from the first
list_other=self.list_id[1:]
task= common.scheduler.getOutput(1, list_other, self.outDir)
self.organizeOutput( task, list_other )
return
def organizeOutput(self, task, list_id):
"""
Untar Output
"""
listCode = []
job_id = []
success_ret = 0
size = 0 # in kB
for id in list_id:
runningJob = task.getJob( id ).runningJob
if runningJob.isError() :
continue
file = 'out_files_'+ str(id)+'.tgz'
if os.path.exists(self.outDir + file):
self.max_id = runningJob['submission']
if self.max_id > 1:
for f in os.listdir(self.outDir):
if (f.find('_'+str(id)+'.') != -1 ) and (f != file) and f.find('Submission_'+str(id)) == -1:
self.moveOutput(id, self.max_id, self.outDir, f)
if self.log==1:
for f in os.listdir(self.logDir):
if f.find('_'+str(id)+'.') != -1 and f.find('Submission_'+str(id)) == -1:
self.moveOutput(id, self.max_id, self.logDir, f)
pass
pass
pass
try:
size = getGZSize(self.outDir + file)/1024 # in kB
cmd = 'tar zxf ' + self.outDir + file + ' ' + '-C ' + self.outDir
cmd_out = runCommand(cmd)
cmd_2 ='rm ' + self.outDir + 'out_files_'+ str(id)+'.tgz'
cmd_out2 = runCommand(cmd_2)
msg = 'Results of Jobs # '+str(id)+' are in '+self.outDir
common.logger.info(msg)
except IOError, eio:
common.logger.info("Output files for job "+ str(id) +" seems corrupted.\n")
continue
else:
msg ="Output files for job "+ str(id) +" not available.\n"
common.logger.info(msg)
FieldToUpdate={}
FieldToUpdate['state']= 'Cleared'
FieldToUpdate["applicationReturnCode"] = str(50700)
FieldToUpdate["wrapperReturnCode"] = str(50700)
job_id.append(id)
listCode.append(FieldToUpdate)
#continue
input = 'crab_fjr_' + str(id) + '.xml'
if os.path.exists(self.outDir + input):
FiledToUpdate = self.parseFinalReport(self.outDir + input)
FiledToUpdate['state']= 'Cleared'
job_id.append(id)
listCode.append(FiledToUpdate)
else:
msg = "Problems with "+str(input)+". File not available.\n"
common.logger.info(msg)
success_ret +=1
common._db.updateRunJob_(job_id , listCode)
if self.logDir != self.outDir:
for i_id in list_id:
try:
cmd = 'mv '+str(self.outDir)+'/*'+str(i_id)+'.std* '+str(self.outDir)+'/.BrokerInfo '+str(self.logDir)
cmd_out =os.system(cmd)
except:
msg = 'Problem with copy of job results'
common.logger.info(msg)
msg = 'Results of Jobs # '+str(list_id)+' are in '+self.outDir+' (log files are in '+self.logDir+')'
common.logger.info(msg)
return size
def parseFinalReport(self, input):
"""
Parses the FJR produced by job in order to retrieve
the WrapperExitCode and ExeExitCode.
Updates the BossDB with these values.
"""
from ProdCommon.FwkJobRep.ReportParser import readJobReport
codeValue = {}
jreports = readJobReport(input)
if len(jreports) <= 0 :
codeValue["applicationReturnCode"] = str(50115)
codeValue["wrapperReturnCode"] = str(50115)
common.logger.debug("Empty FWkobreport: error code assigned is 50115 ")
return codeValue
jobReport = jreports[0]
exit_status = ''
##### temporary fix for FJR incomplete ####
fjr = open (input)
len_fjr = len(fjr.readlines())
if (len_fjr <= 6):
### 50115 - cmsRun did not produce a valid/readable job report at runtime
codeValue["applicationReturnCode"] = str(50115)
codeValue["wrapperReturnCode"] = str(50115)
if len(jobReport.errors) != 0 :
for error in jobReport.errors:
if error['Type'] == 'WrapperExitCode':
codeValue["wrapperReturnCode"] = error['ExitStatus']
elif error['Type'] == 'ExeExitCode':
codeValue["applicationReturnCode"] = error['ExitStatus']
if error['Type'] == 'CMSException':
codeValue["applicationReturnCodeOrig"] = error['ExitStatus']
else:
continue
if not codeValue.has_key('wrapperReturnCode'):
codeValue["wrapperReturnCode"] = ''
if not codeValue.has_key('applicationReturnCode'):
if codeValue.has_key('applicationReturnCodeOrig'):
codeValue["applicationReturnCode"] = \
codeValue["applicationReturnCodeOrig"]
codeValue.pop("applicationReturnCodeOrig")
else:
codeValue["applicationReturnCode"] = ''
else:
if codeValue.has_key('applicationReturnCodeOrig'):
codeValue.pop("applicationReturnCodeOrig")
#### Filling BOSS DB with SE name and LFN, for edm and not_edm files ####
lfns=[]
pfns=[]
if (len(jobReport.files) != 0):
for f in jobReport.files:
if f['LFN']:
lfns.append(f['LFN'])
if f['PFN']:
                        #### FEDE to have the correct endpoint to use in the copyData (we modify the bossDB value and not the fjr )
if common.scheduler.name().upper() not in ['LSF', 'CAF', 'PBS', 'PBSV2'] and codeValue["wrapperReturnCode"] == 60308:
pfns.append(os.path.dirname(f['SurlForGrid'])+'/')
else:
pfns.append(os.path.dirname(f['PFN'])+'/')
##########
if (len(jobReport.analysisFiles) != 0):
for aFile in jobReport.analysisFiles:
if aFile['LFN']:
lfns.append(aFile['LFN'])
if aFile['PFN']:
                        #### FEDE to have the correct endpoint to use in the copyData (we modify the bossDB value and not the fjr )
|
codeValue["storage"] = pfns
codeValue["lfn"] = lfns
return codeValue
def moveOutput(self,id, max_id,path,file):
"""
Move output of job already retrieved
into the correct backup directory
"""
Dir_Base=path +'Submission_'
for i in range(1, max_id):
if not os.path.isdir( Dir_Base + str(i) + '/'):
cmd=('mkdir '+ Dir_Base + str(i) + '/ >& /dev/null')
cmd_out = runCommand(cmd)
common.logger.debug(str(cmd_out))
cmd='mv '+ path + file + ' ' + Dir_Base + str(max_id -1) + '/ >& /dev/null'
try:
cmd_out = runCommand(cmd)
common.logger.debug(cmd_out)
except:
msg = 'no output to move for job '+str(id)
common.logger.debug(msg)
pass
return
| if common.scheduler.name().upper() not in ['LSF', 'CAF', 'PBS', 'PBSV2'] and codeValue["wrapperReturnCode"] == 60308:
pfns.append(os.path.dirname(aFile['SurlForGrid'])+'/')
else:
pfns.append(os.path.dirname(aFile['PFN'])+'/')
######### | conditional_block |
GetOutput.py | from Actor import *
import common
import string, os, time
from crab_util import *
class GetOutput(Actor):
def __init__(self, *args):
self.cfg_params = args[0]
self.jobs = args[1]
self.log=0
dir = os.getcwd()+'/'
self.outDir = self.cfg_params.get('USER.outputdir' ,common.work_space.resDir())
if ( self.outDir[-1] != '/' ) : self.outDir = self.outDir + '/'
if ( self.outDir[0] != '/') : self.outDir = dir + self.outDir
self.logDir = self.cfg_params.get('USER.logdir' ,common.work_space.resDir())
if ( self.logDir[-1] != '/' ) : self.logDir = self.logDir + '/'
if ( self.logDir[0] != '/') : self.logDir = dir +self.logDir
if self.logDir != self.outDir:
self.log=1
self.return_data = self.cfg_params.get('USER.return_data',0)
self.dontCheckSpaceLeft = int(self.cfg_params.get('USER.dontCheckSpaceLeft' ,0))
return
def run(self):
"""
The main method of the class: Check destination dirs and
perform the get output
"""
common.logger.debug( "GetOutput::run() called")
start = time.time()
self.getOutput()
stop = time.time()
common.logger.debug( "GetOutput Time: "+str(stop - start))
pass
def checkBeforeGet(self):
        # should be done this way... but a core dump appears... waiting for a solution
#self.up_task = common.scheduler.queryEverything(1)
self.up_task = common._db.getTask()
list_id_done=[]
list_id_done_not_term=[]
self.list_id=[]
self.all_id =[]
for job in self.up_task.jobs:
if (job.runningJob['state'] == 'Terminated'):
list_id_done.append(job['jobId'])
elif job.runningJob['status'] in ['Done', 'Done (Failed)']:
list_id_done_not_term.append(job['jobId'])
self.all_id.append(job['jobId'])
check = -1
if self.jobs != 'all': check = len( set(self.jobs).intersection(set(list_id_done)) )
if len(list_id_done)==0 or ( check == 0 ) :
msg=''
list_jobs=self.jobs
if self.jobs == 'all': list_jobs=self.all_id
msg += 'Jobs %s are not in Done status. It is not possible yet to retrieve the output. \n'% readableList(self,list_jobs)
if len(list_id_done) > 0:
                msg += ' Retrieve the jobs once they are in Done status and their action is Terminated. \n'
msg += ' To know the action of a job run: "crab -status v " \n'
raise CrabException(msg)
else:
if self.jobs == 'all':
self.list_id= list_id_done
if len(self.up_task.jobs)>len(self.list_id):
msg = 'Only %d jobs will be retrieved '% (len(self.list_id))
msg += ' from %d requested.\n'%(len(self.up_task.jobs))
msg += '\t(for details: crab -status)'
common.logger.info(msg)
else:
for id in self.jobs:
if id in list_id_done: self.list_id.append(id)
if len(self.jobs) > len(self.list_id):
msg = 'Only %d jobs will be retrieved '% (len(self.list_id))
msg += ' from %d requested.\n'%(len(self.jobs))
msg += '\t(for details: crab -status)'
common.logger.info(msg)
if not os.path.isdir(self.logDir) or not os.path.isdir(self.outDir):
msg = ' Output or Log dir not found!! check '+self.logDir+' and '+self.outDir
raise CrabException(msg)
return
def | (self):
"""
Get output for a finished job with id.
"""
self.checkBeforeGet()
# Get first job of the list
if not self.dontCheckSpaceLeft and not has_freespace(self.outDir, 10*1024): # First check for more than 10 Mb
msg = "You have LESS than 10 MB of free space on your working dir\n"
msg +="Please make some room before retrying\n\n"
msg +="To bypass this check, run \n"
msg +="crab -get -USER.dontCheckSpaceLeft=1 \n"
raise CrabException(msg)
list_first=self.list_id[0:1]
task= common.scheduler.getOutput(1, list_first, self.outDir)
lastSize = self.organizeOutput( task, list_first )
# here check disk space for first job
if not self.dontCheckSpaceLeft and not has_freespace(self.outDir, lastSize*len(self.list_id)*1.2) : # add a 20% overhead
msg = "Estimated space needed for getOutput is "+str(lastSize*len(self.list_id)*1.2)
msg +=" which is LESS than available space on disk\n"
msg +="Please make some room before retrying\n"
msg +="To bypass this check, run \n"
msg +="crab -get -USER.dontCheckSpaceLeft=1 \n"
raise CrabException(msg)
# get the size of the actual OSB of first job
if (len(self.list_id)>1) :
# check disk space for other N jobs using estimate from the first
list_other=self.list_id[1:]
task= common.scheduler.getOutput(1, list_other, self.outDir)
self.organizeOutput( task, list_other )
return
def organizeOutput(self, task, list_id):
"""
Untar Output
"""
listCode = []
job_id = []
success_ret = 0
size = 0 # in kB
for id in list_id:
runningJob = task.getJob( id ).runningJob
if runningJob.isError() :
continue
file = 'out_files_'+ str(id)+'.tgz'
if os.path.exists(self.outDir + file):
self.max_id = runningJob['submission']
if self.max_id > 1:
for f in os.listdir(self.outDir):
if (f.find('_'+str(id)+'.') != -1 ) and (f != file) and f.find('Submission_'+str(id)) == -1:
self.moveOutput(id, self.max_id, self.outDir, f)
if self.log==1:
for f in os.listdir(self.logDir):
if f.find('_'+str(id)+'.') != -1 and f.find('Submission_'+str(id)) == -1:
self.moveOutput(id, self.max_id, self.logDir, f)
pass
pass
pass
try:
size = getGZSize(self.outDir + file)/1024 # in kB
cmd = 'tar zxf ' + self.outDir + file + ' ' + '-C ' + self.outDir
cmd_out = runCommand(cmd)
cmd_2 ='rm ' + self.outDir + 'out_files_'+ str(id)+'.tgz'
cmd_out2 = runCommand(cmd_2)
msg = 'Results of Jobs # '+str(id)+' are in '+self.outDir
common.logger.info(msg)
except IOError, eio:
common.logger.info("Output files for job "+ str(id) +" seems corrupted.\n")
continue
else:
msg ="Output files for job "+ str(id) +" not available.\n"
common.logger.info(msg)
FieldToUpdate={}
FieldToUpdate['state']= 'Cleared'
FieldToUpdate["applicationReturnCode"] = str(50700)
FieldToUpdate["wrapperReturnCode"] = str(50700)
job_id.append(id)
listCode.append(FieldToUpdate)
#continue
input = 'crab_fjr_' + str(id) + '.xml'
if os.path.exists(self.outDir + input):
FiledToUpdate = self.parseFinalReport(self.outDir + input)
FiledToUpdate['state']= 'Cleared'
job_id.append(id)
listCode.append(FiledToUpdate)
else:
msg = "Problems with "+str(input)+". File not available.\n"
common.logger.info(msg)
success_ret +=1
common._db.updateRunJob_(job_id , listCode)
if self.logDir != self.outDir:
for i_id in list_id:
try:
cmd = 'mv '+str(self.outDir)+'/*'+str(i_id)+'.std* '+str(self.outDir)+'/.BrokerInfo '+str(self.logDir)
cmd_out =os.system(cmd)
except:
msg = 'Problem with copy of job results'
common.logger.info(msg)
msg = 'Results of Jobs # '+str(list_id)+' are in '+self.outDir+' (log files are in '+self.logDir+')'
common.logger.info(msg)
return size
def parseFinalReport(self, input):
"""
Parses the FJR produced by job in order to retrieve
the WrapperExitCode and ExeExitCode.
Updates the BossDB with these values.
"""
from ProdCommon.FwkJobRep.ReportParser import readJobReport
codeValue = {}
jreports = readJobReport(input)
if len(jreports) <= 0 :
codeValue["applicationReturnCode"] = str(50115)
codeValue["wrapperReturnCode"] = str(50115)
common.logger.debug("Empty FWkobreport: error code assigned is 50115 ")
return codeValue
jobReport = jreports[0]
exit_status = ''
##### temporary fix for FJR incomplete ####
fjr = open (input)
len_fjr = len(fjr.readlines())
if (len_fjr <= 6):
### 50115 - cmsRun did not produce a valid/readable job report at runtime
codeValue["applicationReturnCode"] = str(50115)
codeValue["wrapperReturnCode"] = str(50115)
if len(jobReport.errors) != 0 :
for error in jobReport.errors:
if error['Type'] == 'WrapperExitCode':
codeValue["wrapperReturnCode"] = error['ExitStatus']
elif error['Type'] == 'ExeExitCode':
codeValue["applicationReturnCode"] = error['ExitStatus']
if error['Type'] == 'CMSException':
codeValue["applicationReturnCodeOrig"] = error['ExitStatus']
else:
continue
if not codeValue.has_key('wrapperReturnCode'):
codeValue["wrapperReturnCode"] = ''
if not codeValue.has_key('applicationReturnCode'):
if codeValue.has_key('applicationReturnCodeOrig'):
codeValue["applicationReturnCode"] = \
codeValue["applicationReturnCodeOrig"]
codeValue.pop("applicationReturnCodeOrig")
else:
codeValue["applicationReturnCode"] = ''
else:
if codeValue.has_key('applicationReturnCodeOrig'):
codeValue.pop("applicationReturnCodeOrig")
#### Filling BOSS DB with SE name and LFN, for edm and not_edm files ####
lfns=[]
pfns=[]
if (len(jobReport.files) != 0):
for f in jobReport.files:
if f['LFN']:
lfns.append(f['LFN'])
if f['PFN']:
                        #### FEDE to have the correct endpoint to use in the copyData (we modify the bossDB value and not the fjr )
if common.scheduler.name().upper() not in ['LSF', 'CAF', 'PBS', 'PBSV2'] and codeValue["wrapperReturnCode"] == 60308:
pfns.append(os.path.dirname(f['SurlForGrid'])+'/')
else:
pfns.append(os.path.dirname(f['PFN'])+'/')
##########
if (len(jobReport.analysisFiles) != 0):
for aFile in jobReport.analysisFiles:
if aFile['LFN']:
lfns.append(aFile['LFN'])
if aFile['PFN']:
                        #### FEDE to have the correct endpoint to use in the copyData (we modify the bossDB value and not the fjr )
if common.scheduler.name().upper() not in ['LSF', 'CAF', 'PBS', 'PBSV2'] and codeValue["wrapperReturnCode"] == 60308:
pfns.append(os.path.dirname(aFile['SurlForGrid'])+'/')
else:
pfns.append(os.path.dirname(aFile['PFN'])+'/')
#########
codeValue["storage"] = pfns
codeValue["lfn"] = lfns
return codeValue
def moveOutput(self,id, max_id,path,file):
"""
Move output of job already retrieved
into the correct backup directory
"""
Dir_Base=path +'Submission_'
for i in range(1, max_id):
if not os.path.isdir( Dir_Base + str(i) + '/'):
cmd=('mkdir '+ Dir_Base + str(i) + '/ >& /dev/null')
cmd_out = runCommand(cmd)
common.logger.debug(str(cmd_out))
cmd='mv '+ path + file + ' ' + Dir_Base + str(max_id -1) + '/ >& /dev/null'
try:
cmd_out = runCommand(cmd)
common.logger.debug(cmd_out)
except:
msg = 'no output to move for job '+str(id)
common.logger.debug(msg)
pass
return
| getOutput | identifier_name |
GetOutput.py | from Actor import *
import common
import string, os, time
from crab_util import *
class GetOutput(Actor):
def __init__(self, *args):
self.cfg_params = args[0]
self.jobs = args[1]
self.log=0
dir = os.getcwd()+'/'
self.outDir = self.cfg_params.get('USER.outputdir' ,common.work_space.resDir())
if ( self.outDir[-1] != '/' ) : self.outDir = self.outDir + '/'
if ( self.outDir[0] != '/') : self.outDir = dir + self.outDir
self.logDir = self.cfg_params.get('USER.logdir' ,common.work_space.resDir())
if ( self.logDir[-1] != '/' ) : self.logDir = self.logDir + '/'
if ( self.logDir[0] != '/') : self.logDir = dir +self.logDir
if self.logDir != self.outDir:
self.log=1
self.return_data = self.cfg_params.get('USER.return_data',0)
self.dontCheckSpaceLeft = int(self.cfg_params.get('USER.dontCheckSpaceLeft' ,0))
return
def run(self):
"""
The main method of the class: Check destination dirs and
perform the get output
"""
common.logger.debug( "GetOutput::run() called")
start = time.time()
self.getOutput()
stop = time.time()
common.logger.debug( "GetOutput Time: "+str(stop - start))
pass
def checkBeforeGet(self):
        # should be done this way... but a core dump appears... waiting for a solution
#self.up_task = common.scheduler.queryEverything(1)
self.up_task = common._db.getTask()
list_id_done=[]
list_id_done_not_term=[]
self.list_id=[]
self.all_id =[]
for job in self.up_task.jobs:
if (job.runningJob['state'] == 'Terminated'):
list_id_done.append(job['jobId'])
elif job.runningJob['status'] in ['Done', 'Done (Failed)']:
list_id_done_not_term.append(job['jobId'])
self.all_id.append(job['jobId'])
check = -1
if self.jobs != 'all': check = len( set(self.jobs).intersection(set(list_id_done)) )
if len(list_id_done)==0 or ( check == 0 ) :
msg=''
list_jobs=self.jobs
if self.jobs == 'all': list_jobs=self.all_id
msg += 'Jobs %s are not in Done status. It is not possible yet to retrieve the output. \n'% readableList(self,list_jobs)
if len(list_id_done) > 0:
                msg += ' Retrieve the jobs once they are in Done status and their action is Terminated. \n'
msg += ' To know the action of a job run: "crab -status v " \n'
raise CrabException(msg)
else:
if self.jobs == 'all':
self.list_id= list_id_done
if len(self.up_task.jobs)>len(self.list_id):
msg = 'Only %d jobs will be retrieved '% (len(self.list_id))
msg += ' from %d requested.\n'%(len(self.up_task.jobs))
msg += '\t(for details: crab -status)'
common.logger.info(msg)
else:
for id in self.jobs:
if id in list_id_done: self.list_id.append(id)
if len(self.jobs) > len(self.list_id):
msg = 'Only %d jobs will be retrieved '% (len(self.list_id))
msg += ' from %d requested.\n'%(len(self.jobs))
msg += '\t(for details: crab -status)'
common.logger.info(msg)
if not os.path.isdir(self.logDir) or not os.path.isdir(self.outDir):
msg = ' Output or Log dir not found!! check '+self.logDir+' and '+self.outDir
raise CrabException(msg)
return
def getOutput(self):
"""
Get output for a finished job with id.
"""
self.checkBeforeGet()
# Get first job of the list
if not self.dontCheckSpaceLeft and not has_freespace(self.outDir, 10*1024): # First check for more than 10 Mb
msg = "You have LESS than 10 MB of free space on your working dir\n"
msg +="Please make some room before retrying\n\n"
msg +="To bypass this check, run \n"
msg +="crab -get -USER.dontCheckSpaceLeft=1 \n"
raise CrabException(msg)
list_first=self.list_id[0:1]
task= common.scheduler.getOutput(1, list_first, self.outDir)
lastSize = self.organizeOutput( task, list_first )
# here check disk space for first job
if not self.dontCheckSpaceLeft and not has_freespace(self.outDir, lastSize*len(self.list_id)*1.2) : # add a 20% overhead
msg = "Estimated space needed for getOutput is "+str(lastSize*len(self.list_id)*1.2)
msg +=" which is LESS than available space on disk\n"
msg +="Please make some room before retrying\n"
msg +="To bypass this check, run \n"
msg +="crab -get -USER.dontCheckSpaceLeft=1 \n"
raise CrabException(msg)
# get the size of the actual OSB of first job
if (len(self.list_id)>1) :
# check disk space for other N jobs using estimate from the first
list_other=self.list_id[1:]
task= common.scheduler.getOutput(1, list_other, self.outDir)
self.organizeOutput( task, list_other )
return
def organizeOutput(self, task, list_id):
"""
Untar Output
"""
listCode = []
job_id = []
success_ret = 0
size = 0 # in kB
for id in list_id:
runningJob = task.getJob( id ).runningJob
if runningJob.isError() :
continue
file = 'out_files_'+ str(id)+'.tgz'
if os.path.exists(self.outDir + file):
self.max_id = runningJob['submission']
if self.max_id > 1:
for f in os.listdir(self.outDir):
if (f.find('_'+str(id)+'.') != -1 ) and (f != file) and f.find('Submission_'+str(id)) == -1:
self.moveOutput(id, self.max_id, self.outDir, f)
if self.log==1:
for f in os.listdir(self.logDir):
if f.find('_'+str(id)+'.') != -1 and f.find('Submission_'+str(id)) == -1:
self.moveOutput(id, self.max_id, self.logDir, f)
pass
pass
pass
try:
size = getGZSize(self.outDir + file)/1024 # in kB
cmd = 'tar zxf ' + self.outDir + file + ' ' + '-C ' + self.outDir
cmd_out = runCommand(cmd)
cmd_2 ='rm ' + self.outDir + 'out_files_'+ str(id)+'.tgz'
cmd_out2 = runCommand(cmd_2)
msg = 'Results of Jobs # '+str(id)+' are in '+self.outDir
common.logger.info(msg)
except IOError, eio:
common.logger.info("Output files for job "+ str(id) +" seems corrupted.\n")
continue
else:
msg ="Output files for job "+ str(id) +" not available.\n"
common.logger.info(msg)
FieldToUpdate={}
FieldToUpdate['state']= 'Cleared'
FieldToUpdate["applicationReturnCode"] = str(50700)
FieldToUpdate["wrapperReturnCode"] = str(50700)
job_id.append(id)
listCode.append(FieldToUpdate)
#continue
input = 'crab_fjr_' + str(id) + '.xml'
if os.path.exists(self.outDir + input):
FiledToUpdate = self.parseFinalReport(self.outDir + input)
FiledToUpdate['state']= 'Cleared'
job_id.append(id)
listCode.append(FiledToUpdate)
else:
msg = "Problems with "+str(input)+". File not available.\n"
common.logger.info(msg)
success_ret +=1
common._db.updateRunJob_(job_id , listCode)
if self.logDir != self.outDir:
for i_id in list_id:
try:
cmd = 'mv '+str(self.outDir)+'/*'+str(i_id)+'.std* '+str(self.outDir)+'/.BrokerInfo '+str(self.logDir)
cmd_out =os.system(cmd)
except:
msg = 'Problem with copy of job results'
common.logger.info(msg)
msg = 'Results of Jobs # '+str(list_id)+' are in '+self.outDir+' (log files are in '+self.logDir+')'
common.logger.info(msg)
return size
def parseFinalReport(self, input):
"""
Parses the FJR produced by job in order to retrieve
the WrapperExitCode and ExeExitCode.
Updates the BossDB with these values.
"""
from ProdCommon.FwkJobRep.ReportParser import readJobReport
codeValue = {}
jreports = readJobReport(input)
if len(jreports) <= 0 :
codeValue["applicationReturnCode"] = str(50115)
codeValue["wrapperReturnCode"] = str(50115)
common.logger.debug("Empty FWkobreport: error code assigned is 50115 ")
return codeValue
jobReport = jreports[0]
exit_status = ''
##### temporary fix for FJR incomplete ####
fjr = open (input)
len_fjr = len(fjr.readlines())
if (len_fjr <= 6):
### 50115 - cmsRun did not produce a valid/readable job report at runtime
codeValue["applicationReturnCode"] = str(50115)
codeValue["wrapperReturnCode"] = str(50115)
if len(jobReport.errors) != 0 :
for error in jobReport.errors:
if error['Type'] == 'WrapperExitCode':
codeValue["wrapperReturnCode"] = error['ExitStatus']
elif error['Type'] == 'ExeExitCode':
codeValue["applicationReturnCode"] = error['ExitStatus']
if error['Type'] == 'CMSException':
codeValue["applicationReturnCodeOrig"] = error['ExitStatus']
else:
continue
if not codeValue.has_key('wrapperReturnCode'):
codeValue["wrapperReturnCode"] = ''
if not codeValue.has_key('applicationReturnCode'):
if codeValue.has_key('applicationReturnCodeOrig'):
codeValue["applicationReturnCode"] = \
codeValue["applicationReturnCodeOrig"]
codeValue.pop("applicationReturnCodeOrig")
else:
codeValue["applicationReturnCode"] = ''
else:
if codeValue.has_key('applicationReturnCodeOrig'): | #### Filling BOSS DB with SE name and LFN, for edm and not_edm files ####
lfns=[]
pfns=[]
if (len(jobReport.files) != 0):
for f in jobReport.files:
if f['LFN']:
lfns.append(f['LFN'])
if f['PFN']:
                    #### FEDE to have the correct endpoint to use in the copyData (we modify the bossDB value and not the fjr )
if common.scheduler.name().upper() not in ['LSF', 'CAF', 'PBS', 'PBSV2'] and codeValue["wrapperReturnCode"] == 60308:
pfns.append(os.path.dirname(f['SurlForGrid'])+'/')
else:
pfns.append(os.path.dirname(f['PFN'])+'/')
##########
if (len(jobReport.analysisFiles) != 0):
for aFile in jobReport.analysisFiles:
if aFile['LFN']:
lfns.append(aFile['LFN'])
if aFile['PFN']:
                    #### FEDE to have the correct endpoint to use in the copyData (we modify the bossDB value and not the fjr )
if common.scheduler.name().upper() not in ['LSF', 'CAF', 'PBS', 'PBSV2'] and codeValue["wrapperReturnCode"] == 60308:
pfns.append(os.path.dirname(aFile['SurlForGrid'])+'/')
else:
pfns.append(os.path.dirname(aFile['PFN'])+'/')
#########
codeValue["storage"] = pfns
codeValue["lfn"] = lfns
return codeValue
def moveOutput(self,id, max_id,path,file):
"""
Move output of job already retrieved
into the correct backup directory
"""
Dir_Base=path +'Submission_'
for i in range(1, max_id):
if not os.path.isdir( Dir_Base + str(i) + '/'):
cmd=('mkdir '+ Dir_Base + str(i) + '/ >& /dev/null')
cmd_out = runCommand(cmd)
common.logger.debug(str(cmd_out))
cmd='mv '+ path + file + ' ' + Dir_Base + str(max_id -1) + '/ >& /dev/null'
try:
cmd_out = runCommand(cmd)
common.logger.debug(cmd_out)
except:
msg = 'no output to move for job '+str(id)
common.logger.debug(msg)
pass
return | codeValue.pop("applicationReturnCodeOrig")
| random_line_split |
connect_four.py | # This script implements connect four and highlights winning series,
# series with greater than three consecutive Xs or Os.
# The pick_markers module for the start of the game.
def pick_markers():
'''
Module to assign X or O to the two players. Returns a list with two elements,
where index 0 is player 1's marker, index 1 is player 2's marker.
'''
import string
plyr1 = input("Player 1, choose X or O as your marker: ")
plyr1_mkr = plyr1.lower()
while True:
if plyr1_mkr != 'x' and plyr1_mkr != 'o':
plyr1 = input('ERROR. Invalid marker. Choose X or O: ')
plyr1_mkr = plyr1.lower()
else:
if plyr1_mkr == 'o':
print('Player 2, your marker is X')
plyr2_mkr = 'X'
else:
                print("Player 2, you are O's")
plyr2_mkr = 'O'
break
return [plyr1.upper(), plyr2_mkr]
# Code to make clear() clear the command line console. NOTE!!! If on linux, replace 'cls' with 'clear'.
import os
clear = lambda: os.system('cls')
import colorama
# This module constructs the initial board and prints the updated board throughout the game.
def print_board(spaces):
    ''' Uses the contents of spaces (list variable) to construct/update
the gameboard on the console. '''
clear()
while len(spaces) < 49:
spaces.append(' ')
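    # Indexing note: spaces is a flat list of 49 cells addressed as col + 7*row,
    # with col in 0-6 and row 0 at the BOTTOM of the board (e.g. spaces[3 + 7*2]
    # is column 4, third row up). The 42 - x1*7 offsets below walk the rows
    # top-down so the highest row is printed first.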
# Start with 7 empty lists... Index 0 corresponds to the TOP ROW of the printed board!!!
board = [[], [], [], [], [], [], []]
for x1 in range(0,len(board)):
single_row = [' '*20,'| ', spaces[(42-x1*7+0)],' | ', spaces[(42-x1*7+1)], ' | ', spaces[(42-x1*7+2)],' | ', spaces[(42-x1*7+3)], ' | ', spaces[(42-x1*7+4)], ' | ', spaces[(42-x1*7+5)], ' | ', spaces[(42-x1*7+6)], ' |']
board[x1] = single_row
print('\n'*3)
print(' '*20+'-'*43)
# Nested for loops to print off the board. Top row, being at index 0
# in board (list of lists variable) prints off first.
for x1 in board:
board_string = ''
for x2 in x1:
board_string = board_string + x2
print(board_string)
print(' '*20+'-'*43)
print(' '*21+' 1 '+' 2 '+' 3 '+' 4 '+' 5 '+' 6 '+' 7 ')
print('\n'*2)
def board_full(spaces):
    '''Check if the board is full by looking for any cell in spaces (list variable) that is still a single whitespace.
    Returns True if no whitespace cells are found, else returns False.'''
for x in spaces:
if x == ' ':
return False
else:
pass
return True
def winning_combo(spaces, markers):
'''Returns False, 'NO_WINNER' if no winning combination is detected. Else, returns (True, winning_coor1, ...,
winning_coor4, direction_of_the_winning_combination).'''
# The (list) variable markers is either ['X', 'O'] or ['O','X'] depending on
# marker choice by player 1 at start of game. The list variable, spaces, contains
# where markers have been placed.
def horizontal_win(marker):
# Check for horizontal four-in-a-row
|
def vertical_win(marker):
for x2 in range(0,7):
            # Iterate up a column checking for four-in-a-row. Only start from cells in the first four rows (x3)
# to avoid going off the board (an indexing error).
for x3 in range(0,4):
if marker == spaces[x2+x3*7] == spaces[x2+x3*7+7] == spaces[x2+x3*7+14] == spaces[x2+x3*7+21]:
return [True, x2+x3*7+0, x2+x3*7+7, x2+x3*7+14, x2+x3*7+21]
#return True, the first found winning four vertical coordinates (searches bottom to top).
return (False, 0)
def ascending_diagonal_win(marker):
        # Check for an ascending left-right diagonal four-in-a-row. Left-right ascending diagonals are 8 indices apart.
# Iterate on first four columns.
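        # A +8 step is one row up (+7) plus one column right (+1); starting only from
        # columns 0-3 and rows 0-3 keeps all four cells of the diagonal on the board.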
for x2 in range(0,4):
# Iterate on first four rows
for x3 in range(0,4):
if marker == spaces[x2+x3*7+0] == spaces[x2+x3*7+8] == spaces[x2+x3*7+16] == spaces[x2+x3*7+24]:
return [True, x2+x3*7+0, x2+x3*7+8, x2+x3*7+16, x2+x3*7+24]
#return True, first found winning diagonal found. Searches 0-3, then moves over 1 column.
return (False, 0)
def descending_diagonal_win(marker):
        # Check for a descending left-right diagonal. Left-right descending diagonals are 6 indices apart.
# Iterate on first four columns (0-3)
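        # A -6 step is one row down (-7) plus one column right (+1); starting only from
        # columns 0-3 and rows 3-6 keeps all four cells of the diagonal on the board.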
for x2 in range(0,4):
# Iterate on highest four rows (3-6)
for x3 in range(3,7):
if marker == spaces[x2+x3*7-0] == spaces[x2+x3*7-6] == spaces[x2+x3*7-12] == spaces[x2+x3*7-18]:
return [True, x2+x3*7-0, x2+x3*7-6, x2+x3*7-12, x2+x3*7-18]
#return True
return (False, 0)
# Iterate over both markers (X and O).
for t in markers:
hor_win = horizontal_win(t)
ver_win = vertical_win(t)
asc_d_win = ascending_diagonal_win(t)
des_d_win = descending_diagonal_win(t)
if hor_win[0]==True or ver_win[0]==True or asc_d_win[0]==True or des_d_win[0]==True:
return [True, t, hor_win, ver_win, asc_d_win, des_d_win]
return False, 'NO_WINNER'
def extend_winning_combos(list_with_winning_combo_info, spaces):
winners_marker = list_with_winning_combo_info[1]
# If a horizontal win occurred, highlight all consecutive markers in that winning series.
if list_with_winning_combo_info[2][0] == True:
# Set the off-board limit to the last winning coordinate the horizontal series
last_winning_coor = list_with_winning_combo_info[2][-1]
board_limit = last_winning_coor
initial_last_coor = last_winning_coor
# Determine the rightmost board position in the row of the winning horizontal combinations
while board_limit % 7 != 6:
board_limit += 1
# While we haven't moved to far right on the board, <= test, and the marker there matches,
# and this while loop is not executing the first time (inner if test), append the board index
# contained in last_winning_coor to the winning_combo to the tuple inside of list_with_winning_combo_info
while last_winning_coor <= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[2].append(last_winning_coor)
last_winning_coor += 1
else:
pass
# If a vertical win occurred
if list_with_winning_combo_info[3][0] == True:
last_winning_coor = list_with_winning_combo_info[3][-1]
board_limit = (last_winning_coor % 7) + 42 #Find column number with %7, add 42 to get index of highest board position in that column
initial_last_coor = last_winning_coor
while last_winning_coor <= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[3].append(last_winning_coor)
last_winning_coor += 7
# If an ascending diagonal win occurred
if list_with_winning_combo_info[4][0] == True:
last_winning_coor = list_with_winning_combo_info[4][-1]
board_limit = last_winning_coor
initial_last_coor = last_winning_coor
# The ascending diagonal board limit is always 7th (index 6) column spot, or row 7 columns 4-7.
while (board_limit % 7 != 6 and board_limit < 45):
            board_limit += 8 # Ascending diagonals are 8 indices apart
while last_winning_coor <= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[4].append(last_winning_coor)
last_winning_coor += 8
else:
pass
# If a descending diagonal win occurred
if list_with_winning_combo_info[5][0] == True:
last_winning_coor = list_with_winning_combo_info[5][4]
board_limit = last_winning_coor
initial_last_coor = last_winning_coor
# The descending diagonal board limit calculation
while (board_limit - 6 > 0) and (board_limit % 7 != 6):
board_limit -= 6
while last_winning_coor >= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[5].append(last_winning_coor)
last_winning_coor -= 6
else:
pass
return list_with_winning_combo_info
def make_winning_combos_green(list_with_winning_combo_info):
for x1 in range(2,6):
if list_with_winning_combo_info[x1][0] == True:
for x2 in range(1, len(list_with_winning_combo_info[x1])):
spaces[list_with_winning_combo_info[x1][x2]]='\u001b[1m\u001b[32m'+spaces[list_with_winning_combo_info[x1][x2]]+'\u001b[0m'
def place_move(spaces, markers, turn):
def valid_column(Move):
while True:
if (Move < '1') or ('7' < Move) or (len(Move) > 1):
Move = input('Error. Invalid move. Try again player {}? '.format(turn))
else:
Move = int(Move)
return Move
def column_full(spaces, move):
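        # Scans the chosen column bottom-up: returns (False, row) for the first empty
        # slot, or (True, 6) when every cell in the column is already occupied.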
for x2 in range(0,7):
if spaces[(move-1)+7*x2] == ' ':
return (False, x2)
return (True, x2)
while True:
move = input('Where would you like to go player {}? '.format(turn))
move = valid_column(move)
move = int(move)
ColumnFullTuple = column_full(spaces, move)
if ColumnFullTuple[0] == True:
            print("Column {} is full. I'll ask you again.".format(move))
else:
spaces[(move-1)+ColumnFullTuple[1]*7] = markers[turn-1]
break
def ConnectFour():
Markers = pick_markers()
global spaces
spaces = [' ']
print_board(spaces)
Winner = winning_combo(spaces, Markers)
Board_Full = board_full(spaces)
Turn = 1
while Winner[0] == False and Board_Full == False:
place_move(spaces, Markers, Turn)
print_board(spaces)
Winner = winning_combo(spaces, Markers)
Board_Full = board_full(spaces)
if Winner[0] == True:
# Replaces the first four detected winning coordinates with the ANSI codes to make them green.
list_with_winning_combos_info = extend_winning_combos(Winner, spaces)
make_winning_combos_green(list_with_winning_combos_info)
print_board(spaces)
print('Congratulations Player {}! YOU WIN!'.format(Turn))
break
elif Winner[0] == False and Board_Full == True:
print('Draw. No winner.')
break
else:
pass
if Turn == 1:
Turn = 2
else:
Turn = 1
# Start the game
ConnectFour()
while True:
play_again = input("Would you like to play again? (Y/N): ")
play_again = play_again.lower()
while True:
if play_again[0] != 'y' and play_again[0] != 'n':
play_again = input("I don't understand. Try again (Y/N): ")
play_again = play_again.lower()
else:
break
if play_again == 'y':
ConnectFour()
else:
print('Goodbye!')
break | for x2 in [0,7,14,21,28,35,42]:
for x3 in range(0,4): # Can't be 7
if marker == spaces[x2+x3] == spaces[x2+x3+1] == spaces[x2+x3+2] == spaces[x2+x3+3]:
return [True, x2+x3, x2+x3+1, x2+x3+2, x2+x3+3]
#return True, and the first winning horizontal combo (searches board left to right)
        # Check for vertical four-in-a-row. Iterate up and down (inner x3 for) on a single column (outer x2 for).
return (False, 0) | identifier_body |
connect_four.py | # This script implements connect four and highlights winning series,
# series with greater than three consecutive Xs or Os.
# The pick_markers module for the start of the game.
def | ():
'''
Module to assign X or O to the two players. Returns a list with two elements,
where index 0 is player 1's marker, index 1 is player 2's marker.
'''
import string
plyr1 = input("Player 1, choose X or O as your marker: ")
plyr1_mkr = plyr1.lower()
while True:
if plyr1_mkr != 'x' and plyr1_mkr != 'o':
plyr1 = input('ERROR. Invalid marker. Choose X or O: ')
plyr1_mkr = plyr1.lower()
else:
if plyr1_mkr == 'o':
print('Player 2, your marker is X')
plyr2_mkr = 'X'
else:
                print("Player 2, you are O's")
plyr2_mkr = 'O'
break
return [plyr1.upper(), plyr2_mkr]
# Code to make clear() clear the command line console. NOTE!!! If on linux, replace 'cls' with 'clear'.
import os
clear = lambda: os.system('cls')
import colorama
# This module constructs the initial board and prints the updated board throughout the game.
def print_board(spaces):
    ''' Uses the contents of spaces (list variable) to construct/update
the gameboard on the console. '''
clear()
while len(spaces) < 49:
spaces.append(' ')
# Start with 7 empty lists... Index 0 corresponds to the TOP ROW of the printed board!!!
board = [[], [], [], [], [], [], []]
for x1 in range(0,len(board)):
single_row = [' '*20,'| ', spaces[(42-x1*7+0)],' | ', spaces[(42-x1*7+1)], ' | ', spaces[(42-x1*7+2)],' | ', spaces[(42-x1*7+3)], ' | ', spaces[(42-x1*7+4)], ' | ', spaces[(42-x1*7+5)], ' | ', spaces[(42-x1*7+6)], ' |']
board[x1] = single_row
print('\n'*3)
print(' '*20+'-'*43)
# Nested for loops to print off the board. Top row, being at index 0
# in board (list of lists variable) prints off first.
for x1 in board:
board_string = ''
for x2 in x1:
board_string = board_string + x2
print(board_string)
print(' '*20+'-'*43)
print(' '*21+' 1 '+' 2 '+' 3 '+' 4 '+' 5 '+' 6 '+' 7 ')
print('\n'*2)
def board_full(spaces):
    '''Check if the board is full by looking for any cell in spaces (list variable) that is still a single whitespace.
    Returns True if no whitespace cells are found, else returns False.'''
for x in spaces:
if x == ' ':
return False
else:
pass
return True
def winning_combo(spaces, markers):
'''Returns False, 'NO_WINNER' if no winning combination is detected. Else, returns (True, winning_coor1, ...,
winning_coor4, direction_of_the_winning_combination).'''
# The (list) variable markers is either ['X', 'O'] or ['O','X'] depending on
# marker choice by player 1 at start of game. The list variable, spaces, contains
# where markers have been placed.
def horizontal_win(marker):
# Check for horizontal four-in-a-row
for x2 in [0,7,14,21,28,35,42]:
for x3 in range(0,4): # Can't be 7
if marker == spaces[x2+x3] == spaces[x2+x3+1] == spaces[x2+x3+2] == spaces[x2+x3+3]:
return [True, x2+x3, x2+x3+1, x2+x3+2, x2+x3+3]
#return True, and the first winning horizontal combo (searches board left to right)
        # Check for vertical four-in-a-row. Iterate up and down (inner x3 for) on a single column (outer x2 for).
return (False, 0)
def vertical_win(marker):
for x2 in range(0,7):
            # Iterate up a column checking for four-in-a-row. Only start from cells in the first four rows (x3)
# to avoid going off the board (an indexing error).
for x3 in range(0,4):
if marker == spaces[x2+x3*7] == spaces[x2+x3*7+7] == spaces[x2+x3*7+14] == spaces[x2+x3*7+21]:
return [True, x2+x3*7+0, x2+x3*7+7, x2+x3*7+14, x2+x3*7+21]
#return True, the first found winning four vertical coordinates (searches bottom to top).
return (False, 0)
def ascending_diagonal_win(marker):
        # Check for an ascending left-right diagonal four-in-a-row. Left-right ascending diagonals are 8 indices apart.
# Iterate on first four columns.
for x2 in range(0,4):
# Iterate on first four rows
for x3 in range(0,4):
if marker == spaces[x2+x3*7+0] == spaces[x2+x3*7+8] == spaces[x2+x3*7+16] == spaces[x2+x3*7+24]:
return [True, x2+x3*7+0, x2+x3*7+8, x2+x3*7+16, x2+x3*7+24]
#return True, first found winning diagonal found. Searches 0-3, then moves over 1 column.
return (False, 0)
def descending_diagonal_win(marker):
        # Check for a descending left-right diagonal. Left-right descending diagonals are 6 indices apart.
# Iterate on first four columns (0-3)
for x2 in range(0,4):
# Iterate on highest four rows (3-6)
for x3 in range(3,7):
if marker == spaces[x2+x3*7-0] == spaces[x2+x3*7-6] == spaces[x2+x3*7-12] == spaces[x2+x3*7-18]:
return [True, x2+x3*7-0, x2+x3*7-6, x2+x3*7-12, x2+x3*7-18]
#return True
return (False, 0)
# Iterate over both markers (X and O).
for t in markers:
hor_win = horizontal_win(t)
ver_win = vertical_win(t)
asc_d_win = ascending_diagonal_win(t)
des_d_win = descending_diagonal_win(t)
if hor_win[0]==True or ver_win[0]==True or asc_d_win[0]==True or des_d_win[0]==True:
return [True, t, hor_win, ver_win, asc_d_win, des_d_win]
return False, 'NO_WINNER'
def extend_winning_combos(list_with_winning_combo_info, spaces):
winners_marker = list_with_winning_combo_info[1]
# If a horizontal win occurred, highlight all consecutive markers in that winning series.
if list_with_winning_combo_info[2][0] == True:
# Set the off-board limit to the last winning coordinate the horizontal series
last_winning_coor = list_with_winning_combo_info[2][-1]
board_limit = last_winning_coor
initial_last_coor = last_winning_coor
# Determine the rightmost board position in the row of the winning horizontal combinations
while board_limit % 7 != 6:
board_limit += 1
# While we haven't moved to far right on the board, <= test, and the marker there matches,
# and this while loop is not executing the first time (inner if test), append the board index
# contained in last_winning_coor to the winning_combo to the tuple inside of list_with_winning_combo_info
while last_winning_coor <= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[2].append(last_winning_coor)
last_winning_coor += 1
else:
pass
# If a vertical win occurred
if list_with_winning_combo_info[3][0] == True:
last_winning_coor = list_with_winning_combo_info[3][-1]
board_limit = (last_winning_coor % 7) + 42 #Find column number with %7, add 42 to get index of highest board position in that column
initial_last_coor = last_winning_coor
while last_winning_coor <= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[3].append(last_winning_coor)
last_winning_coor += 7
# If an ascending diagonal win occurred
if list_with_winning_combo_info[4][0] == True:
last_winning_coor = list_with_winning_combo_info[4][-1]
board_limit = last_winning_coor
initial_last_coor = last_winning_coor
# The ascending diagonal board limit is always 7th (index 6) column spot, or row 7 columns 4-7.
while (board_limit % 7 != 6 and board_limit < 45):
            board_limit += 8 # Ascending diagonals are 8 indices apart
while last_winning_coor <= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[4].append(last_winning_coor)
last_winning_coor += 8
else:
pass
# If a descending diagonal win occurred
if list_with_winning_combo_info[5][0] == True:
last_winning_coor = list_with_winning_combo_info[5][4]
board_limit = last_winning_coor
initial_last_coor = last_winning_coor
# The descending diagonal board limit calculation
while (board_limit - 6 > 0) and (board_limit % 7 != 6):
board_limit -= 6
while last_winning_coor >= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[5].append(last_winning_coor)
last_winning_coor -= 6
else:
pass
return list_with_winning_combo_info
def make_winning_combos_green(list_with_winning_combo_info):
for x1 in range(2,6):
if list_with_winning_combo_info[x1][0] == True:
for x2 in range(1, len(list_with_winning_combo_info[x1])):
spaces[list_with_winning_combo_info[x1][x2]]='\u001b[1m\u001b[32m'+spaces[list_with_winning_combo_info[x1][x2]]+'\u001b[0m'
def place_move(spaces, markers, turn):
def valid_column(Move):
while True:
if (Move < '1') or ('7' < Move) or (len(Move) > 1):
Move = input('Error. Invalid move. Try again player {}? '.format(turn))
else:
Move = int(Move)
return Move
def column_full(spaces, move):
for x2 in range(0,7):
if spaces[(move-1)+7*x2] == ' ':
return (False, x2)
return (True, x2)
while True:
move = input('Where would you like to go player {}? '.format(turn))
move = valid_column(move)
move = int(move)
ColumnFullTuple = column_full(spaces, move)
if ColumnFullTuple[0] == True:
            print("Column {} is full. I'll ask you again.".format(move))
else:
spaces[(move-1)+ColumnFullTuple[1]*7] = markers[turn-1]
break
def ConnectFour():
Markers = pick_markers()
global spaces
spaces = [' ']
print_board(spaces)
Winner = winning_combo(spaces, Markers)
Board_Full = board_full(spaces)
Turn = 1
while Winner[0] == False and Board_Full == False:
place_move(spaces, Markers, Turn)
print_board(spaces)
Winner = winning_combo(spaces, Markers)
Board_Full = board_full(spaces)
if Winner[0] == True:
# Replaces the first four detected winning coordinates with the ANSI codes to make them green.
list_with_winning_combos_info = extend_winning_combos(Winner, spaces)
make_winning_combos_green(list_with_winning_combos_info)
print_board(spaces)
print('Congratulations Player {}! YOU WIN!'.format(Turn))
break
elif Winner[0] == False and Board_Full == True:
print('Draw. No winner.')
break
else:
pass
if Turn == 1:
Turn = 2
else:
Turn = 1
# Start the game
ConnectFour()
while True:
play_again = input("Would you like to play again? (Y/N): ")
play_again = play_again.lower()
while True:
if play_again[0] != 'y' and play_again[0] != 'n':
play_again = input("I don't understand. Try again (Y/N): ")
play_again = play_again.lower()
else:
break
if play_again == 'y':
ConnectFour()
else:
print('Goodbye!')
break | pick_markers | identifier_name |
connect_four.py | # This script implements connect four and highlights winning series,
# series with greater than three consecutive Xs or Os.
# The pick_markers module for the start of the game.
def pick_markers():
'''
Module to assign X or O to the two players. Returns a list with two elements,
where index 0 is player 1's marker, index 1 is player 2's marker.
'''
import string
plyr1 = input("Player 1, choose X or O as your marker: ")
plyr1_mkr = plyr1.lower()
while True:
if plyr1_mkr != 'x' and plyr1_mkr != 'o':
plyr1 = input('ERROR. Invalid marker. Choose X or O: ')
plyr1_mkr = plyr1.lower()
else:
if plyr1_mkr == 'o':
print('Player 2, your marker is X')
plyr2_mkr = 'X'
else:
            print("Player 2, you are O's")
plyr2_mkr = 'O'
break
return [plyr1.upper(), plyr2_mkr]
# Code to make clear() clear the command line console. NOTE!!! If on linux, replace 'cls' with 'clear'.
import os
 | ''' Uses the contents of spaces (list variable) to construct/update
the gameboard on the console. '''
clear()
while len(spaces) < 49:
spaces.append(' ')
# Start with 7 empty lists... Index 0 corresponds to the TOP ROW of the printed board!!!
board = [[], [], [], [], [], [], []]
for x1 in range(0,len(board)):
single_row = [' '*20,'| ', spaces[(42-x1*7+0)],' | ', spaces[(42-x1*7+1)], ' | ', spaces[(42-x1*7+2)],' | ', spaces[(42-x1*7+3)], ' | ', spaces[(42-x1*7+4)], ' | ', spaces[(42-x1*7+5)], ' | ', spaces[(42-x1*7+6)], ' |']
board[x1] = single_row
print('\n'*3)
print(' '*20+'-'*43)
# Nested for loops to print off the board. Top row, being at index 0
# in board (list of lists variable) prints off first.
for x1 in board:
board_string = ''
for x2 in x1:
board_string = board_string + x2
print(board_string)
print(' '*20+'-'*43)
print(' '*21+' 1 '+' 2 '+' 3 '+' 4 '+' 5 '+' 6 '+' 7 ')
print('\n'*2)
def board_full(spaces):
    '''Check if the board is full by looking for any cell in spaces (list variable) that is still a single whitespace.
    Returns True if no whitespace cells are found, else returns False.'''
for x in spaces:
if x == ' ':
return False
else:
pass
return True
def winning_combo(spaces, markers):
'''Returns False, 'NO_WINNER' if no winning combination is detected. Else, returns (True, winning_coor1, ...,
winning_coor4, direction_of_the_winning_combination).'''
# The (list) variable markers is either ['X', 'O'] or ['O','X'] depending on
# marker choice by player 1 at start of game. The list variable, spaces, contains
# where markers have been placed.
def horizontal_win(marker):
# Check for horizontal four-in-a-row
for x2 in [0,7,14,21,28,35,42]:
for x3 in range(0,4): # Can't be 7
if marker == spaces[x2+x3] == spaces[x2+x3+1] == spaces[x2+x3+2] == spaces[x2+x3+3]:
return [True, x2+x3, x2+x3+1, x2+x3+2, x2+x3+3]
#return True, and the first winning horizontal combo (searches board left to right)
        # Check for vertical four-in-a-row. Iterate up and down (inner x3 for) on a single column (outer x2 for).
return (False, 0)
def vertical_win(marker):
for x2 in range(0,7):
            # Iterate up a column checking for four-in-a-row. Only start from cells in the first four rows (x3)
# to avoid going off the board (an indexing error).
for x3 in range(0,4):
if marker == spaces[x2+x3*7] == spaces[x2+x3*7+7] == spaces[x2+x3*7+14] == spaces[x2+x3*7+21]:
return [True, x2+x3*7+0, x2+x3*7+7, x2+x3*7+14, x2+x3*7+21]
#return True, the first found winning four vertical coordinates (searches bottom to top).
return (False, 0)
def ascending_diagonal_win(marker):
        # Check for an ascending left-right diagonal four-in-a-row. Left-right ascending diagonals are 8 indices apart.
# Iterate on first four columns.
for x2 in range(0,4):
# Iterate on first four rows
for x3 in range(0,4):
if marker == spaces[x2+x3*7+0] == spaces[x2+x3*7+8] == spaces[x2+x3*7+16] == spaces[x2+x3*7+24]:
return [True, x2+x3*7+0, x2+x3*7+8, x2+x3*7+16, x2+x3*7+24]
#return True, first found winning diagonal found. Searches 0-3, then moves over 1 column.
return (False, 0)
def descending_diagonal_win(marker):
        # Check for a descending left-right diagonal. Left-right descending diagonals are 6 indices apart.
# Iterate on first four columns (0-3)
for x2 in range(0,4):
# Iterate on highest four rows (3-6)
for x3 in range(3,7):
if marker == spaces[x2+x3*7-0] == spaces[x2+x3*7-6] == spaces[x2+x3*7-12] == spaces[x2+x3*7-18]:
return [True, x2+x3*7-0, x2+x3*7-6, x2+x3*7-12, x2+x3*7-18]
#return True
return (False, 0)
# Iterate over both markers (X and O).
for t in markers:
hor_win = horizontal_win(t)
ver_win = vertical_win(t)
asc_d_win = ascending_diagonal_win(t)
des_d_win = descending_diagonal_win(t)
if hor_win[0]==True or ver_win[0]==True or asc_d_win[0]==True or des_d_win[0]==True:
return [True, t, hor_win, ver_win, asc_d_win, des_d_win]
return False, 'NO_WINNER'
def extend_winning_combos(list_with_winning_combo_info, spaces):
winners_marker = list_with_winning_combo_info[1]
# If a horizontal win occurred, highlight all consecutive markers in that winning series.
if list_with_winning_combo_info[2][0] == True:
# Set the off-board limit to the last winning coordinate the horizontal series
last_winning_coor = list_with_winning_combo_info[2][-1]
board_limit = last_winning_coor
initial_last_coor = last_winning_coor
# Determine the rightmost board position in the row of the winning horizontal combinations
while board_limit % 7 != 6:
board_limit += 1
# While we haven't moved to far right on the board, <= test, and the marker there matches,
# and this while loop is not executing the first time (inner if test), append the board index
# contained in last_winning_coor to the winning_combo to the tuple inside of list_with_winning_combo_info
while last_winning_coor <= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[2].append(last_winning_coor)
last_winning_coor += 1
else:
pass
# If a vertical win occurred
if list_with_winning_combo_info[3][0] == True:
last_winning_coor = list_with_winning_combo_info[3][-1]
board_limit = (last_winning_coor % 7) + 42 #Find column number with %7, add 42 to get index of highest board position in that column
initial_last_coor = last_winning_coor
while last_winning_coor <= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[3].append(last_winning_coor)
last_winning_coor += 7
# If an ascending diagonal win occurred
if list_with_winning_combo_info[4][0] == True:
last_winning_coor = list_with_winning_combo_info[4][-1]
board_limit = last_winning_coor
initial_last_coor = last_winning_coor
# The ascending diagonal board limit is always 7th (index 6) column spot, or row 7 columns 4-7.
while (board_limit % 7 != 6 and board_limit < 45):
            board_limit += 8 # Ascending diagonals are 8 indices apart
while last_winning_coor <= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[4].append(last_winning_coor)
last_winning_coor += 8
else:
pass
# If a descending diagonal win occurred
if list_with_winning_combo_info[5][0] == True:
last_winning_coor = list_with_winning_combo_info[5][4]
board_limit = last_winning_coor
initial_last_coor = last_winning_coor
# The descending diagonal board limit calculation
while (board_limit - 6 > 0) and (board_limit % 7 != 6):
board_limit -= 6
while last_winning_coor >= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[5].append(last_winning_coor)
last_winning_coor -= 6
else:
pass
return list_with_winning_combo_info
def make_winning_combos_green(list_with_winning_combo_info):
for x1 in range(2,6):
if list_with_winning_combo_info[x1][0] == True:
for x2 in range(1, len(list_with_winning_combo_info[x1])):
spaces[list_with_winning_combo_info[x1][x2]]='\u001b[1m\u001b[32m'+spaces[list_with_winning_combo_info[x1][x2]]+'\u001b[0m'
def place_move(spaces, markers, turn):
def valid_column(Move):
while True:
if (Move < '1') or ('7' < Move) or (len(Move) > 1):
Move = input('Error. Invalid move. Try again player {}? '.format(turn))
else:
Move = int(Move)
return Move
def column_full(spaces, move):
for x2 in range(0,7):
if spaces[(move-1)+7*x2] == ' ':
return (False, x2)
return (True, x2)
while True:
move = input('Where would you like to go player {}? '.format(turn))
move = valid_column(move)
move = int(move)
ColumnFullTuple = column_full(spaces, move)
if ColumnFullTuple[0] == True:
            print("Column {} is full. I'll ask you again.".format(move))
else:
spaces[(move-1)+ColumnFullTuple[1]*7] = markers[turn-1]
break
def ConnectFour():
Markers = pick_markers()
global spaces
spaces = [' ']
print_board(spaces)
Winner = winning_combo(spaces, Markers)
Board_Full = board_full(spaces)
Turn = 1
while Winner[0] == False and Board_Full == False:
place_move(spaces, Markers, Turn)
print_board(spaces)
Winner = winning_combo(spaces, Markers)
Board_Full = board_full(spaces)
if Winner[0] == True:
# Replaces the first four detected winning coordinates with the ANSI codes to make them green.
list_with_winning_combos_info = extend_winning_combos(Winner, spaces)
make_winning_combos_green(list_with_winning_combos_info)
print_board(spaces)
print('Congratulations Player {}! YOU WIN!'.format(Turn))
break
elif Winner[0] == False and Board_Full == True:
print('Draw. No winner.')
break
else:
pass
if Turn == 1:
Turn = 2
else:
Turn = 1
# Start the game
ConnectFour()
while True:
play_again = input("Would you like to play again? (Y/N): ")
play_again = play_again.lower()
while True:
if play_again[0] != 'y' and play_again[0] != 'n':
play_again = input("I don't understand. Try again (Y/N): ")
play_again = play_again.lower()
else:
break
if play_again == 'y':
ConnectFour()
else:
print('Goodbye!')
break | clear = lambda: os.system('cls')
import colorama
# This module constructs the initial board and prints the updated board throughout the game.
def print_board(spaces):
| random_line_split |
connect_four.py | # This script implements connect four and highlights winning series,
# series with greater than three consecutive Xs or Os.
# The pick_markers module for the start of the game.
def pick_markers():
'''
Module to assign X or O to the two players. Returns a list with two elements,
where index 0 is player 1's marker, index 1 is player 2's marker.
'''
import string
plyr1 = input("Player 1, choose X or O as your marker: ")
plyr1_mkr = plyr1.lower()
while True:
if plyr1_mkr != 'x' and plyr1_mkr != 'o':
plyr1 = input('ERROR. Invalid marker. Choose X or O: ')
plyr1_mkr = plyr1.lower()
else:
if plyr1_mkr == 'o':
print('Player 2, your marker is X')
plyr2_mkr = 'X'
else:
                print("Player 2, you are O's")
plyr2_mkr = 'O'
break
return [plyr1.upper(), plyr2_mkr]
# Code to make clear() clear the command line console. NOTE!!! If on linux, replace 'cls' with 'clear'.
import os
clear = lambda: os.system('cls')
import colorama
# This module constructs the initial board and prints the updated board throughout the game.
def print_board(spaces):
    ''' Uses the contents of spaces (list variable) to construct/update
the gameboard on the console. '''
clear()
while len(spaces) < 49:
spaces.append(' ')
# Start with 7 empty lists... Index 0 corresponds to the TOP ROW of the printed board!!!
board = [[], [], [], [], [], [], []]
for x1 in range(0,len(board)):
single_row = [' '*20,'| ', spaces[(42-x1*7+0)],' | ', spaces[(42-x1*7+1)], ' | ', spaces[(42-x1*7+2)],' | ', spaces[(42-x1*7+3)], ' | ', spaces[(42-x1*7+4)], ' | ', spaces[(42-x1*7+5)], ' | ', spaces[(42-x1*7+6)], ' |']
board[x1] = single_row
print('\n'*3)
print(' '*20+'-'*43)
# Nested for loops to print off the board. Top row, being at index 0
# in board (list of lists variable) prints off first.
for x1 in board:
board_string = ''
for x2 in x1:
board_string = board_string + x2
print(board_string)
print(' '*20+'-'*43)
print(' '*21+' 1 '+' 2 '+' 3 '+' 4 '+' 5 '+' 6 '+' 7 ')
print('\n'*2)
def board_full(spaces):
'''Check if board is full by checking if spaces (list variable) contains a char of a single whitespace.
    Returns a boolean of True if no whitespaces are found, else returns False.'''
for x in spaces:
if x == ' ':
return False
else:
pass
return True
def winning_combo(spaces, markers):
    '''Returns (False, 'NO_WINNER') if no winning combination is detected. Otherwise returns
    [True, winning_marker, horizontal_result, vertical_result, ascending_result, descending_result].'''
# The (list) variable markers is either ['X', 'O'] or ['O','X'] depending on
# marker choice by player 1 at start of game. The list variable, spaces, contains
# where markers have been placed.
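    # Layout note (added for clarity): spaces is a flat list of 49 cells where
    # index = (column - 1) + 7 * row, with row 0 at the bottom of the printed board.
    # So indices 0-6 form the bottom row and 42-48 the top row, which is why the
    # offsets used below step by 1 (horizontal), 7 (vertical), 8 (ascending diagonal)
    # and 6 (descending diagonal).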
def horizontal_win(marker):
# Check for horizontal four-in-a-row
for x2 in [0,7,14,21,28,35,42]:
for x3 in range(0,4): # Can't be 7
if marker == spaces[x2+x3] == spaces[x2+x3+1] == spaces[x2+x3+2] == spaces[x2+x3+3]:
return [True, x2+x3, x2+x3+1, x2+x3+2, x2+x3+3]
#return True, and the first winning horizontal combo (searches board left to right)
        # Check for vertical four-in-a-row. Iterate up and down (inner x3 for) on a single column (outer x2 for).
return (False, 0)
def vertical_win(marker):
for x2 in range(0,7):
            # Iterate up a column checking for four-in-a-row. Run this check only on spaces in the first
            # four rows (x3) of the board to avoid going off the board (an indexing error).
for x3 in range(0,4):
if marker == spaces[x2+x3*7] == spaces[x2+x3*7+7] == spaces[x2+x3*7+14] == spaces[x2+x3*7+21]:
return [True, x2+x3*7+0, x2+x3*7+7, x2+x3*7+14, x2+x3*7+21]
#return True, the first found winning four vertical coordinates (searches bottom to top).
return (False, 0)
def ascending_diagonal_win(marker):
        # Check for an ascending left-right diagonal four-in-a-row. Left-right ascending diagonals are 8 indices apart.
# Iterate on first four columns.
for x2 in range(0,4):
# Iterate on first four rows
for x3 in range(0,4):
if marker == spaces[x2+x3*7+0] == spaces[x2+x3*7+8] == spaces[x2+x3*7+16] == spaces[x2+x3*7+24]:
return [True, x2+x3*7+0, x2+x3*7+8, x2+x3*7+16, x2+x3*7+24]
#return True, first found winning diagonal found. Searches 0-3, then moves over 1 column.
return (False, 0)
def descending_diagonal_win(marker):
        # Check for a descending left-right diagonal. Left-right descending diagonals are 6 indices apart.
# Iterate on first four columns (0-3)
for x2 in range(0,4):
# Iterate on highest four rows (3-6)
for x3 in range(3,7):
if marker == spaces[x2+x3*7-0] == spaces[x2+x3*7-6] == spaces[x2+x3*7-12] == spaces[x2+x3*7-18]:
return [True, x2+x3*7-0, x2+x3*7-6, x2+x3*7-12, x2+x3*7-18]
#return True
return (False, 0)
# Iterate over both markers (X and O).
for t in markers:
hor_win = horizontal_win(t)
ver_win = vertical_win(t)
asc_d_win = ascending_diagonal_win(t)
des_d_win = descending_diagonal_win(t)
if hor_win[0]==True or ver_win[0]==True or asc_d_win[0]==True or des_d_win[0]==True:
return [True, t, hor_win, ver_win, asc_d_win, des_d_win]
return False, 'NO_WINNER'
def extend_winning_combos(list_with_winning_combo_info, spaces):
winners_marker = list_with_winning_combo_info[1]
# If a horizontal win occurred, highlight all consecutive markers in that winning series.
if list_with_winning_combo_info[2][0] == True:
        # Set the off-board limit to the last winning coordinate in the horizontal series
last_winning_coor = list_with_winning_combo_info[2][-1]
board_limit = last_winning_coor
initial_last_coor = last_winning_coor
# Determine the rightmost board position in the row of the winning horizontal combinations
while board_limit % 7 != 6:
board_limit += 1
        # While we have not moved past the right edge of the board (the <= test), the marker at
        # last_winning_coor still matches, and this is not the first iteration (the inner if test),
        # append that board index to the winning combo stored in list_with_winning_combo_info
while last_winning_coor <= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[2].append(last_winning_coor)
last_winning_coor += 1
else:
pass
# If a vertical win occurred
if list_with_winning_combo_info[3][0] == True:
last_winning_coor = list_with_winning_combo_info[3][-1]
board_limit = (last_winning_coor % 7) + 42 #Find column number with %7, add 42 to get index of highest board position in that column
initial_last_coor = last_winning_coor
while last_winning_coor <= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[3].append(last_winning_coor)
last_winning_coor += 7
# If an ascending diagonal win occurred
if list_with_winning_combo_info[4][0] == True:
last_winning_coor = list_with_winning_combo_info[4][-1]
board_limit = last_winning_coor
initial_last_coor = last_winning_coor
# The ascending diagonal board limit is always 7th (index 6) column spot, or row 7 columns 4-7.
while (board_limit % 7 != 6 and board_limit < 45):
            board_limit += 8 # Ascending diagonals are 8 indices apart
while last_winning_coor <= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[4].append(last_winning_coor)
last_winning_coor += 8
else:
pass
# If a descending diagonal win occurred
if list_with_winning_combo_info[5][0] == True:
last_winning_coor = list_with_winning_combo_info[5][4]
board_limit = last_winning_coor
initial_last_coor = last_winning_coor
# The descending diagonal board limit calculation
while (board_limit - 6 > 0) and (board_limit % 7 != 6):
board_limit -= 6
while last_winning_coor >= board_limit and spaces[last_winning_coor]==winners_marker:
if initial_last_coor != last_winning_coor:
list_with_winning_combo_info[5].append(last_winning_coor)
last_winning_coor -= 6
else:
pass
return list_with_winning_combo_info
def make_winning_combos_green(list_with_winning_combo_info):
for x1 in range(2,6):
if list_with_winning_combo_info[x1][0] == True:
for x2 in range(1, len(list_with_winning_combo_info[x1])):
spaces[list_with_winning_combo_info[x1][x2]]='\u001b[1m\u001b[32m'+spaces[list_with_winning_combo_info[x1][x2]]+'\u001b[0m'
def place_move(spaces, markers, turn):
def valid_column(Move):
while True:
if (Move < '1') or ('7' < Move) or (len(Move) > 1):
Move = input('Error. Invalid move. Try again player {}? '.format(turn))
else:
Move = int(Move)
return Move
def column_full(spaces, move):
for x2 in range(0,7):
if spaces[(move-1)+7*x2] == ' ':
return (False, x2)
return (True, x2)
while True:
move = input('Where would you like to go player {}? '.format(turn))
move = valid_column(move)
move = int(move)
ColumnFullTuple = column_full(spaces, move)
if ColumnFullTuple[0] == True:
            print('Column {} is full. I\'ll ask you again.'.format(move))
else:
spaces[(move-1)+ColumnFullTuple[1]*7] = markers[turn-1]
break
def ConnectFour():
Markers = pick_markers()
global spaces
spaces = [' ']
print_board(spaces)
Winner = winning_combo(spaces, Markers)
Board_Full = board_full(spaces)
Turn = 1
while Winner[0] == False and Board_Full == False:
place_move(spaces, Markers, Turn)
print_board(spaces)
Winner = winning_combo(spaces, Markers)
Board_Full = board_full(spaces)
if Winner[0] == True:
# Replaces the first four detected winning coordinates with the ANSI codes to make them green.
list_with_winning_combos_info = extend_winning_combos(Winner, spaces)
make_winning_combos_green(list_with_winning_combos_info)
print_board(spaces)
print('Congratulations Player {}! YOU WIN!'.format(Turn))
break
elif Winner[0] == False and Board_Full == True:
print('Draw. No winner.')
break
else:
pass
if Turn == 1:
Turn = 2
else:
Turn = 1
# Start the game
ConnectFour()
while True:
play_again = input("Would you like to play again? (Y/N): ")
play_again = play_again.lower()
while True:
if play_again[0] != 'y' and play_again[0] != 'n':
play_again = input("I don't understand. Try again (Y/N): ")
play_again = play_again.lower()
else:
break
    if play_again[0] == 'y':
|
else:
print('Goodbye!')
break | ConnectFour() | conditional_block |
ctrl_server.py | #!/usr/bin/env python
"""Server that accepts and executes control-type commands on the bot."""
import sys
import os
from inspect import getmembers, ismethod
from simplejson.decoder import JSONDecodeError
import zmq
import signal
# This is required to make imports work
sys.path = [os.getcwd()] + sys.path
import bot.lib.lib as lib
import pub_server as pub_server_mod
import bot.lib.messages as msgs
from bot.driver.mec_driver import MecDriver
def is_api_method(obj, name):
"""Tests whether named method exists in obj and is flagged for API export.
:param obj: API-exported object to search for the given method on.
    :type obj: string
:param name: Name of method to check for.
:type name: string
:returns: True if given method is on given obj and is exported, else False.
"""
try:
method = getattr(obj, name)
except AttributeError:
return False
return (ismethod(method) and hasattr(method, "__api_call"))
class CtrlServer(object):
"""Exports bot control via ZMQ.
    Most functionality exported by CtrlServer is in the form of methods
exposed by the API. @lib.api_call decorators can be added to bot
systems, which tags them for export. They can then be called
remotely via CtrlClient, which is typically owned by an interface
like the CLI, which typically accepts commands from an agent like
a human.
Some control is exported directly by CtrlServer, not through the
API. For example, CtrlServer responds directly to ping messages,
list messages (which give the objects/methods exposed by the API),
and exit messages.
CtrlServer is the primary owner of bot resources, which we call
systems. For example, it's CtrlServer that instantiates gunner
and follower. Through those two, CtrlServer owns the gun, the
IR hub, the turret and basically every other bot system.
The messages that CtrlServer accepts and responds with are fully
specified in lib.messages. Make any changes to messages there.
CtrlServer can be instructed (via the API) to spawn a new thread
for a PubServer. When that happens, CtrlServer passes its systems
to PubServer, which can read their state and publish it over a
ZMQ PUB socket.
"""
def __init__(self, testing=None, config_file="bot/config.yaml"):
"""Build ZMQ REP socket and instantiate bot systems.
:param testing: True if running on simulated HW, False if on bot.
:type testing: boolean
:param config_file: Name of file to read configuration from.
:type config_file: string
"""
# Register signal handler, shut down cleanly (think motors)
signal.signal(signal.SIGINT, self.signal_handler)
# Load configuration and logger
self.config = lib.get_config(config_file)
self.logger = lib.get_logger()
# Testing flag will cause objects to run on simulated hardware
if testing is True or testing == "True":
self.logger.info("CtrlServer running in test mode")
lib.set_testing(True)
elif testing is None:
self.logger.info(
"Defaulting to config testing flag: {}".format(
self.config["testing"]))
lib.set_testing(self.config["testing"])
else:
self.logger.info("CtrlServer running in non-test mode")
lib.set_testing(False)
# Build socket to listen for requests
self.context = zmq.Context()
self.ctrl_sock = self.context.socket(zmq.REP)
self.server_bind_addr = "{protocol}://{host}:{port}".format(
protocol=self.config["server_protocol"],
host=self.config["server_bind_host"],
port=self.config["ctrl_server_port"])
try:
self.ctrl_sock.bind(self.server_bind_addr)
except zmq.ZMQError:
self.logger.error("ZMQ error. Is a server already running?")
self.logger.warning("May be connected to an old server instance.")
sys.exit(1)
self.systems = self.assign_subsystems()
self.logger.info("Control server initialized")
# Don't spawn pub_server until told to
self.pub_server = None
def signal_handler(self, signal, frame):
self.logger.info("Caught SIGINT (Ctrl+C), closing cleanly")
self.clean_up()
self.logger.info("Cleaned up bot, exiting...")
sys.exit(0)
def assign_subsystems(self):
"""Instantiates and stores references to bot subsystems.
:returns: Dict of subsystems, maps system name to instantiated object.
"""
self.driver = MecDriver()
systems = {}
systems["ctrl"] = self
systems["driver"] = self.driver
self.logger.debug("Systems: {}".format(systems))
return systems
def listen(self):
"""Perpetually listen for messages, pass them to generic handler."""
self.logger.info("Control server: {}".format(self.server_bind_addr))
while True:
try:
msg = self.ctrl_sock.recv_json()
reply = self.handle_msg(msg)
self.logger.debug("Sending: {}".format(reply))
self.ctrl_sock.send_json(reply)
except JSONDecodeError:
err_msg = "Not a JSON message!"
self.logger.warning(err_msg)
self.ctrl_sock.send_json(msgs.error(err_msg))
except KeyboardInterrupt:
self.logger.info("Exiting control server. Bye!")
self.clean_up()
sys.exit(0)
def handle_msg(self, msg):
"""Generic message handler. Hands-off based on type of message.
:param msg: Message, received via ZMQ from client, to handle.
:type msg: dict
:returns: An appropriate message reply dict, from lib.messages.
"""
self.logger.debug("Received: {}".format(msg))
try:
msg_type = msg["type"]
except KeyError as e:
return msgs.error(e)
if msg_type == "ping_req":
reply = msgs.ping_reply()
elif msg_type == "list_req":
reply = self.list_callables()
elif msg_type == "call_req":
try:
obj_name = msg["obj_name"]
method = msg["method"]
params = msg["params"]
reply = self.call_method(obj_name, method, params)
except KeyError as e:
return msgs.error(e)
elif msg_type == "exit_req": | reply = msgs.exit_reply()
# Need to actually send reply here as we're about to exit
self.logger.debug("Sending: {}".format(reply))
self.ctrl_sock.send_json(reply)
self.clean_up()
sys.exit(0)
else:
err_msg = "Unrecognized message: {}".format(msg)
self.logger.warning(err_msg)
reply = msgs.error(err_msg)
return reply
def list_callables(self):
"""Build list of callable methods on each exported subsystem object.
Uses introspection to create a list of callable methods for each
registered subsystem object. Only methods which are flagged using the
@lib.api_call decorator will be included.
:returns: list_reply message with callable objects and their methods.
"""
self.logger.debug("List of callable API objects requested")
# Dict of subsystem object names to their callable methods.
callables = {}
for name, obj in self.systems.items():
methods = []
# Filter out methods which are not explicitly flagged for export
for member in getmembers(obj):
if is_api_method(obj, member[0]):
methods.append(member[0])
callables[name] = methods
return msgs.list_reply(callables)
def call_method(self, name, method, params):
"""Call a previously registered subsystem method by name. Only
methods tagged with the @api_call decorator can be called.
:param name: Assigned name of the registered subsystem.
:type name: string
:param method: Subsystem method to be called.
:type method: string
:param params: Additional parameters for the called method.
:type params: dict
:returns: call_reply or error message dict to be sent to caller.
"""
self.logger.debug("API call: {}.{}({})".format(name, method, params))
if name in self.systems:
obj = self.systems[name]
if is_api_method(obj, method):
try:
# Calls given obj.method, unpacking and passing params dict
call_return = getattr(obj, method)(**params)
msg = "Called {}.{}".format(name, method)
self.logger.debug(msg + ",returned:{}".format(call_return))
return msgs.call_reply(msg, call_return)
except TypeError:
# Raised when we have a mismatch of the method's kwargs
# TODO: Return argspec here?
err_msg = "Invalid params for {}.{}".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
except Exception as e:
# Catch exception raised by called method, notify client
err_msg = "Exception: '{}'".format(str(e))
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid method: '{}.{}'".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid object: '{}'".format(name)
self.logger.warning(err_msg)
return msgs.error(err_msg)
@lib.api_call
def echo(self, msg=None):
"""Echo a message back to the caller.
:param msg: Message to be echoed back to caller, default is None.
:returns: Message given by param, defaults to None.
"""
return msg
@lib.api_call
def exception(self):
"""Raise a test exception which will be returned to the caller."""
raise Exception("Exception test")
@lib.api_call
def spawn_pub_server(self):
"""Spawn publisher thread."""
if self.pub_server is None:
self.pub_server = pub_server_mod.PubServer(self.systems)
# Prevent pub_server thread from blocking the process from closing
self.pub_server.setDaemon(True)
self.pub_server.start()
msg = "Spawned pub server"
self.logger.info(msg)
return msg
else:
err_msg = "PubServer is already running"
self.logger.warning(err_msg)
return err_msg
@lib.api_call
def stop_full(self):
"""Stop all drive and gun motors, set turret to safe state."""
self.systems["driver"].move(0, 0)
def clean_up(self):
"""Tear down ZMQ socket."""
self.stop_full()
self.ctrl_sock.close()
self.context.term()
if __name__ == "__main__":
if len(sys.argv) == 2:
server = CtrlServer(sys.argv[1])
else:
server = CtrlServer()
server.listen() | self.logger.info("Received message to die. Bye!") | random_line_split |
ctrl_server.py | #!/usr/bin/env python
"""Server that accepts and executes control-type commands on the bot."""
import sys
import os
from inspect import getmembers, ismethod
from simplejson.decoder import JSONDecodeError
import zmq
import signal
# This is required to make imports work
sys.path = [os.getcwd()] + sys.path
import bot.lib.lib as lib
import pub_server as pub_server_mod
import bot.lib.messages as msgs
from bot.driver.mec_driver import MecDriver
def is_api_method(obj, name):
"""Tests whether named method exists in obj and is flagged for API export.
:param obj: API-exported object to search for the given method on.
    :type obj: string
:param name: Name of method to check for.
:type name: string
:returns: True if given method is on given obj and is exported, else False.
"""
try:
method = getattr(obj, name)
except AttributeError:
return False
return (ismethod(method) and hasattr(method, "__api_call"))
class CtrlServer(object):
"""Exports bot control via ZMQ.
    Most functionality exported by CtrlServer is in the form of methods
exposed by the API. @lib.api_call decorators can be added to bot
systems, which tags them for export. They can then be called
remotely via CtrlClient, which is typically owned by an interface
like the CLI, which typically accepts commands from an agent like
a human.
Some control is exported directly by CtrlServer, not through the
API. For example, CtrlServer responds directly to ping messages,
list messages (which give the objects/methods exposed by the API),
and exit messages.
CtrlServer is the primary owner of bot resources, which we call
systems. For example, it's CtrlServer that instantiates gunner
and follower. Through those two, CtrlServer owns the gun, the
IR hub, the turret and basically every other bot system.
The messages that CtrlServer accepts and responds with are fully
specified in lib.messages. Make any changes to messages there.
CtrlServer can be instructed (via the API) to spawn a new thread
for a PubServer. When that happens, CtrlServer passes its systems
to PubServer, which can read their state and publish it over a
ZMQ PUB socket.
"""
def __init__(self, testing=None, config_file="bot/config.yaml"):
"""Build ZMQ REP socket and instantiate bot systems.
:param testing: True if running on simulated HW, False if on bot.
:type testing: boolean
:param config_file: Name of file to read configuration from.
:type config_file: string
"""
# Register signal handler, shut down cleanly (think motors)
signal.signal(signal.SIGINT, self.signal_handler)
# Load configuration and logger
self.config = lib.get_config(config_file)
self.logger = lib.get_logger()
# Testing flag will cause objects to run on simulated hardware
if testing is True or testing == "True":
self.logger.info("CtrlServer running in test mode")
lib.set_testing(True)
elif testing is None:
self.logger.info(
"Defaulting to config testing flag: {}".format(
self.config["testing"]))
lib.set_testing(self.config["testing"])
else:
self.logger.info("CtrlServer running in non-test mode")
lib.set_testing(False)
# Build socket to listen for requests
self.context = zmq.Context()
self.ctrl_sock = self.context.socket(zmq.REP)
self.server_bind_addr = "{protocol}://{host}:{port}".format(
protocol=self.config["server_protocol"],
host=self.config["server_bind_host"],
port=self.config["ctrl_server_port"])
try:
self.ctrl_sock.bind(self.server_bind_addr)
except zmq.ZMQError:
self.logger.error("ZMQ error. Is a server already running?")
self.logger.warning("May be connected to an old server instance.")
sys.exit(1)
self.systems = self.assign_subsystems()
self.logger.info("Control server initialized")
# Don't spawn pub_server until told to
self.pub_server = None
def signal_handler(self, signal, frame):
self.logger.info("Caught SIGINT (Ctrl+C), closing cleanly")
self.clean_up()
self.logger.info("Cleaned up bot, exiting...")
sys.exit(0)
def assign_subsystems(self):
"""Instantiates and stores references to bot subsystems.
:returns: Dict of subsystems, maps system name to instantiated object.
"""
self.driver = MecDriver()
systems = {}
systems["ctrl"] = self
systems["driver"] = self.driver
self.logger.debug("Systems: {}".format(systems))
return systems
def listen(self):
"""Perpetually listen for messages, pass them to generic handler."""
self.logger.info("Control server: {}".format(self.server_bind_addr))
while True:
try:
msg = self.ctrl_sock.recv_json()
reply = self.handle_msg(msg)
self.logger.debug("Sending: {}".format(reply))
self.ctrl_sock.send_json(reply)
except JSONDecodeError:
err_msg = "Not a JSON message!"
self.logger.warning(err_msg)
self.ctrl_sock.send_json(msgs.error(err_msg))
except KeyboardInterrupt:
self.logger.info("Exiting control server. Bye!")
self.clean_up()
sys.exit(0)
def handle_msg(self, msg):
"""Generic message handler. Hands-off based on type of message.
:param msg: Message, received via ZMQ from client, to handle.
:type msg: dict
:returns: An appropriate message reply dict, from lib.messages.
"""
self.logger.debug("Received: {}".format(msg))
try:
msg_type = msg["type"]
except KeyError as e:
return msgs.error(e)
if msg_type == "ping_req":
reply = msgs.ping_reply()
elif msg_type == "list_req":
reply = self.list_callables()
elif msg_type == "call_req":
try:
obj_name = msg["obj_name"]
method = msg["method"]
params = msg["params"]
reply = self.call_method(obj_name, method, params)
except KeyError as e:
return msgs.error(e)
elif msg_type == "exit_req":
self.logger.info("Received message to die. Bye!")
reply = msgs.exit_reply()
# Need to actually send reply here as we're about to exit
self.logger.debug("Sending: {}".format(reply))
self.ctrl_sock.send_json(reply)
self.clean_up()
sys.exit(0)
else:
err_msg = "Unrecognized message: {}".format(msg)
self.logger.warning(err_msg)
reply = msgs.error(err_msg)
return reply
def list_callables(self):
"""Build list of callable methods on each exported subsystem object.
Uses introspection to create a list of callable methods for each
registered subsystem object. Only methods which are flagged using the
@lib.api_call decorator will be included.
:returns: list_reply message with callable objects and their methods.
"""
self.logger.debug("List of callable API objects requested")
# Dict of subsystem object names to their callable methods.
callables = {}
for name, obj in self.systems.items():
methods = []
# Filter out methods which are not explicitly flagged for export
for member in getmembers(obj):
if is_api_method(obj, member[0]):
|
callables[name] = methods
return msgs.list_reply(callables)
def call_method(self, name, method, params):
"""Call a previously registered subsystem method by name. Only
methods tagged with the @api_call decorator can be called.
:param name: Assigned name of the registered subsystem.
:type name: string
:param method: Subsystem method to be called.
:type method: string
:param params: Additional parameters for the called method.
:type params: dict
:returns: call_reply or error message dict to be sent to caller.
"""
self.logger.debug("API call: {}.{}({})".format(name, method, params))
if name in self.systems:
obj = self.systems[name]
if is_api_method(obj, method):
try:
# Calls given obj.method, unpacking and passing params dict
call_return = getattr(obj, method)(**params)
msg = "Called {}.{}".format(name, method)
self.logger.debug(msg + ",returned:{}".format(call_return))
return msgs.call_reply(msg, call_return)
except TypeError:
# Raised when we have a mismatch of the method's kwargs
# TODO: Return argspec here?
err_msg = "Invalid params for {}.{}".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
except Exception as e:
# Catch exception raised by called method, notify client
err_msg = "Exception: '{}'".format(str(e))
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid method: '{}.{}'".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid object: '{}'".format(name)
self.logger.warning(err_msg)
return msgs.error(err_msg)
@lib.api_call
def echo(self, msg=None):
"""Echo a message back to the caller.
:param msg: Message to be echoed back to caller, default is None.
:returns: Message given by param, defaults to None.
"""
return msg
@lib.api_call
def exception(self):
"""Raise a test exception which will be returned to the caller."""
raise Exception("Exception test")
@lib.api_call
def spawn_pub_server(self):
"""Spawn publisher thread."""
if self.pub_server is None:
self.pub_server = pub_server_mod.PubServer(self.systems)
# Prevent pub_server thread from blocking the process from closing
self.pub_server.setDaemon(True)
self.pub_server.start()
msg = "Spawned pub server"
self.logger.info(msg)
return msg
else:
err_msg = "PubServer is already running"
self.logger.warning(err_msg)
return err_msg
@lib.api_call
def stop_full(self):
"""Stop all drive and gun motors, set turret to safe state."""
self.systems["driver"].move(0, 0)
def clean_up(self):
"""Tear down ZMQ socket."""
self.stop_full()
self.ctrl_sock.close()
self.context.term()
if __name__ == "__main__":
if len(sys.argv) == 2:
server = CtrlServer(sys.argv[1])
else:
server = CtrlServer()
server.listen()
| methods.append(member[0]) | conditional_block |
ctrl_server.py | #!/usr/bin/env python
"""Server that accepts and executes control-type commands on the bot."""
import sys
import os
from inspect import getmembers, ismethod
from simplejson.decoder import JSONDecodeError
import zmq
import signal
# This is required to make imports work
sys.path = [os.getcwd()] + sys.path
import bot.lib.lib as lib
import pub_server as pub_server_mod
import bot.lib.messages as msgs
from bot.driver.mec_driver import MecDriver
def is_api_method(obj, name):
"""Tests whether named method exists in obj and is flagged for API export.
:param obj: API-exported object to search for the given method on.
    :type obj: string
:param name: Name of method to check for.
:type name: string
:returns: True if given method is on given obj and is exported, else False.
"""
try:
method = getattr(obj, name)
except AttributeError:
return False
return (ismethod(method) and hasattr(method, "__api_call"))
class CtrlServer(object):
"""Exports bot control via ZMQ.
    Most functionality exported by CtrlServer is in the form of methods
exposed by the API. @lib.api_call decorators can be added to bot
systems, which tags them for export. They can then be called
remotely via CtrlClient, which is typically owned by an interface
like the CLI, which typically accepts commands from an agent like
a human.
Some control is exported directly by CtrlServer, not through the
API. For example, CtrlServer responds directly to ping messages,
list messages (which give the objects/methods exposed by the API),
and exit messages.
CtrlServer is the primary owner of bot resources, which we call
systems. For example, it's CtrlServer that instantiates gunner
and follower. Through those two, CtrlServer owns the gun, the
IR hub, the turret and basically every other bot system.
The messages that CtrlServer accepts and responds with are fully
specified in lib.messages. Make any changes to messages there.
CtrlServer can be instructed (via the API) to spawn a new thread
for a PubServer. When that happens, CtrlServer passes its systems
to PubServer, which can read their state and publish it over a
ZMQ PUB socket.
"""
def __init__(self, testing=None, config_file="bot/config.yaml"):
"""Build ZMQ REP socket and instantiate bot systems.
:param testing: True if running on simulated HW, False if on bot.
:type testing: boolean
:param config_file: Name of file to read configuration from.
:type config_file: string
"""
# Register signal handler, shut down cleanly (think motors)
signal.signal(signal.SIGINT, self.signal_handler)
# Load configuration and logger
self.config = lib.get_config(config_file)
self.logger = lib.get_logger()
# Testing flag will cause objects to run on simulated hardware
if testing is True or testing == "True":
self.logger.info("CtrlServer running in test mode")
lib.set_testing(True)
elif testing is None:
self.logger.info(
"Defaulting to config testing flag: {}".format(
self.config["testing"]))
lib.set_testing(self.config["testing"])
else:
self.logger.info("CtrlServer running in non-test mode")
lib.set_testing(False)
# Build socket to listen for requests
self.context = zmq.Context()
self.ctrl_sock = self.context.socket(zmq.REP)
self.server_bind_addr = "{protocol}://{host}:{port}".format(
protocol=self.config["server_protocol"],
host=self.config["server_bind_host"],
port=self.config["ctrl_server_port"])
try:
self.ctrl_sock.bind(self.server_bind_addr)
except zmq.ZMQError:
self.logger.error("ZMQ error. Is a server already running?")
self.logger.warning("May be connected to an old server instance.")
sys.exit(1)
self.systems = self.assign_subsystems()
self.logger.info("Control server initialized")
# Don't spawn pub_server until told to
self.pub_server = None
def signal_handler(self, signal, frame):
self.logger.info("Caught SIGINT (Ctrl+C), closing cleanly")
self.clean_up()
self.logger.info("Cleaned up bot, exiting...")
sys.exit(0)
def assign_subsystems(self):
"""Instantiates and stores references to bot subsystems.
:returns: Dict of subsystems, maps system name to instantiated object.
"""
self.driver = MecDriver()
systems = {}
systems["ctrl"] = self
systems["driver"] = self.driver
self.logger.debug("Systems: {}".format(systems))
return systems
def listen(self):
"""Perpetually listen for messages, pass them to generic handler."""
self.logger.info("Control server: {}".format(self.server_bind_addr))
while True:
try:
msg = self.ctrl_sock.recv_json()
reply = self.handle_msg(msg)
self.logger.debug("Sending: {}".format(reply))
self.ctrl_sock.send_json(reply)
except JSONDecodeError:
err_msg = "Not a JSON message!"
self.logger.warning(err_msg)
self.ctrl_sock.send_json(msgs.error(err_msg))
except KeyboardInterrupt:
self.logger.info("Exiting control server. Bye!")
self.clean_up()
sys.exit(0)
def handle_msg(self, msg):
"""Generic message handler. Hands-off based on type of message.
:param msg: Message, received via ZMQ from client, to handle.
:type msg: dict
:returns: An appropriate message reply dict, from lib.messages.
"""
self.logger.debug("Received: {}".format(msg))
try:
msg_type = msg["type"]
except KeyError as e:
return msgs.error(e)
if msg_type == "ping_req":
reply = msgs.ping_reply()
elif msg_type == "list_req":
reply = self.list_callables()
elif msg_type == "call_req":
try:
obj_name = msg["obj_name"]
method = msg["method"]
params = msg["params"]
reply = self.call_method(obj_name, method, params)
except KeyError as e:
return msgs.error(e)
elif msg_type == "exit_req":
self.logger.info("Received message to die. Bye!")
reply = msgs.exit_reply()
# Need to actually send reply here as we're about to exit
self.logger.debug("Sending: {}".format(reply))
self.ctrl_sock.send_json(reply)
self.clean_up()
sys.exit(0)
else:
err_msg = "Unrecognized message: {}".format(msg)
self.logger.warning(err_msg)
reply = msgs.error(err_msg)
return reply
def list_callables(self):
"""Build list of callable methods on each exported subsystem object.
Uses introspection to create a list of callable methods for each
registered subsystem object. Only methods which are flagged using the
@lib.api_call decorator will be included.
:returns: list_reply message with callable objects and their methods.
"""
self.logger.debug("List of callable API objects requested")
# Dict of subsystem object names to their callable methods.
callables = {}
for name, obj in self.systems.items():
methods = []
# Filter out methods which are not explicitly flagged for export
for member in getmembers(obj):
if is_api_method(obj, member[0]):
methods.append(member[0])
callables[name] = methods
return msgs.list_reply(callables)
def call_method(self, name, method, params):
"""Call a previously registered subsystem method by name. Only
methods tagged with the @api_call decorator can be called.
:param name: Assigned name of the registered subsystem.
:type name: string
:param method: Subsystem method to be called.
:type method: string
:param params: Additional parameters for the called method.
:type params: dict
:returns: call_reply or error message dict to be sent to caller.
"""
self.logger.debug("API call: {}.{}({})".format(name, method, params))
if name in self.systems:
obj = self.systems[name]
if is_api_method(obj, method):
try:
# Calls given obj.method, unpacking and passing params dict
call_return = getattr(obj, method)(**params)
msg = "Called {}.{}".format(name, method)
self.logger.debug(msg + ",returned:{}".format(call_return))
return msgs.call_reply(msg, call_return)
except TypeError:
# Raised when we have a mismatch of the method's kwargs
# TODO: Return argspec here?
err_msg = "Invalid params for {}.{}".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
except Exception as e:
# Catch exception raised by called method, notify client
err_msg = "Exception: '{}'".format(str(e))
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid method: '{}.{}'".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid object: '{}'".format(name)
self.logger.warning(err_msg)
return msgs.error(err_msg)
@lib.api_call
def echo(self, msg=None):
"""Echo a message back to the caller.
:param msg: Message to be echoed back to caller, default is None.
:returns: Message given by param, defaults to None.
"""
return msg
@lib.api_call
def | (self):
"""Raise a test exception which will be returned to the caller."""
raise Exception("Exception test")
@lib.api_call
def spawn_pub_server(self):
"""Spawn publisher thread."""
if self.pub_server is None:
self.pub_server = pub_server_mod.PubServer(self.systems)
# Prevent pub_server thread from blocking the process from closing
self.pub_server.setDaemon(True)
self.pub_server.start()
msg = "Spawned pub server"
self.logger.info(msg)
return msg
else:
err_msg = "PubServer is already running"
self.logger.warning(err_msg)
return err_msg
@lib.api_call
def stop_full(self):
"""Stop all drive and gun motors, set turret to safe state."""
self.systems["driver"].move(0, 0)
def clean_up(self):
"""Tear down ZMQ socket."""
self.stop_full()
self.ctrl_sock.close()
self.context.term()
if __name__ == "__main__":
if len(sys.argv) == 2:
server = CtrlServer(sys.argv[1])
else:
server = CtrlServer()
server.listen()
| exception | identifier_name |
ctrl_server.py | #!/usr/bin/env python
"""Server that accepts and executes control-type commands on the bot."""
import sys
import os
from inspect import getmembers, ismethod
from simplejson.decoder import JSONDecodeError
import zmq
import signal
# This is required to make imports work
sys.path = [os.getcwd()] + sys.path
import bot.lib.lib as lib
import pub_server as pub_server_mod
import bot.lib.messages as msgs
from bot.driver.mec_driver import MecDriver
def is_api_method(obj, name):
"""Tests whether named method exists in obj and is flagged for API export.
:param obj: API-exported object to search for the given method on.
    :type obj: string
:param name: Name of method to check for.
:type name: string
:returns: True if given method is on given obj and is exported, else False.
"""
try:
method = getattr(obj, name)
except AttributeError:
return False
return (ismethod(method) and hasattr(method, "__api_call"))
class CtrlServer(object):
"""Exports bot control via ZMQ.
    Most functionality exported by CtrlServer is in the form of methods
exposed by the API. @lib.api_call decorators can be added to bot
systems, which tags them for export. They can then be called
remotely via CtrlClient, which is typically owned by an interface
like the CLI, which typically accepts commands from an agent like
a human.
Some control is exported directly by CtrlServer, not through the
API. For example, CtrlServer responds directly to ping messages,
list messages (which give the objects/methods exposed by the API),
and exit messages.
CtrlServer is the primary owner of bot resources, which we call
systems. For example, it's CtrlServer that instantiates gunner
and follower. Through those two, CtrlServer owns the gun, the
IR hub, the turret and basically every other bot system.
The messages that CtrlServer accepts and responds with are fully
specified in lib.messages. Make any changes to messages there.
CtrlServer can be instructed (via the API) to spawn a new thread
for a PubServer. When that happens, CtrlServer passes its systems
to PubServer, which can read their state and publish it over a
ZMQ PUB socket.
"""
def __init__(self, testing=None, config_file="bot/config.yaml"):
"""Build ZMQ REP socket and instantiate bot systems.
:param testing: True if running on simulated HW, False if on bot.
:type testing: boolean
:param config_file: Name of file to read configuration from.
:type config_file: string
"""
# Register signal handler, shut down cleanly (think motors)
signal.signal(signal.SIGINT, self.signal_handler)
# Load configuration and logger
self.config = lib.get_config(config_file)
self.logger = lib.get_logger()
# Testing flag will cause objects to run on simulated hardware
if testing is True or testing == "True":
self.logger.info("CtrlServer running in test mode")
lib.set_testing(True)
elif testing is None:
self.logger.info(
"Defaulting to config testing flag: {}".format(
self.config["testing"]))
lib.set_testing(self.config["testing"])
else:
self.logger.info("CtrlServer running in non-test mode")
lib.set_testing(False)
# Build socket to listen for requests
self.context = zmq.Context()
self.ctrl_sock = self.context.socket(zmq.REP)
self.server_bind_addr = "{protocol}://{host}:{port}".format(
protocol=self.config["server_protocol"],
host=self.config["server_bind_host"],
port=self.config["ctrl_server_port"])
try:
self.ctrl_sock.bind(self.server_bind_addr)
except zmq.ZMQError:
self.logger.error("ZMQ error. Is a server already running?")
self.logger.warning("May be connected to an old server instance.")
sys.exit(1)
self.systems = self.assign_subsystems()
self.logger.info("Control server initialized")
# Don't spawn pub_server until told to
self.pub_server = None
def signal_handler(self, signal, frame):
self.logger.info("Caught SIGINT (Ctrl+C), closing cleanly")
self.clean_up()
self.logger.info("Cleaned up bot, exiting...")
sys.exit(0)
def assign_subsystems(self):
"""Instantiates and stores references to bot subsystems.
:returns: Dict of subsystems, maps system name to instantiated object.
"""
self.driver = MecDriver()
systems = {}
systems["ctrl"] = self
systems["driver"] = self.driver
self.logger.debug("Systems: {}".format(systems))
return systems
def listen(self):
"""Perpetually listen for messages, pass them to generic handler."""
self.logger.info("Control server: {}".format(self.server_bind_addr))
while True:
try:
msg = self.ctrl_sock.recv_json()
reply = self.handle_msg(msg)
self.logger.debug("Sending: {}".format(reply))
self.ctrl_sock.send_json(reply)
except JSONDecodeError:
err_msg = "Not a JSON message!"
self.logger.warning(err_msg)
self.ctrl_sock.send_json(msgs.error(err_msg))
except KeyboardInterrupt:
self.logger.info("Exiting control server. Bye!")
self.clean_up()
sys.exit(0)
def handle_msg(self, msg):
|
def list_callables(self):
"""Build list of callable methods on each exported subsystem object.
Uses introspection to create a list of callable methods for each
registered subsystem object. Only methods which are flagged using the
@lib.api_call decorator will be included.
:returns: list_reply message with callable objects and their methods.
"""
self.logger.debug("List of callable API objects requested")
# Dict of subsystem object names to their callable methods.
callables = {}
for name, obj in self.systems.items():
methods = []
# Filter out methods which are not explicitly flagged for export
for member in getmembers(obj):
if is_api_method(obj, member[0]):
methods.append(member[0])
callables[name] = methods
return msgs.list_reply(callables)
def call_method(self, name, method, params):
"""Call a previously registered subsystem method by name. Only
methods tagged with the @api_call decorator can be called.
:param name: Assigned name of the registered subsystem.
:type name: string
:param method: Subsystem method to be called.
:type method: string
:param params: Additional parameters for the called method.
:type params: dict
:returns: call_reply or error message dict to be sent to caller.
"""
self.logger.debug("API call: {}.{}({})".format(name, method, params))
if name in self.systems:
obj = self.systems[name]
if is_api_method(obj, method):
try:
# Calls given obj.method, unpacking and passing params dict
call_return = getattr(obj, method)(**params)
msg = "Called {}.{}".format(name, method)
self.logger.debug(msg + ",returned:{}".format(call_return))
return msgs.call_reply(msg, call_return)
except TypeError:
# Raised when we have a mismatch of the method's kwargs
# TODO: Return argspec here?
err_msg = "Invalid params for {}.{}".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
except Exception as e:
# Catch exception raised by called method, notify client
err_msg = "Exception: '{}'".format(str(e))
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid method: '{}.{}'".format(name, method)
self.logger.warning(err_msg)
return msgs.error(err_msg)
else:
err_msg = "Invalid object: '{}'".format(name)
self.logger.warning(err_msg)
return msgs.error(err_msg)
@lib.api_call
def echo(self, msg=None):
"""Echo a message back to the caller.
:param msg: Message to be echoed back to caller, default is None.
:returns: Message given by param, defaults to None.
"""
return msg
@lib.api_call
def exception(self):
"""Raise a test exception which will be returned to the caller."""
raise Exception("Exception test")
@lib.api_call
def spawn_pub_server(self):
"""Spawn publisher thread."""
if self.pub_server is None:
self.pub_server = pub_server_mod.PubServer(self.systems)
# Prevent pub_server thread from blocking the process from closing
self.pub_server.setDaemon(True)
self.pub_server.start()
msg = "Spawned pub server"
self.logger.info(msg)
return msg
else:
err_msg = "PubServer is already running"
self.logger.warning(err_msg)
return err_msg
@lib.api_call
def stop_full(self):
"""Stop all drive and gun motors, set turret to safe state."""
self.systems["driver"].move(0, 0)
def clean_up(self):
"""Tear down ZMQ socket."""
self.stop_full()
self.ctrl_sock.close()
self.context.term()
if __name__ == "__main__":
if len(sys.argv) == 2:
server = CtrlServer(sys.argv[1])
else:
server = CtrlServer()
server.listen()
| """Generic message handler. Hands-off based on type of message.
:param msg: Message, received via ZMQ from client, to handle.
:type msg: dict
:returns: An appropriate message reply dict, from lib.messages.
"""
self.logger.debug("Received: {}".format(msg))
try:
msg_type = msg["type"]
except KeyError as e:
return msgs.error(e)
if msg_type == "ping_req":
reply = msgs.ping_reply()
elif msg_type == "list_req":
reply = self.list_callables()
elif msg_type == "call_req":
try:
obj_name = msg["obj_name"]
method = msg["method"]
params = msg["params"]
reply = self.call_method(obj_name, method, params)
except KeyError as e:
return msgs.error(e)
elif msg_type == "exit_req":
self.logger.info("Received message to die. Bye!")
reply = msgs.exit_reply()
# Need to actually send reply here as we're about to exit
self.logger.debug("Sending: {}".format(reply))
self.ctrl_sock.send_json(reply)
self.clean_up()
sys.exit(0)
else:
err_msg = "Unrecognized message: {}".format(msg)
self.logger.warning(err_msg)
reply = msgs.error(err_msg)
return reply | identifier_body |
synchronizer.rs | // Copyright (c) Facebook, Inc. and its affiliates.
use super::committee::*;
use super::error::*;
use super::messages::*;
use super::types::*;
use bincode::Error;
use crypto::Digest;
use futures::future::{Fuse, FutureExt};
use futures::select;
use futures::stream::futures_unordered::FuturesUnordered;
use futures::stream::StreamExt;
use futures::{future::FusedFuture, pin_mut};
use log::*;
use rand::seq::SliceRandom;
use std::cmp::max;
use std::cmp::min;
use std::collections::HashSet;
use store::Store;
use tokio::sync::mpsc;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::time::{sleep, Duration};
type DBValue = Vec<u8>;
/// A service that keeps watching for the dependencies of blocks.
pub async fn header_waiter_process(
mut store: Store, // A copy of the inner store
genesis: Vec<Certificate>, // The genesis set of certs
mut rx: mpsc::UnboundedReceiver<(SignedBlockHeader, BlockHeader)>, // A channel to receive Headers
loopback_commands: Sender<PrimaryMessage>,
) {
// Register the genesis certs ...
for cert in &genesis {
// let bytes: Vec<u8> = bincode::serialize(&cert)?;
let digest_in_store = PartialCertificate::make_digest(&cert.digest, 0, cert.primary_id);
let _ = store
.write(digest_in_store.0.to_vec(), digest_in_store.0.to_vec()) //digest_in_store.0.to_vec() --> bytes ?
.await;
}
// Create an unordered set of futures
let mut waiting_headers = FuturesUnordered::new();
// Now process headers and also headers with satisfied dependencies
loop {
select! {
signer_header_result = waiting_headers.select_next_some() => {
// Once we have all header dependencies, we try to re-inject this signed header
if let Ok(signed_header) = signer_header_result {
if let Err(e) = loopback_commands.send(PrimaryMessage::Header(signed_header)).await {
error!("Error sending loopback command: {}", e);
}
}
else {
error!("Error in waiter.");
}
}
msg = rx.recv().fuse() => {
if let Some((signed_header, header)) = msg {
let i2_store = store.clone();
let fut = header_waiter(i2_store, signed_header, header);
waiting_headers.push(fut);
}
else {
// Channel is closed, so we exit.
// Our primary is gone!
warn!("Exiting digest waiter process.");
break;
}
}
}
}
}
/// The future that waits for a set of headers and digest associated with a header.
pub async fn header_waiter(
mut store: Store, // A copy of the store
signed_header: SignedBlockHeader, // The signed header structure
header: BlockHeader, // The header for which we wait for dependencies.
) -> Result<SignedBlockHeader, DagError> {
debug!(
"[DEP] ASK H {:?} D {}",
(header.round, header.author),
header.transactions_digest.len()
);
    //Note: we now store different digests for headers and certificates to avoid false positives.
for (other_primary_id, digest) in &header.parents {
let digest_in_store =
PartialCertificate::make_digest(&digest, header.round - 1, *other_primary_id);
if store.notify_read(digest_in_store.0.to_vec()).await.is_err() {
return Err(DagError::StorageFailure {
error: "Error in reading store from 'header_waiter'.".to_string(),
});
}
}
for (digest, _) in &header.transactions_digest {
if store.notify_read(digest.0.to_vec()).await.is_err() {
return Err(DagError::StorageFailure {
error: "Error in reading store from 'header_waiter'.".to_string(),
});
}
}
debug!(
"[DEP] GOT H {:?} D {}",
(header.round, header.author),
header.transactions_digest.len()
);
Ok(signed_header)
}
pub async fn dag_synchronizer_process(
mut get_from_dag: Receiver<SyncMessage>,
dag_synchronizer: DagSynchronizer,
) {
let mut digests_to_sync: Vec<Digest> = Vec::new();
let mut round_to_sync: RoundNumber = 0;
let mut last_synced_round: RoundNumber = 0;
let mut rollback_stop_round: RoundNumber = 1;
let mut sq: SyncNumber = 0;
let rollback_fut = Fuse::terminated();
pin_mut!(rollback_fut);
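    // Overview (added comment): the select! loop below services two concerns at once:
    // new SyncUpToRound requests from the DAG (only the highest requested round is kept)
    // and the currently running rollback_headers future; once a rollback finishes, a
    // newer pending round, if any, immediately schedules the next rollback.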
loop {
select! {
msg = get_from_dag.recv().fuse() => {
if let Some(SyncMessage::SyncUpToRound(round, digests, last_gc_round)) = msg {
if round > round_to_sync {
debug!("DAG sync: received request to sync digests: {:?} up to round {}", digests, round);
round_to_sync = round;
digests_to_sync = digests;
rollback_stop_round = max(last_gc_round+1, 1);
if rollback_fut.is_terminated(){
last_synced_round = round_to_sync;
rollback_fut.set(rollback_headers(dag_synchronizer.clone(), digests_to_sync.clone(), round_to_sync, rollback_stop_round, sq).fuse());
debug!("DAG sync: go.");
}
else {
debug!("DAG sync: drop.");
}
}
} else{ | warn!("Exiting DagSynchronizer::start().");
break;
}
}
res = rollback_fut => {
if let Err(e) = res{
error!("rollback_headers returns error: {:?}", e);
}
else{
sq += 1;
if round_to_sync > last_synced_round{
last_synced_round = round_to_sync;
// rollback_stop_round = max(last_synced_round, 1);
rollback_fut.set(rollback_headers(dag_synchronizer.clone(), digests_to_sync.clone(), round_to_sync, rollback_stop_round, sq).fuse());
}
}
}
}
}
}
pub async fn handle_header_digest(
mut dag_synchronizer: DagSynchronizer,
digest: Digest,
rollback_stop_round: RoundNumber,
sq: SyncNumber,
) -> Result<Option<Vec<Digest>>, DagError> {
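    // Added summary comment: look the digest up in the store; if it is missing, ask a few
    // peers for the header and retry with exponential backoff until the store notifies us,
    // then hand the stored record to handle_record_header, which may return further parent
    // digests that still need to be synced.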
//TODO: issue: should we try the processor first? we need concurrent access to it...
if let Ok(dbvalue) = dag_synchronizer.store.read(digest.to_vec()).await {
match dbvalue {
None => {
debug!("invoking send_sync_header_requests: {:?}", digest);
dag_synchronizer
.send_sync_header_requests(digest.clone(), sq)
.await?;
// Exponential backoff delay
let mut delay = 50;
loop {
select! {
ret = dag_synchronizer.store.notify_read(digest.to_vec()).fuse() =>{
if let Ok(record_header) = ret {
return Ok(dag_synchronizer.handle_record_header(record_header, rollback_stop_round).await?);
} else {
//handle the error.
error!("Read returned an error: {:?}", ret);
}
}
_ = sleep(Duration::from_millis(delay)).fuse() => {
debug!("Trigger Sync on {:?}", digest);
dag_synchronizer.send_sync_header_requests(digest.clone(), sq).await?;
delay *= 4;
}
}
}
}
// HERE
Some(record_header) => {
let result: Result<HeaderPrimaryRecord, Error> =
bincode::deserialize(&record_header);
if let Err(e) = result {
panic!("Reading digest {:?} from store gives us a struct that we cannot deserialize: {}", digest, e);
}
return Ok(dag_synchronizer
.handle_record_header(record_header, rollback_stop_round)
.await?);
}
}
} else {
//handle the error.
}
Ok(None)
}
//sync all digests' causal history and pass to consensus
pub async fn rollback_headers(
mut dag_synchronizer: DagSynchronizer,
digests: Vec<Digest>,
round: RoundNumber,
rollback_stop_round: RoundNumber,
sq: SyncNumber,
) -> Result<(), DagError> {
let mut asked_for: HashSet<Digest> = HashSet::new();
let mut digests_in_process = FuturesUnordered::new();
for digest in digests {
let fut = handle_header_digest(dag_synchronizer.clone(), digest, rollback_stop_round, sq);
digests_in_process.push(fut);
}
while !digests_in_process.is_empty() {
// let option = digests_in_process.select_next_some().await?;
let option = match digests_in_process.select_next_some().await {
Ok(option) => option,
Err(e) => panic!("Panix {}", e),
};
if let Some(parents_digests) = option {
for digest in parents_digests {
// Only ask for each digest once per rollback.
if asked_for.contains(&digest) {
continue;
}
asked_for.insert(digest.clone());
if !dag_synchronizer.pending_digests.contains(&digest) {
debug!("Seems so {}", digest);
dag_synchronizer.pending_digests.insert(digest.clone());
let fut = handle_header_digest(
dag_synchronizer.clone(),
digest,
rollback_stop_round,
sq,
);
digests_in_process.push(fut);
}
}
}
}
let msg = ConsensusMessage::SyncDone(round);
dag_synchronizer.send_to_consensus(msg).await?;
info!("DAG sync: sent to consensus SyncDone for round {}", round);
Ok(())
}
#[derive(Clone)]
pub struct DagSynchronizer {
pub id: NodeID,
pub send_to_consensus_channel: Sender<ConsensusMessage>,
pub send_to_network: Sender<(RoundNumber, PrimaryMessage)>,
pub store: Store,
pub committee: Committee,
pub pending_digests: HashSet<Digest>,
}
impl DagSynchronizer {
pub fn new(
id: NodeID,
committee: Committee,
store: Store,
send_to_consensus_channel: Sender<ConsensusMessage>,
send_to_network: Sender<(RoundNumber, PrimaryMessage)>,
) -> Self {
DagSynchronizer {
id,
send_to_consensus_channel,
send_to_network,
store,
committee,
pending_digests: HashSet::new(),
}
}
pub async fn send_sync_header_requests(
&mut self,
digest: Digest,
sq: SyncNumber,
) -> Result<(), DagError> {
//Pick random 3 validators. Hopefully one will have the header. We can implement different strategies.
let authorities = self.committee.authorities.choose_multiple(
&mut rand::thread_rng(),
min(self.committee.quorum_threshold() / 2, 3),
); // self.committee.quorum_threshold());
debug!("asking for header with digest: {:?}", digest);
for source in authorities {
if source.primary.name == self.id {
continue;
}
let msg =
PrimaryMessage::SyncHeaderRequest(digest.clone(), self.id, source.primary.name);
// info!("Sync Request (Conse): {:?} from {:?}", digest.clone(), source.primary.name);
if let Err(e) = self.send_to_network.send((sq, msg)).await {
panic!("error: {}", e);
}
}
Ok(())
}
pub async fn handle_record_header(
&mut self,
record_header: DBValue,
rollback_stop_round: RoundNumber,
) -> Result<Option<Vec<Digest>>, DagError> {
// let mut record_header: HeaderPrimaryRecord = bincode::deserialize(&record_header[..])?;
let mut record_header: HeaderPrimaryRecord = bincode::deserialize(&record_header)
.expect("Deserialization of primary record failure");
if self.pending_digests.contains(&record_header.digest) {
self.pending_digests.remove(&record_header.digest);
}
if !record_header.passed_to_consensus {
let msg = ConsensusMessage::Header(
record_header.header.clone(),
record_header.digest.clone(),
);
self.send_to_consensus(msg)
.await
.expect("Fail to send to consensus channel");
record_header.passed_to_consensus = true;
self.store
.write(record_header.digest.0.to_vec(), record_header.to_bytes())
.await;
return if record_header.header.round <= rollback_stop_round
|| record_header.header.round <= 1
{
                //no need to sync parents because we gc them and consensus is not going to order them.
Ok(None)
} else {
let digests: Vec<Digest> = record_header
.header
.parents
.iter()
.clone()
.map(|(_, digest)| digest.clone())
.collect();
Ok(Some(digests))
};
}
Ok(None)
}
pub async fn send_to_consensus(&mut self, msg: ConsensusMessage) -> Result<(), DagError> {
self.send_to_consensus_channel
.send(msg)
.await
.map_err(|e| DagError::ChannelError {
error: format!("{}", e),
})?;
Ok(())
}
} | random_line_split |
|
synchronizer.rs | // Copyright (c) Facebook, Inc. and its affiliates.
use super::committee::*;
use super::error::*;
use super::messages::*;
use super::types::*;
use bincode::Error;
use crypto::Digest;
use futures::future::{Fuse, FutureExt};
use futures::select;
use futures::stream::futures_unordered::FuturesUnordered;
use futures::stream::StreamExt;
use futures::{future::FusedFuture, pin_mut};
use log::*;
use rand::seq::SliceRandom;
use std::cmp::max;
use std::cmp::min;
use std::collections::HashSet;
use store::Store;
use tokio::sync::mpsc;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::time::{sleep, Duration};
type DBValue = Vec<u8>;
/// A service that keeps watching for the dependencies of blocks.
pub async fn header_waiter_process(
mut store: Store, // A copy of the inner store
genesis: Vec<Certificate>, // The genesis set of certs
mut rx: mpsc::UnboundedReceiver<(SignedBlockHeader, BlockHeader)>, // A channel to receive Headers
loopback_commands: Sender<PrimaryMessage>,
) {
// Register the genesis certs ...
for cert in &genesis {
// let bytes: Vec<u8> = bincode::serialize(&cert)?;
let digest_in_store = PartialCertificate::make_digest(&cert.digest, 0, cert.primary_id);
let _ = store
.write(digest_in_store.0.to_vec(), digest_in_store.0.to_vec()) //digest_in_store.0.to_vec() --> bytes ?
.await;
}
// Create an unordered set of futures
let mut waiting_headers = FuturesUnordered::new();
// Now process headers and also headers with satisfied dependencies
loop {
select! {
signer_header_result = waiting_headers.select_next_some() => {
// Once we have all header dependencies, we try to re-inject this signed header
if let Ok(signed_header) = signer_header_result {
if let Err(e) = loopback_commands.send(PrimaryMessage::Header(signed_header)).await {
error!("Error sending loopback command: {}", e);
}
}
else {
error!("Error in waiter.");
}
}
msg = rx.recv().fuse() => {
if let Some((signed_header, header)) = msg {
let i2_store = store.clone();
let fut = header_waiter(i2_store, signed_header, header);
waiting_headers.push(fut);
}
else {
// Channel is closed, so we exit.
// Our primary is gone!
warn!("Exiting digest waiter process.");
break;
}
}
}
}
}
/// The future that waits for the parent certificates and transaction digests associated with a header.
pub async fn header_waiter(
mut store: Store, // A copy of the store
signed_header: SignedBlockHeader, // The signed header structure
header: BlockHeader, // The header for which we wait for dependencies.
) -> Result<SignedBlockHeader, DagError> {
debug!(
"[DEP] ASK H {:?} D {}",
(header.round, header.author),
header.transactions_digest.len()
);
// Note: we now store different digests for headers and certificates to avoid false positives.
for (other_primary_id, digest) in &header.parents {
let digest_in_store =
PartialCertificate::make_digest(&digest, header.round - 1, *other_primary_id);
if store.notify_read(digest_in_store.0.to_vec()).await.is_err() {
return Err(DagError::StorageFailure {
error: "Error in reading store from 'header_waiter'.".to_string(),
});
}
}
for (digest, _) in &header.transactions_digest {
if store.notify_read(digest.0.to_vec()).await.is_err() {
return Err(DagError::StorageFailure {
error: "Error in reading store from 'header_waiter'.".to_string(),
});
}
}
debug!(
"[DEP] GOT H {:?} D {}",
(header.round, header.author),
header.transactions_digest.len()
);
Ok(signed_header)
}
pub async fn dag_synchronizer_process(
mut get_from_dag: Receiver<SyncMessage>,
dag_synchronizer: DagSynchronizer,
) {
let mut digests_to_sync: Vec<Digest> = Vec::new();
let mut round_to_sync: RoundNumber = 0;
let mut last_synced_round: RoundNumber = 0;
let mut rollback_stop_round: RoundNumber = 1;
let mut sq: SyncNumber = 0;
let rollback_fut = Fuse::terminated();
pin_mut!(rollback_fut);
loop {
select! {
msg = get_from_dag.recv().fuse() => {
if let Some(SyncMessage::SyncUpToRound(round, digests, last_gc_round)) = msg {
if round > round_to_sync {
debug!("DAG sync: received request to sync digests: {:?} up to round {}", digests, round);
round_to_sync = round;
digests_to_sync = digests;
rollback_stop_round = max(last_gc_round+1, 1);
if rollback_fut.is_terminated(){
last_synced_round = round_to_sync;
rollback_fut.set(rollback_headers(dag_synchronizer.clone(), digests_to_sync.clone(), round_to_sync, rollback_stop_round, sq).fuse());
debug!("DAG sync: go.");
}
else {
debug!("DAG sync: drop.");
}
}
} else{
warn!("Exiting DagSynchronizer::start().");
break;
}
}
res = rollback_fut => {
if let Err(e) = res{
error!("rollback_headers returns error: {:?}", e);
}
else{
sq += 1;
if round_to_sync > last_synced_round{
last_synced_round = round_to_sync;
// rollback_stop_round = max(last_synced_round, 1);
rollback_fut.set(rollback_headers(dag_synchronizer.clone(), digests_to_sync.clone(), round_to_sync, rollback_stop_round, sq).fuse());
}
}
}
}
}
}
pub async fn | (
mut dag_synchronizer: DagSynchronizer,
digest: Digest,
rollback_stop_round: RoundNumber,
sq: SyncNumber,
) -> Result<Option<Vec<Digest>>, DagError> {
//TODO: issue: should we try the processor first? we need concurrent access to it...
if let Ok(dbvalue) = dag_synchronizer.store.read(digest.to_vec()).await {
match dbvalue {
None => {
debug!("invoking send_sync_header_requests: {:?}", digest);
dag_synchronizer
.send_sync_header_requests(digest.clone(), sq)
.await?;
// Exponential backoff delay
let mut delay = 50;
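// The retry loop below re-sends the sync request with an exponentially growing delay
// (50 ms, 200 ms, 800 ms, ...) until notify_read reports that the header has landed in the store.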
loop {
select! {
ret = dag_synchronizer.store.notify_read(digest.to_vec()).fuse() =>{
if let Ok(record_header) = ret {
return Ok(dag_synchronizer.handle_record_header(record_header, rollback_stop_round).await?);
} else {
//handle the error.
error!("Read returned an error: {:?}", ret);
}
}
_ = sleep(Duration::from_millis(delay)).fuse() => {
debug!("Trigger Sync on {:?}", digest);
dag_synchronizer.send_sync_header_requests(digest.clone(), sq).await?;
delay *= 4;
}
}
}
}
// HERE
Some(record_header) => {
let result: Result<HeaderPrimaryRecord, Error> =
bincode::deserialize(&record_header);
if let Err(e) = result {
panic!("Reading digest {:?} from store gives us a struct that we cannot deserialize: {}", digest, e);
}
return Ok(dag_synchronizer
.handle_record_header(record_header, rollback_stop_round)
.await?);
}
}
} else {
//handle the error.
}
Ok(None)
}
//sync all digests' causal history and pass to consensus
pub async fn rollback_headers(
mut dag_synchronizer: DagSynchronizer,
digests: Vec<Digest>,
round: RoundNumber,
rollback_stop_round: RoundNumber,
sq: SyncNumber,
) -> Result<(), DagError> {
let mut asked_for: HashSet<Digest> = HashSet::new();
let mut digests_in_process = FuturesUnordered::new();
for digest in digests {
let fut = handle_header_digest(dag_synchronizer.clone(), digest, rollback_stop_round, sq);
digests_in_process.push(fut);
}
while !digests_in_process.is_empty() {
// let option = digests_in_process.select_next_some().await?;
let option = match digests_in_process.select_next_some().await {
Ok(option) => option,
Err(e) => panic!("Panix {}", e),
};
if let Some(parents_digests) = option {
for digest in parents_digests {
// Only ask for each digest once per rollback.
if asked_for.contains(&digest) {
continue;
}
asked_for.insert(digest.clone());
if !dag_synchronizer.pending_digests.contains(&digest) {
debug!("Seems so {}", digest);
dag_synchronizer.pending_digests.insert(digest.clone());
let fut = handle_header_digest(
dag_synchronizer.clone(),
digest,
rollback_stop_round,
sq,
);
digests_in_process.push(fut);
}
}
}
}
let msg = ConsensusMessage::SyncDone(round);
dag_synchronizer.send_to_consensus(msg).await?;
info!("DAG sync: sent to consensus SyncDone for round {}", round);
Ok(())
}
#[derive(Clone)]
pub struct DagSynchronizer {
pub id: NodeID,
pub send_to_consensus_channel: Sender<ConsensusMessage>,
pub send_to_network: Sender<(RoundNumber, PrimaryMessage)>,
pub store: Store,
pub committee: Committee,
pub pending_digests: HashSet<Digest>,
}
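// Note: DagSynchronizer derives Clone and is cloned for every digest handled in
// rollback_headers, so each clone carries its own copy of `pending_digests`; the set is
// not shared state across concurrent futures.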
impl DagSynchronizer {
pub fn new(
id: NodeID,
committee: Committee,
store: Store,
send_to_consensus_channel: Sender<ConsensusMessage>,
send_to_network: Sender<(RoundNumber, PrimaryMessage)>,
) -> Self {
DagSynchronizer {
id,
send_to_consensus_channel,
send_to_network,
store,
committee,
pending_digests: HashSet::new(),
}
}
pub async fn send_sync_header_requests(
&mut self,
digest: Digest,
sq: SyncNumber,
) -> Result<(), DagError> {
// Pick 3 random validators; hopefully at least one of them will have the header. Different strategies could be implemented here.
let authorities = self.committee.authorities.choose_multiple(
&mut rand::thread_rng(),
min(self.committee.quorum_threshold() / 2, 3),
); // self.committee.quorum_threshold());
debug!("asking for header with digest: {:?}", digest);
for source in authorities {
if source.primary.name == self.id {
continue;
}
let msg =
PrimaryMessage::SyncHeaderRequest(digest.clone(), self.id, source.primary.name);
// info!("Sync Request (Conse): {:?} from {:?}", digest.clone(), source.primary.name);
if let Err(e) = self.send_to_network.send((sq, msg)).await {
panic!("error: {}", e);
}
}
Ok(())
}
pub async fn handle_record_header(
&mut self,
record_header: DBValue,
rollback_stop_round: RoundNumber,
) -> Result<Option<Vec<Digest>>, DagError> {
// let mut record_header: HeaderPrimaryRecord = bincode::deserialize(&record_header[..])?;
let mut record_header: HeaderPrimaryRecord = bincode::deserialize(&record_header)
.expect("Deserialization of primary record failure");
if self.pending_digests.contains(&record_header.digest) {
self.pending_digests.remove(&record_header.digest);
}
if !record_header.passed_to_consensus {
let msg = ConsensusMessage::Header(
record_header.header.clone(),
record_header.digest.clone(),
);
self.send_to_consensus(msg)
.await
.expect("Fail to send to consensus channel");
record_header.passed_to_consensus = true;
self.store
.write(record_header.digest.0.to_vec(), record_header.to_bytes())
.await;
return if record_header.header.round <= rollback_stop_round
|| record_header.header.round <= 1
{
// No need to sync parents: we GC them and consensus is not going to order them.
Ok(None)
} else {
let digests: Vec<Digest> = record_header
.header
.parents
.iter()
.clone()
.map(|(_, digest)| digest.clone())
.collect();
Ok(Some(digests))
};
}
Ok(None)
}
pub async fn send_to_consensus(&mut self, msg: ConsensusMessage) -> Result<(), DagError> {
self.send_to_consensus_channel
.send(msg)
.await
.map_err(|e| DagError::ChannelError {
error: format!("{}", e),
})?;
Ok(())
}
}
| handle_header_digest | identifier_name |
synchronizer.rs | // Copyright (c) Facebook, Inc. and its affiliates.
use super::committee::*;
use super::error::*;
use super::messages::*;
use super::types::*;
use bincode::Error;
use crypto::Digest;
use futures::future::{Fuse, FutureExt};
use futures::select;
use futures::stream::futures_unordered::FuturesUnordered;
use futures::stream::StreamExt;
use futures::{future::FusedFuture, pin_mut};
use log::*;
use rand::seq::SliceRandom;
use std::cmp::max;
use std::cmp::min;
use std::collections::HashSet;
use store::Store;
use tokio::sync::mpsc;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::time::{sleep, Duration};
type DBValue = Vec<u8>;
/// A service that keeps watching for the dependencies of blocks.
pub async fn header_waiter_process(
mut store: Store, // A copy of the inner store
genesis: Vec<Certificate>, // The genesis set of certs
mut rx: mpsc::UnboundedReceiver<(SignedBlockHeader, BlockHeader)>, // A channel to receive Headers
loopback_commands: Sender<PrimaryMessage>,
) {
// Register the genesis certs ...
for cert in &genesis {
// let bytes: Vec<u8> = bincode::serialize(&cert)?;
let digest_in_store = PartialCertificate::make_digest(&cert.digest, 0, cert.primary_id);
let _ = store
.write(digest_in_store.0.to_vec(), digest_in_store.0.to_vec()) //digest_in_store.0.to_vec() --> bytes ?
.await;
}
// Create an unordered set of futures
let mut waiting_headers = FuturesUnordered::new();
// Now process headers and also headers with satisfied dependencies
loop {
select! {
signer_header_result = waiting_headers.select_next_some() => {
// Once we have all header dependencies, we try to re-inject this signed header
if let Ok(signed_header) = signer_header_result {
if let Err(e) = loopback_commands.send(PrimaryMessage::Header(signed_header)).await {
error!("Error sending loopback command: {}", e);
}
}
else {
error!("Error in waiter.");
}
}
msg = rx.recv().fuse() => {
if let Some((signed_header, header)) = msg {
let i2_store = store.clone();
let fut = header_waiter(i2_store, signed_header, header);
waiting_headers.push(fut);
}
else {
// Channel is closed, so we exit.
// Our primary is gone!
warn!("Exiting digest waiter process.");
break;
}
}
}
}
}
/// The future that waits for the parent certificates and transaction digests associated with a header.
pub async fn header_waiter(
mut store: Store, // A copy of the store
signed_header: SignedBlockHeader, // The signed header structure
header: BlockHeader, // The header for which we wait for dependencies.
) -> Result<SignedBlockHeader, DagError> |
pub async fn dag_synchronizer_process(
mut get_from_dag: Receiver<SyncMessage>,
dag_synchronizer: DagSynchronizer,
) {
let mut digests_to_sync: Vec<Digest> = Vec::new();
let mut round_to_sync: RoundNumber = 0;
let mut last_synced_round: RoundNumber = 0;
let mut rollback_stop_round: RoundNumber = 1;
let mut sq: SyncNumber = 0;
let rollback_fut = Fuse::terminated();
pin_mut!(rollback_fut);
loop {
select! {
msg = get_from_dag.recv().fuse() => {
if let Some(SyncMessage::SyncUpToRound(round, digests, last_gc_round)) = msg {
if round > round_to_sync {
debug!("DAG sync: received request to sync digests: {:?} up to round {}", digests, round);
round_to_sync = round;
digests_to_sync = digests;
rollback_stop_round = max(last_gc_round+1, 1);
if rollback_fut.is_terminated(){
last_synced_round = round_to_sync;
rollback_fut.set(rollback_headers(dag_synchronizer.clone(), digests_to_sync.clone(), round_to_sync, rollback_stop_round, sq).fuse());
debug!("DAG sync: go.");
}
else {
debug!("DAG sync: drop.");
}
}
} else{
warn!("Exiting DagSynchronizer::start().");
break;
}
}
res = rollback_fut => {
if let Err(e) = res{
error!("rollback_headers returns error: {:?}", e);
}
else{
sq += 1;
if round_to_sync > last_synced_round{
last_synced_round = round_to_sync;
// rollback_stop_round = max(last_synced_round, 1);
rollback_fut.set(rollback_headers(dag_synchronizer.clone(), digests_to_sync.clone(), round_to_sync, rollback_stop_round, sq).fuse());
}
}
}
}
}
}
pub async fn handle_header_digest(
mut dag_synchronizer: DagSynchronizer,
digest: Digest,
rollback_stop_round: RoundNumber,
sq: SyncNumber,
) -> Result<Option<Vec<Digest>>, DagError> {
//TODO: issue: should we try the processor first? we need concurrent access to it...
if let Ok(dbvalue) = dag_synchronizer.store.read(digest.to_vec()).await {
match dbvalue {
None => {
debug!("invoking send_sync_header_requests: {:?}", digest);
dag_synchronizer
.send_sync_header_requests(digest.clone(), sq)
.await?;
// Exponential backoff delay
let mut delay = 50;
loop {
select! {
ret = dag_synchronizer.store.notify_read(digest.to_vec()).fuse() =>{
if let Ok(record_header) = ret {
return Ok(dag_synchronizer.handle_record_header(record_header, rollback_stop_round).await?);
} else {
//handle the error.
error!("Read returned an error: {:?}", ret);
}
}
_ = sleep(Duration::from_millis(delay)).fuse() => {
debug!("Trigger Sync on {:?}", digest);
dag_synchronizer.send_sync_header_requests(digest.clone(), sq).await?;
delay *= 4;
}
}
}
}
// HERE
Some(record_header) => {
let result: Result<HeaderPrimaryRecord, Error> =
bincode::deserialize(&record_header);
if let Err(e) = result {
panic!("Reading digest {:?} from store gives us a struct that we cannot deserialize: {}", digest, e);
}
return Ok(dag_synchronizer
.handle_record_header(record_header, rollback_stop_round)
.await?);
}
}
} else {
//handle the error.
}
Ok(None)
}
//sync all digests' causal history and pass to consensus
pub async fn rollback_headers(
mut dag_synchronizer: DagSynchronizer,
digests: Vec<Digest>,
round: RoundNumber,
rollback_stop_round: RoundNumber,
sq: SyncNumber,
) -> Result<(), DagError> {
let mut asked_for: HashSet<Digest> = HashSet::new();
let mut digests_in_process = FuturesUnordered::new();
for digest in digests {
let fut = handle_header_digest(dag_synchronizer.clone(), digest, rollback_stop_round, sq);
digests_in_process.push(fut);
}
while !digests_in_process.is_empty() {
// let option = digests_in_process.select_next_some().await?;
let option = match digests_in_process.select_next_some().await {
Ok(option) => option,
Err(e) => panic!("Panix {}", e),
};
if let Some(parents_digests) = option {
for digest in parents_digests {
// Only ask for each digest once per rollback.
if asked_for.contains(&digest) {
continue;
}
asked_for.insert(digest.clone());
if !dag_synchronizer.pending_digests.contains(&digest) {
debug!("Seems so {}", digest);
dag_synchronizer.pending_digests.insert(digest.clone());
let fut = handle_header_digest(
dag_synchronizer.clone(),
digest,
rollback_stop_round,
sq,
);
digests_in_process.push(fut);
}
}
}
}
let msg = ConsensusMessage::SyncDone(round);
dag_synchronizer.send_to_consensus(msg).await?;
info!("DAG sync: sent to consensus SyncDone for round {}", round);
Ok(())
}
#[derive(Clone)]
pub struct DagSynchronizer {
pub id: NodeID,
pub send_to_consensus_channel: Sender<ConsensusMessage>,
pub send_to_network: Sender<(RoundNumber, PrimaryMessage)>,
pub store: Store,
pub committee: Committee,
pub pending_digests: HashSet<Digest>,
}
impl DagSynchronizer {
pub fn new(
id: NodeID,
committee: Committee,
store: Store,
send_to_consensus_channel: Sender<ConsensusMessage>,
send_to_network: Sender<(RoundNumber, PrimaryMessage)>,
) -> Self {
DagSynchronizer {
id,
send_to_consensus_channel,
send_to_network,
store,
committee,
pending_digests: HashSet::new(),
}
}
pub async fn send_sync_header_requests(
&mut self,
digest: Digest,
sq: SyncNumber,
) -> Result<(), DagError> {
// Pick 3 random validators; hopefully at least one of them will have the header. Different strategies could be implemented here.
let authorities = self.committee.authorities.choose_multiple(
&mut rand::thread_rng(),
min(self.committee.quorum_threshold() / 2, 3),
); // self.committee.quorum_threshold());
debug!("asking for header with digest: {:?}", digest);
for source in authorities {
if source.primary.name == self.id {
continue;
}
let msg =
PrimaryMessage::SyncHeaderRequest(digest.clone(), self.id, source.primary.name);
// info!("Sync Request (Conse): {:?} from {:?}", digest.clone(), source.primary.name);
if let Err(e) = self.send_to_network.send((sq, msg)).await {
panic!("error: {}", e);
}
}
Ok(())
}
pub async fn handle_record_header(
&mut self,
record_header: DBValue,
rollback_stop_round: RoundNumber,
) -> Result<Option<Vec<Digest>>, DagError> {
// let mut record_header: HeaderPrimaryRecord = bincode::deserialize(&record_header[..])?;
let mut record_header: HeaderPrimaryRecord = bincode::deserialize(&record_header)
.expect("Deserialization of primary record failure");
if self.pending_digests.contains(&record_header.digest) {
self.pending_digests.remove(&record_header.digest);
}
if !record_header.passed_to_consensus {
let msg = ConsensusMessage::Header(
record_header.header.clone(),
record_header.digest.clone(),
);
self.send_to_consensus(msg)
.await
.expect("Fail to send to consensus channel");
record_header.passed_to_consensus = true;
self.store
.write(record_header.digest.0.to_vec(), record_header.to_bytes())
.await;
return if record_header.header.round <= rollback_stop_round
|| record_header.header.round <= 1
{
// No need to sync parents: we GC them and consensus is not going to order them.
Ok(None)
} else {
let digests: Vec<Digest> = record_header
.header
.parents
.iter()
.clone()
.map(|(_, digest)| digest.clone())
.collect();
Ok(Some(digests))
};
}
Ok(None)
}
pub async fn send_to_consensus(&mut self, msg: ConsensusMessage) -> Result<(), DagError> {
self.send_to_consensus_channel
.send(msg)
.await
.map_err(|e| DagError::ChannelError {
error: format!("{}", e),
})?;
Ok(())
}
}
| {
debug!(
"[DEP] ASK H {:?} D {}",
(header.round, header.author),
header.transactions_digest.len()
);
// Note: we now store different digests for headers and certificates to avoid false positives.
for (other_primary_id, digest) in &header.parents {
let digest_in_store =
PartialCertificate::make_digest(&digest, header.round - 1, *other_primary_id);
if store.notify_read(digest_in_store.0.to_vec()).await.is_err() {
return Err(DagError::StorageFailure {
error: "Error in reading store from 'header_waiter'.".to_string(),
});
}
}
for (digest, _) in &header.transactions_digest {
if store.notify_read(digest.0.to_vec()).await.is_err() {
return Err(DagError::StorageFailure {
error: "Error in reading store from 'header_waiter'.".to_string(),
});
}
}
debug!(
"[DEP] GOT H {:?} D {}",
(header.round, header.author),
header.transactions_digest.len()
);
Ok(signed_header)
} | identifier_body |
timeseriesrdd.py | from py4j.java_gateway import java_import
from pyspark import RDD
from pyspark.serializers import FramedSerializer, SpecialLengths, write_int, read_int
from pyspark.sql import DataFrame
from utils import datetime_to_millis
from datetimeindex import DateTimeIndex, irregular
import struct
import numpy as np
import pandas as pd
from io import BytesIO
class TimeSeriesRDD(RDD):
"""
A lazy distributed collection of univariate series with a conformed time dimension. Lazy in the
sense that it is an RDD: it encapsulates all the information needed to generate its elements,
but doesn't materialize them upon instantiation. Distributed in the sense that different
univariate series within the collection can be stored and processed on different nodes. Within
each univariate series, observations are not distributed. The time dimension is conformed in the
sense that a single DateTimeIndex applies to all the univariate series. Each univariate series
within the RDD has a String key to identify it.
"""
def __init__(self, dt_index, rdd, jtsrdd = None, sc = None):
if jtsrdd == None:
# Construct from a Python RDD object and a Python DateTimeIndex
jvm = rdd.ctx._jvm
jrdd = rdd._reserialize(_TimeSeriesSerializer())._jrdd.map( \
jvm.com.cloudera.sparkts.BytesToKeyAndSeries())
self._jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD( \
dt_index._jdt_index, jrdd.rdd())
RDD.__init__(self, rdd._jrdd, rdd.ctx)
else:
# Construct from a py4j.JavaObject pointing to a TimeSeriesRDD and a Python SparkContext
jvm = sc._jvm
jrdd = jvm.org.apache.spark.api.java.JavaRDD(jtsrdd, None).map( \
jvm.com.cloudera.sparkts.KeyAndSeriesToBytes())
RDD.__init__(self, jrdd, sc, _TimeSeriesSerializer())
self._jtsrdd = jtsrdd
def __getitem__(self, val):
"""
Returns a TimeSeriesRDD representing a subslice of this TimeSeriesRDD, containing only
values for a sub-range of the time it covers.
"""
start = datetime_to_millis(val.start)
stop = datetime_to_millis(val.stop)
return TimeSeriesRDD(None, None, self._jtsrdd.slice(start, stop), self.ctx)
def differences(self, n):
"""
Returns a TimeSeriesRDD where each time series is differenced with the given order.
The new RDD will be missing the first n date-times.
Parameters
----------
n : int
The order of differencing to perform.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.differences(n), self.ctx)
def fill(self, method):
"""
Returns a TimeSeriesRDD with missing values imputed using the given method.
Parameters
----------
method : string
"nearest" fills in NaNs with the closest non-NaN value, using the closest previous value
in the case of a tie. "linear" does a linear interpolation from the closest filled-in
values. "next" uses the closest value that is in the future of the missing value.
"previous" uses the closest value from the past of the missing value. "spline"
interpolates using a cubic spline.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.fill(method), self.ctx)
def map_series(self, fn, dt_index = None):
"""
Returns a TimeSeriesRDD, with a transformation applied to all the series in this RDD.
Either the series produced by the given function should conform to this TimeSeriesRDD's
index, or a new DateTimeIndex should be given that they conform to.
Parameters
----------
fn : function
A function that maps arrays of floats to arrays of floats.
dt_index : DateTimeIndex
A DateTimeIndex for the produced TimeSeriesRDD.
"""
if dt_index == None:
dt_index = self.index()
return TimeSeriesRDD(dt_index, self.map(fn))
def to_instants(self):
"""
Returns an RDD of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing an RDD of tuples of datetime and
a numpy array containing all the observations that occurred at that time.
"""
jrdd = self._jtsrdd.toInstants(-1).toJavaRDD().map( \
self.ctx._jvm.com.cloudera.sparkts.InstantToBytes())
return RDD(jrdd, self.ctx, _InstantDeserializer())
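# Hedged illustration of the element shape (keys and values are made up):
# (Timestamp('2015-04-09 00:00:00'), array([ 1.5, nan, 2.0])) -- one array entry per series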
def to_instants_dataframe(self, sql_ctx):
"""
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
is a key from one of the rows in the TimeSeriesRDD.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toInstantsDataFrame(ssql_ctx, -1)
return DataFrame(jdf, sql_ctx)
def index(self):
"""Returns the index describing the times referred to by the elements of this TimeSeriesRDD
"""
jindex = self._jtsrdd.index()
return DateTimeIndex(jindex)
def to_observations_dataframe(self, sql_ctx, ts_col='timestamp', key_col='key', val_col='value'):
"""
Returns a DataFrame of observations, each containing a timestamp, a key, and a value.
Parameters
----------
sql_ctx : SQLContext
ts_col : string
The name for the timestamp column.
key_col : string
The name for the key column.
val_col : string
The name for the value column.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toObservationsDataFrame(ssql_ctx, ts_col, key_col, val_col)
return DataFrame(jdf, sql_ctx)
def to_pandas_series_rdd(self):
"""
Returns an RDD of Pandas Series objects indexed with Pandas DatetimeIndexes
"""
pd_index = self.index().to_pandas_index()
return self.map(lambda x: (x[0], pd.Series(x[1], pd_index)))
def to_pandas_dataframe(self):
"""
Pulls the contents of the RDD to the driver and places them in a Pandas DataFrame.
Each record in the RDD becomes a column, and the DataFrame is indexed with a
DatetimeIndex generated from this RDD's index.
"""
pd_index = self.index().to_pandas_index()
return pd.DataFrame.from_items(self.collect()).set_index(pd_index)
def remove_instants_with_nans(self):
"""
Returns a TimeSeriesRDD with instants containing NaNs cut out.
The resulting TimeSeriesRDD has a slimmed down DateTimeIndex, missing all the instants
for which any series in the RDD contained a NaN.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.removeInstantsWithNaNs(), self.ctx)
def filter(self, predicate):
return TimeSeriesRDD(self.index(), super(TimeSeriesRDD, self).filter(predicate))
def find_series(self, key):
"""
Finds a series in the TimeSeriesRDD by its key.
Parameters
----------
key : string
The key of the series to find.
"""
# TODO: this could be more efficient if we pushed it down into Java
return self.filter(lambda x: x[0] == key).first()[1]
def return_rates(self):
"""
Returns a TimeSeriesRDD where each series is a return rate series for a series in this RDD.
Assumes periodic (as opposed to continuously compounded) returns.
""" | Returns a TimeSeriesRDD rebased on top of a new index. Any timestamps that exist in the new
index but not in the existing index will be filled in with NaNs.
Parameters
----------
new_index : DateTimeIndex
"""
return TimeSeriesRDD(None, None, self._jtsrdd.withIndex(new_index._jdt_index), self.ctx)
def time_series_rdd_from_pandas_series_rdd(series_rdd, sc):
"""
Instantiates a TimeSeriesRDD from an RDD of Pandas Series objects.
The series in the RDD are all expected to have the same DatetimeIndex.
Parameters
----------
series_rdd : RDD of (string, pandas.Series) tuples
sc : SparkContext
"""
first = series_rdd.first()
dt_index = irregular(first[1].index, sc)
return TimeSeriesRDD(dt_index, series_rdd.mapValues(lambda x: x.values))
def time_series_rdd_from_observations(dt_index, df, ts_col, key_col, val_col):
"""
Instantiates a TimeSeriesRDD from a DataFrame of observations.
An observation is a row containing a timestamp, a string key, and float value.
Parameters
----------
dt_index : DateTimeIndex
The index of the RDD to create. Observations not contained in this index will be ignored.
df : DataFrame
ts_col : string
The name of the column in the DataFrame containing the timestamps.
key_col : string
The name of the column in the DataFrame containing the keys.
val_col : string
The name of the column in the DataFrame containing the values.
"""
jvm = df._sc._jvm
jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD.timeSeriesRDDFromObservations( \
dt_index._jdt_index, df._jdf, ts_col, key_col, val_col)
return TimeSeriesRDD(None, None, jtsrdd, df._sc)
class _TimeSeriesSerializer(FramedSerializer):
"""Serializes (key, vector) pairs to and from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.{BytesToKeyAndSeries, KeyAndSeriesToBytes}
"""
def dumps(self, obj):
stream = BytesIO()
(key, vector) = obj
key_bytes = key.encode('utf-8')
write_int(len(key_bytes), stream)
stream.write(key_bytes)
write_int(len(vector), stream)
# TODO: maybe some optimized way to write this all at once?
for value in vector:
stream.write(struct.pack('!d', value))
stream.seek(0)
return stream.read()
def loads(self, obj):
stream = BytesIO(obj)
key_length = read_int(stream)
key = stream.read(key_length).decode('utf-8')
return (key, _read_vec(stream))
def __repr__(self):
return '_TimeSeriesSerializer'
class _InstantDeserializer(FramedSerializer):
"""
Serializes (timestamp, vector) pairs to and from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.InstantToBytes
"""
def loads(self, obj):
stream = BytesIO(obj)
timestamp_ms = struct.unpack('!q', stream.read(8))[0]
return (pd.Timestamp(timestamp_ms * 1000000), _read_vec(stream))
def __repr__(self):
return "_InstantDeserializer"
def _read_vec(stream):
vector_length = read_int(stream)
vector = np.empty(vector_length)
# TODO: maybe some optimized way to read this all at once?
for i in xrange(vector_length):
vector[i] = struct.unpack('!d', stream.read(8))[0]
return vector | return TimeSeriesRDD(None, None, self._jtsrdd.returnRates(), self.ctx)
def with_index(self, new_index):
""" | random_line_split |
timeseriesrdd.py | from py4j.java_gateway import java_import
from pyspark import RDD
from pyspark.serializers import FramedSerializer, SpecialLengths, write_int, read_int
from pyspark.sql import DataFrame
from utils import datetime_to_millis
from datetimeindex import DateTimeIndex, irregular
import struct
import numpy as np
import pandas as pd
from io import BytesIO
class TimeSeriesRDD(RDD):
"""
A lazy distributed collection of univariate series with a conformed time dimension. Lazy in the
sense that it is an RDD: it encapsulates all the information needed to generate its elements,
but doesn't materialize them upon instantiation. Distributed in the sense that different
univariate series within the collection can be stored and processed on different nodes. Within
each univariate series, observations are not distributed. The time dimension is conformed in the
sense that a single DateTimeIndex applies to all the univariate series. Each univariate series
within the RDD has a String key to identify it.
"""
def __init__(self, dt_index, rdd, jtsrdd = None, sc = None):
if jtsrdd == None:
# Construct from a Python RDD object and a Python DateTimeIndex
jvm = rdd.ctx._jvm
jrdd = rdd._reserialize(_TimeSeriesSerializer())._jrdd.map( \
jvm.com.cloudera.sparkts.BytesToKeyAndSeries())
self._jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD( \
dt_index._jdt_index, jrdd.rdd())
RDD.__init__(self, rdd._jrdd, rdd.ctx)
else:
# Construct from a py4j.JavaObject pointing to a TimeSeriesRDD and a Python SparkContext
jvm = sc._jvm
jrdd = jvm.org.apache.spark.api.java.JavaRDD(jtsrdd, None).map( \
jvm.com.cloudera.sparkts.KeyAndSeriesToBytes())
RDD.__init__(self, jrdd, sc, _TimeSeriesSerializer())
self._jtsrdd = jtsrdd
def __getitem__(self, val):
"""
Returns a TimeSeriesRDD representing a subslice of this TimeSeriesRDD, containing only
values for a sub-range of the time it covers.
"""
start = datetime_to_millis(val.start)
stop = datetime_to_millis(val.stop)
return TimeSeriesRDD(None, None, self._jtsrdd.slice(start, stop), self.ctx)
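# Hedged example (assumes `tsrdd` is an existing TimeSeriesRDD and `from datetime import datetime`):
# sub = tsrdd[datetime(2015, 4, 9):datetime(2015, 4, 14)] # restrict to a sub-range of time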
def differences(self, n):
"""
Returns a TimeSeriesRDD where each time series is differenced with the given order.
The new RDD will be missing the first n date-times.
Parameters
----------
n : int
The order of differencing to perform.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.differences(n), self.ctx)
def | (self, method):
"""
Returns a TimeSeriesRDD with missing values imputed using the given method.
Parameters
----------
method : string
"nearest" fills in NaNs with the closest non-NaN value, using the closest previous value
in the case of a tie. "linear" does a linear interpolation from the closest filled-in
values. "next" uses the closest value that is in the future of the missing value.
"previous" uses the closest value from the past of the missing value. "spline"
interpolates using a cubic spline.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.fill(method), self.ctx)
def map_series(self, fn, dt_index = None):
"""
Returns a TimeSeriesRDD, with a transformation applied to all the series in this RDD.
Either the series produced by the given function should conform to this TimeSeriesRDD's
index, or a new DateTimeIndex should be given that they conform to.
Parameters
----------
fn : function
A function that maps arrays of floats to arrays of floats.
dt_index : DateTimeIndex
A DateTimeIndex for the produced TimeSeriesRDD.
"""
if dt_index == None:
dt_index = self.index()
return TimeSeriesRDD(dt_index, self.map(fn))
def to_instants(self):
"""
Returns an RDD of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing an RDD of tuples of datetime and
a numpy array containing all the observations that occurred at that time.
"""
jrdd = self._jtsrdd.toInstants(-1).toJavaRDD().map( \
self.ctx._jvm.com.cloudera.sparkts.InstantToBytes())
return RDD(jrdd, self.ctx, _InstantDeserializer())
def to_instants_dataframe(self, sql_ctx):
"""
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
is a key from one of the rows in the TimeSeriesRDD.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toInstantsDataFrame(ssql_ctx, -1)
return DataFrame(jdf, sql_ctx)
def index(self):
"""Returns the index describing the times referred to by the elements of this TimeSeriesRDD
"""
jindex = self._jtsrdd.index()
return DateTimeIndex(jindex)
def to_observations_dataframe(self, sql_ctx, ts_col='timestamp', key_col='key', val_col='value'):
"""
Returns a DataFrame of observations, each containing a timestamp, a key, and a value.
Parameters
----------
sql_ctx : SQLContext
ts_col : string
The name for the timestamp column.
key_col : string
The name for the key column.
val_col : string
The name for the value column.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toObservationsDataFrame(ssql_ctx, ts_col, key_col, val_col)
return DataFrame(jdf, sql_ctx)
def to_pandas_series_rdd(self):
"""
Returns an RDD of Pandas Series objects indexed with Pandas DatetimeIndexes
"""
pd_index = self.index().to_pandas_index()
return self.map(lambda x: (x[0], pd.Series(x[1], pd_index)))
def to_pandas_dataframe(self):
"""
Pulls the contents of the RDD to the driver and places them in a Pandas DataFrame.
Each record in the RDD becomes a column, and the DataFrame is indexed with a
DatetimeIndex generated from this RDD's index.
"""
pd_index = self.index().to_pandas_index()
return pd.DataFrame.from_items(self.collect()).set_index(pd_index)
def remove_instants_with_nans(self):
"""
Returns a TimeSeriesRDD with instants containing NaNs cut out.
The resulting TimeSeriesRDD has a slimmed down DateTimeIndex, missing all the instants
for which any series in the RDD contained a NaN.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.removeInstantsWithNaNs(), self.ctx)
def filter(self, predicate):
return TimeSeriesRDD(self.index(), super(TimeSeriesRDD, self).filter(predicate))
def find_series(self, key):
"""
Finds a series in the TimeSeriesRDD by its key.
Parameters
----------
key : string
The key of the series to find.
"""
# TODO: this could be more efficient if we pushed it down into Java
return self.filter(lambda x: x[0] == key).first()[1]
def return_rates(self):
"""
Returns a TimeSeriesRDD where each series is a return rate series for a series in this RDD.
Assumes periodic (as opposed to continuously compounded) returns.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.returnRates(), self.ctx)
def with_index(self, new_index):
"""
Returns a TimeSeriesRDD rebased on top of a new index. Any timestamps that exist in the new
index but not in the existing index will be filled in with NaNs.
Parameters
----------
new_index : DateTimeIndex
"""
return TimeSeriesRDD(None, None, self._jtsrdd.withIndex(new_index._jdt_index), self.ctx)
def time_series_rdd_from_pandas_series_rdd(series_rdd, sc):
"""
Instantiates a TimeSeriesRDD from an RDD of Pandas Series objects.
The series in the RDD are all expected to have the same DatetimeIndex.
Parameters
----------
series_rdd : RDD of (string, pandas.Series) tuples
sc : SparkContext
"""
first = series_rdd.first()
dt_index = irregular(first[1].index, sc)
return TimeSeriesRDD(dt_index, series_rdd.mapValues(lambda x: x.values))
def time_series_rdd_from_observations(dt_index, df, ts_col, key_col, val_col):
"""
Instantiates a TimeSeriesRDD from a DataFrame of observations.
An observation is a row containing a timestamp, a string key, and float value.
Parameters
----------
dt_index : DateTimeIndex
The index of the RDD to create. Observations not contained in this index will be ignored.
df : DataFrame
ts_col : string
The name of the column in the DataFrame containing the timestamps.
key_col : string
The name of the column in the DataFrame containing the keys.
val_col : string
The name of the column in the DataFrame containing the values.
"""
jvm = df._sc._jvm
jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD.timeSeriesRDDFromObservations( \
dt_index._jdt_index, df._jdf, ts_col, key_col, val_col)
return TimeSeriesRDD(None, None, jtsrdd, df._sc)
class _TimeSeriesSerializer(FramedSerializer):
"""Serializes (key, vector) pairs to and from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.{BytesToKeyAndSeries, KeyAndSeriesToBytes}
"""
def dumps(self, obj):
stream = BytesIO()
(key, vector) = obj
key_bytes = key.encode('utf-8')
write_int(len(key_bytes), stream)
stream.write(key_bytes)
write_int(len(vector), stream)
# TODO: maybe some optimized way to write this all at once?
for value in vector:
stream.write(struct.pack('!d', value))
stream.seek(0)
return stream.read()
def loads(self, obj):
stream = BytesIO(obj)
key_length = read_int(stream)
key = stream.read(key_length).decode('utf-8')
return (key, _read_vec(stream))
def __repr__(self):
return '_TimeSeriesSerializer'
class _InstantDeserializer(FramedSerializer):
"""
Serializes (timestamp, vector) pairs to and from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.InstantToBytes
"""
def loads(self, obj):
stream = BytesIO(obj)
timestamp_ms = struct.unpack('!q', stream.read(8))[0]
return (pd.Timestamp(timestamp_ms * 1000000), _read_vec(stream))
def __repr__(self):
return "_InstantDeserializer"
def _read_vec(stream):
vector_length = read_int(stream)
vector = np.empty(vector_length)
# TODO: maybe some optimized way to read this all at once?
for i in xrange(vector_length):
vector[i] = struct.unpack('!d', stream.read(8))[0]
return vector
| fill | identifier_name |
timeseriesrdd.py | from py4j.java_gateway import java_import
from pyspark import RDD
from pyspark.serializers import FramedSerializer, SpecialLengths, write_int, read_int
from pyspark.sql import DataFrame
from utils import datetime_to_millis
from datetimeindex import DateTimeIndex, irregular
import struct
import numpy as np
import pandas as pd
from io import BytesIO
class TimeSeriesRDD(RDD):
"""
A lazy distributed collection of univariate series with a conformed time dimension. Lazy in the
sense that it is an RDD: it encapsulates all the information needed to generate its elements,
but doesn't materialize them upon instantiation. Distributed in the sense that different
univariate series within the collection can be stored and processed on different nodes. Within
each univariate series, observations are not distributed. The time dimension is conformed in the
sense that a single DateTimeIndex applies to all the univariate series. Each univariate series
within the RDD has a String key to identify it.
"""
def __init__(self, dt_index, rdd, jtsrdd = None, sc = None):
if jtsrdd == None:
# Construct from a Python RDD object and a Python DateTimeIndex
jvm = rdd.ctx._jvm
jrdd = rdd._reserialize(_TimeSeriesSerializer())._jrdd.map( \
jvm.com.cloudera.sparkts.BytesToKeyAndSeries())
self._jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD( \
dt_index._jdt_index, jrdd.rdd())
RDD.__init__(self, rdd._jrdd, rdd.ctx)
else:
# Construct from a py4j.JavaObject pointing to a TimeSeriesRDD and a Python SparkContext
jvm = sc._jvm
jrdd = jvm.org.apache.spark.api.java.JavaRDD(jtsrdd, None).map( \
jvm.com.cloudera.sparkts.KeyAndSeriesToBytes())
RDD.__init__(self, jrdd, sc, _TimeSeriesSerializer())
self._jtsrdd = jtsrdd
def __getitem__(self, val):
"""
Returns a TimeSeriesRDD representing a subslice of this TimeSeriesRDD, containing only
values for a sub-range of the time it covers.
"""
start = datetime_to_millis(val.start)
stop = datetime_to_millis(val.stop)
return TimeSeriesRDD(None, None, self._jtsrdd.slice(start, stop), self.ctx)
def differences(self, n):
"""
Returns a TimeSeriesRDD where each time series is differenced with the given order.
The new RDD will be missing the first n date-times.
Parameters
----------
n : int
The order of differencing to perform.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.differences(n), self.ctx)
def fill(self, method):
"""
Returns a TimeSeriesRDD with missing values imputed using the given method.
Parameters
----------
method : string
"nearest" fills in NaNs with the closest non-NaN value, using the closest previous value
in the case of a tie. "linear" does a linear interpolation from the closest filled-in
values. "next" uses the closest value that is in the future of the missing value.
"previous" uses the closest value from the past of the missing value. "spline"
interpolates using a cubic spline.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.fill(method), self.ctx)
def map_series(self, fn, dt_index = None):
"""
Returns a TimeSeriesRDD, with a transformation applied to all the series in this RDD.
Either the series produced by the given function should conform to this TimeSeriesRDD's
index, or a new DateTimeIndex should be given that they conform to.
Parameters
----------
fn : function
A function that maps arrays of floats to arrays of floats.
dt_index : DateTimeIndex
A DateTimeIndex for the produced TimeSeriesRDD.
"""
if dt_index == None:
dt_index = self.index()
return TimeSeriesRDD(dt_index, self.map(fn))
def to_instants(self):
"""
Returns an RDD of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing an RDD of tuples of datetime and
a numpy array containing all the observations that occurred at that time.
"""
jrdd = self._jtsrdd.toInstants(-1).toJavaRDD().map( \
self.ctx._jvm.com.cloudera.sparkts.InstantToBytes())
return RDD(jrdd, self.ctx, _InstantDeserializer())
def to_instants_dataframe(self, sql_ctx):
"""
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
is a key from one of the rows in the TimeSeriesRDD.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toInstantsDataFrame(ssql_ctx, -1)
return DataFrame(jdf, sql_ctx)
def index(self):
"""Returns the index describing the times referred to by the elements of this TimeSeriesRDD
"""
jindex = self._jtsrdd.index()
return DateTimeIndex(jindex)
def to_observations_dataframe(self, sql_ctx, ts_col='timestamp', key_col='key', val_col='value'):
"""
Returns a DataFrame of observations, each containing a timestamp, a key, and a value.
Parameters
----------
sql_ctx : SQLContext
ts_col : string
The name for the timestamp column.
key_col : string
The name for the key column.
val_col : string
The name for the value column.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toObservationsDataFrame(ssql_ctx, ts_col, key_col, val_col)
return DataFrame(jdf, sql_ctx)
def to_pandas_series_rdd(self):
"""
Returns an RDD of Pandas Series objects indexed with Pandas DatetimeIndexes
"""
pd_index = self.index().to_pandas_index()
return self.map(lambda x: (x[0], pd.Series(x[1], pd_index)))
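# Hedged example (assumes `tsrdd` is an existing TimeSeriesRDD):
# key, series = tsrdd.to_pandas_series_rdd().first() # `series` is a pd.Series indexed by datetime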
def to_pandas_dataframe(self):
"""
Pulls the contents of the RDD to the driver and places them in a Pandas DataFrame.
Each record in the RDD becomes a column, and the DataFrame is indexed with a
DatetimeIndex generated from this RDD's index.
"""
pd_index = self.index().to_pandas_index()
return pd.DataFrame.from_items(self.collect()).set_index(pd_index)
def remove_instants_with_nans(self):
"""
Returns a TimeSeriesRDD with instants containing NaNs cut out.
The resulting TimeSeriesRDD has a slimmed down DateTimeIndex, missing all the instants
for which any series in the RDD contained a NaN.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.removeInstantsWithNaNs(), self.ctx)
def filter(self, predicate):
return TimeSeriesRDD(self.index(), super(TimeSeriesRDD, self).filter(predicate))
def find_series(self, key):
"""
Finds a series in the TimeSeriesRDD by its key.
Parameters
----------
key : string
The key of the series to find.
"""
# TODO: this could be more efficient if we pushed it down into Java
return self.filter(lambda x: x[0] == key).first()[1]
def return_rates(self):
"""
Returns a TimeSeriesRDD where each series is a return rate series for a series in this RDD.
Assumes periodic (as opposed to continuously compounded) returns.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.returnRates(), self.ctx)
def with_index(self, new_index):
"""
Returns a TimeSeriesRDD rebased on top of a new index. Any timestamps that exist in the new
index but not in the existing index will be filled in with NaNs.
Parameters
----------
new_index : DateTimeIndex
"""
return TimeSeriesRDD(None, None, self._jtsrdd.withIndex(new_index._jdt_index), self.ctx)
def time_series_rdd_from_pandas_series_rdd(series_rdd, sc):
"""
Instantiates a TimeSeriesRDD from an RDD of Pandas Series objects.
The series in the RDD are all expected to have the same DatetimeIndex.
Parameters
----------
series_rdd : RDD of (string, pandas.Series) tuples
sc : SparkContext
"""
first = series_rdd.first()
dt_index = irregular(first[1].index, sc)
return TimeSeriesRDD(dt_index, series_rdd.mapValues(lambda x: x.values))
def time_series_rdd_from_observations(dt_index, df, ts_col, key_col, val_col):
"""
Instantiates a TimeSeriesRDD from a DataFrame of observations.
An observation is a row containing a timestamp, a string key, and float value.
Parameters
----------
dt_index : DateTimeIndex
The index of the RDD to create. Observations not contained in this index will be ignored.
df : DataFrame
ts_col : string
The name of the column in the DataFrame containing the timestamps.
key_col : string
The name of the column in the DataFrame containing the keys.
val_col : string
The name of the column in the DataFrame containing the values.
"""
jvm = df._sc._jvm
jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD.timeSeriesRDDFromObservations( \
dt_index._jdt_index, df._jdf, ts_col, key_col, val_col)
return TimeSeriesRDD(None, None, jtsrdd, df._sc)
class _TimeSeriesSerializer(FramedSerializer):
"""Serializes (key, vector) pairs to and from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.{BytesToKeyAndSeries, KeyAndSeriesToBytes}
"""
def dumps(self, obj):
stream = BytesIO()
(key, vector) = obj
key_bytes = key.encode('utf-8')
write_int(len(key_bytes), stream)
stream.write(key_bytes)
write_int(len(vector), stream)
# TODO: maybe some optimized way to write this all at once?
for value in vector:
|
stream.seek(0)
return stream.read()
def loads(self, obj):
stream = BytesIO(obj)
key_length = read_int(stream)
key = stream.read(key_length).decode('utf-8')
return (key, _read_vec(stream))
def __repr__(self):
return '_TimeSeriesSerializer'
class _InstantDeserializer(FramedSerializer):
"""
Serializes (timestamp, vector) pairs to an from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.InstantToBytes
"""
def loads(self, obj):
stream = BytesIO(obj)
timestamp_ms = struct.unpack('!q', stream.read(8))[0]
return (pd.Timestamp(timestamp_ms * 1000000), _read_vec(stream))
def __repr__(self):
return "_InstantDeserializer"
def _read_vec(stream):
vector_length = read_int(stream)
vector = np.empty(vector_length)
# TODO: maybe some optimized way to read this all at once?
for i in xrange(vector_length):
vector[i] = struct.unpack('!d', stream.read(8))[0]
return vector
| stream.write(struct.pack('!d', value)) | conditional_block |
timeseriesrdd.py | from py4j.java_gateway import java_import
from pyspark import RDD
from pyspark.serializers import FramedSerializer, SpecialLengths, write_int, read_int
from pyspark.sql import DataFrame
from utils import datetime_to_millis
from datetimeindex import DateTimeIndex, irregular
import struct
import numpy as np
import pandas as pd
from io import BytesIO
class TimeSeriesRDD(RDD):
"""
A lazy distributed collection of univariate series with a conformed time dimension. Lazy in the
sense that it is an RDD: it encapsulates all the information needed to generate its elements,
but doesn't materialize them upon instantiation. Distributed in the sense that different
univariate series within the collection can be stored and processed on different nodes. Within
each univariate series, observations are not distributed. The time dimension is conformed in the
sense that a single DateTimeIndex applies to all the univariate series. Each univariate series
within the RDD has a String key to identify it.
"""
def __init__(self, dt_index, rdd, jtsrdd = None, sc = None):
if jtsrdd == None:
# Construct from a Python RDD object and a Python DateTimeIndex
jvm = rdd.ctx._jvm
jrdd = rdd._reserialize(_TimeSeriesSerializer())._jrdd.map( \
jvm.com.cloudera.sparkts.BytesToKeyAndSeries())
self._jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD( \
dt_index._jdt_index, jrdd.rdd())
RDD.__init__(self, rdd._jrdd, rdd.ctx)
else:
# Construct from a py4j.JavaObject pointing to a TimeSeriesRDD and a Python SparkContext
jvm = sc._jvm
jrdd = jvm.org.apache.spark.api.java.JavaRDD(jtsrdd, None).map( \
jvm.com.cloudera.sparkts.KeyAndSeriesToBytes())
RDD.__init__(self, jrdd, sc, _TimeSeriesSerializer())
self._jtsrdd = jtsrdd
def __getitem__(self, val):
"""
Returns a TimeSeriesRDD representing a subslice of this TimeSeriesRDD, containing only
values for a sub-range of the time it covers.
"""
start = datetime_to_millis(val.start)
stop = datetime_to_millis(val.stop)
return TimeSeriesRDD(None, None, self._jtsrdd.slice(start, stop), self.ctx)
def differences(self, n):
|
def fill(self, method):
"""
Returns a TimeSeriesRDD with missing values imputed using the given method.
Parameters
----------
method : string
"nearest" fills in NaNs with the closest non-NaN value, using the closest previous value
in the case of a tie. "linear" does a linear interpolation from the closest filled-in
values. "next" uses the closest value that is in the future of the missing value.
"previous" uses the closest value from the past of the missing value. "spline"
interpolates using a cubic spline.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.fill(method), self.ctx)
def map_series(self, fn, dt_index = None):
"""
Returns a TimeSeriesRDD, with a transformation applied to all the series in this RDD.
Either the series produced by the given function should conform to this TimeSeriesRDD's
index, or a new DateTimeIndex should be given that they conform to.
Parameters
----------
fn : function
A function that maps arrays of floats to arrays of floats.
dt_index : DateTimeIndex
A DateTimeIndex for the produced TimeSeriesRDD.
"""
if dt_index == None:
dt_index = self.index()
return TimeSeriesRDD(dt_index, self.map(fn))
def to_instants(self):
"""
Returns an RDD of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing an RDD of tuples of datetime and
a numpy array containing all the observations that occurred at that time.
"""
jrdd = self._jtsrdd.toInstants(-1).toJavaRDD().map( \
self.ctx._jvm.com.cloudera.sparkts.InstantToBytes())
return RDD(jrdd, self.ctx, _InstantDeserializer())
def to_instants_dataframe(self, sql_ctx):
"""
Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
is a key from one of the rows in the TimeSeriesRDD.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toInstantsDataFrame(ssql_ctx, -1)
return DataFrame(jdf, sql_ctx)
def index(self):
"""Returns the index describing the times referred to by the elements of this TimeSeriesRDD
"""
jindex = self._jtsrdd.index()
return DateTimeIndex(jindex)
def to_observations_dataframe(self, sql_ctx, ts_col='timestamp', key_col='key', val_col='value'):
"""
Returns a DataFrame of observations, each containing a timestamp, a key, and a value.
Parameters
----------
sql_ctx : SQLContext
ts_col : string
The name for the timestamp column.
key_col : string
The name for the key column.
val_col : string
The name for the value column.
"""
ssql_ctx = sql_ctx._ssql_ctx
jdf = self._jtsrdd.toObservationsDataFrame(ssql_ctx, ts_col, key_col, val_col)
return DataFrame(jdf, sql_ctx)
def to_pandas_series_rdd(self):
"""
Returns an RDD of Pandas Series objects indexed with Pandas DatetimeIndexes
"""
pd_index = self.index().to_pandas_index()
return self.map(lambda x: (x[0], pd.Series(x[1], pd_index)))
def to_pandas_dataframe(self):
"""
Pulls the contents of the RDD to the driver and places them in a Pandas DataFrame.
Each record in the RDD becomes a column, and the DataFrame is indexed with a
DatetimeIndex generated from this RDD's index.
"""
pd_index = self.index().to_pandas_index()
return pd.DataFrame.from_items(self.collect()).set_index(pd_index)
def remove_instants_with_nans(self):
"""
Returns a TimeSeriesRDD with instants containing NaNs cut out.
The resulting TimeSeriesRDD has a slimmed down DateTimeIndex, missing all the instants
for which any series in the RDD contained a NaN.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.removeInstantsWithNaNs(), self.ctx)
def filter(self, predicate):
return TimeSeriesRDD(self.index(), super(TimeSeriesRDD, self).filter(predicate))
def find_series(self, key):
"""
Finds a series in the TimeSeriesRDD by its key.
Parameters
----------
key : string
The key of the series to find.
"""
# TODO: this could be more efficient if we pushed it down into Java
return self.filter(lambda x: x[0] == key).first()[1]
def return_rates(self):
"""
Returns a TimeSeriesRDD where each series is a return rate series for a series in this RDD.
Assumes periodic (as opposed to continuously compounded) returns.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.returnRates(), self.ctx)
def with_index(self, new_index):
"""
Returns a TimeSeriesRDD rebased on top of a new index. Any timestamps that exist in the new
index but not in the existing index will be filled in with NaNs.
Parameters
----------
new_index : DateTimeIndex
"""
return TimeSeriesRDD(None, None, self._jtsrdd.withIndex(new_index._jdt_index), self.ctx)
def time_series_rdd_from_pandas_series_rdd(series_rdd, sc):
"""
Instantiates a TimeSeriesRDD from an RDD of Pandas Series objects.
The series in the RDD are all expected to have the same DatetimeIndex.
Parameters
----------
series_rdd : RDD of (string, pandas.Series) tuples
sc : SparkContext
"""
first = series_rdd.first()
dt_index = irregular(first[1].index, sc)
return TimeSeriesRDD(dt_index, series_rdd.mapValues(lambda x: x.values))
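# Hedged example (assumes `s1` and `s2` are pandas Series sharing one DatetimeIndex):
# tsrdd = time_series_rdd_from_pandas_series_rdd(sc.parallelize([('a', s1), ('b', s2)]), sc)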
def time_series_rdd_from_observations(dt_index, df, ts_col, key_col, val_col):
"""
Instantiates a TimeSeriesRDD from a DataFrame of observations.
An observation is a row containing a timestamp, a string key, and float value.
Parameters
----------
dt_index : DateTimeIndex
The index of the RDD to create. Observations not contained in this index will be ignored.
df : DataFrame
ts_col : string
The name of the column in the DataFrame containing the timestamps.
key_col : string
The name of the column in the DataFrame containing the keys.
val_col : string
The name of the column in the DataFrame containing the values.
"""
jvm = df._sc._jvm
jtsrdd = jvm.com.cloudera.sparkts.TimeSeriesRDD.timeSeriesRDDFromObservations( \
dt_index._jdt_index, df._jdf, ts_col, key_col, val_col)
return TimeSeriesRDD(None, None, jtsrdd, df._sc)
class _TimeSeriesSerializer(FramedSerializer):
"""Serializes (key, vector) pairs to and from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.{BytesToKeyAndSeries, KeyAndSeriesToBytes}
"""
def dumps(self, obj):
stream = BytesIO()
(key, vector) = obj
key_bytes = key.encode('utf-8')
write_int(len(key_bytes), stream)
stream.write(key_bytes)
write_int(len(vector), stream)
# TODO: maybe some optimized way to write this all at once?
for value in vector:
stream.write(struct.pack('!d', value))
stream.seek(0)
return stream.read()
def loads(self, obj):
stream = BytesIO(obj)
key_length = read_int(stream)
key = stream.read(key_length).decode('utf-8')
return (key, _read_vec(stream))
def __repr__(self):
return '_TimeSeriesSerializer'
class _InstantDeserializer(FramedSerializer):
"""
Serializes (timestamp, vector) pairs to and from bytes. Must be compatible with the Scala
implementation in com.cloudera.sparkts.InstantToBytes
"""
def loads(self, obj):
stream = BytesIO(obj)
timestamp_ms = struct.unpack('!q', stream.read(8))[0]
return (pd.Timestamp(timestamp_ms * 1000000), _read_vec(stream))
def __repr__(self):
return "_InstantDeserializer"
def _read_vec(stream):
vector_length = read_int(stream)
vector = np.empty(vector_length)
# TODO: maybe some optimized way to read this all at once?
for i in xrange(vector_length):
vector[i] = struct.unpack('!d', stream.read(8))[0]
return vector
| """
Returns a TimeSeriesRDD where each time series is differenced with the given order.
The new RDD will be missing the first n date-times.
Parameters
----------
n : int
The order of differencing to perform.
"""
return TimeSeriesRDD(None, None, self._jtsrdd.differences(n), self.ctx) | identifier_body |
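# Editor's hedged sketch (not from the original source): a (key, vector) record can be
# round-tripped locally through the _TimeSeriesSerializer defined above for testing;
# the key and values below are made up.
#
#   ser = _TimeSeriesSerializer()
#   blob = ser.dumps(('AAPL', np.array([1.0, 2.0, float('nan')])))
#   key, vec = ser.loads(blob)   # -> ('AAPL', array([ 1.,  2.,  nan]))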
CLQ_search_new.py | import numpy as np
import astropy.io.fits as fits
import scipy as sp
import matplotlib.pyplot as plt
import astroML
from matplotlib.ticker import NullFormatter
import pandas as pd
import os
import urllib2
from scipy import stats
from pydl.pydl.pydlutils.spheregroup import *
from astroML.plotting import hist
from matplotlib.backends.backend_pdf import PdfPages
from scipy.interpolate import interp1d
from astropy.modeling.models import Voigt1D
from astropy import constants as const
from astropy import units as U
from astropy.coordinates import SkyCoord
from dustmaps.sfd import SFDQuery
from specutils import extinction
from matplotlib.ticker import MaxNLocator
import seaborn as sns
from statsmodels.stats.outliers_influence import summary_table
import statsmodels.formula.api as smf
import statsmodels.api as sm
from pydl.pydl.pydlutils import yanny
from astropy.table import Table
from astropy.time import Time
from scipy.stats import kde
from scipy.ndimage.filters import gaussian_filter1d,uniform_filter1d
"""
Program to explore the search for CLQs in the eBOSS quasar catalog
@author : Vivek M.
@date : 12/April/2019
@version: 1.0
"""
spallversion='v5_13_0'
params = {
'axes.labelsize': 18,
'axes.linewidth': 1.5,
#'text.fontsize': 8,
'legend.fontsize': 15,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True,
#'figure.figsize': [16, 5]
'legend.frameon': False,
'font.family': 'Times New Roman'
}
plt.rcParams.update(params)
hfont = {'fontname':'Times New Roman'}
def computeSN1700(wave,flux,err):
ww=np.where((wave >=1650) & (wave <= 1750))[0]
return np.median(flux[ww])/np.median(err[ww])
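# Editor's hedged note (not from the original source): computeSN1700 expects
# rest-frame wavelengths, so callers first divide the observed wavelength grid by
# (1+z), exactly as plot_spectra does further down. Here `spec` is an illustrative
# name for the HDU-1 table of an opened spec-*.fits file:
#
#   wave_rest = 10**spec['loglam'] / (1.0 + zvi)
#   sn = computeSN1700(wave_rest, spec['flux'], 1.0/np.sqrt(spec['ivar']))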
def download_spectra(plate, mjd, fiber, dirname='.'):
''' Downloads SDSS spectra from DR14 and puts them in dirname
Change the SDSS URL to download from a different location
'''
FITS_FILENAME = 'spec-%(plate)04i-%(mjd)05i-%(fiber)04i.fits'
try :
SDSS_URL = ('https://data.sdss.org/sas/dr14/eboss/spectro/redux/v5_10_0/spectra/%(plate)04i/'
'spec-%(plate)04i-%(mjd)05i-%(fiber)04i.fits')
urllib2.urlopen(SDSS_URL % dict(plate=plate,mjd=mjd,fiber=fiber))
print 'Downloading from dr14'
except urllib2.HTTPError as err:
if err.code == 404 :
SDSS_URL = ('https://data.sdss.org/sas/dr8/sdss/spectro/redux/26/spectra/%(plate)04i/'
'spec-%(plate)04i-%(mjd)05i-%(fiber)04i.fits')
print 'Downloading from dr8'
print SDSS_URL % dict(plate=plate,mjd=mjd,fiber=fiber)
download_url = 'wget '+SDSS_URL % dict(plate=plate,mjd=mjd,fiber=fiber)
print download_url
os.system(download_url)
mv_cmd='mv '+FITS_FILENAME % dict(plate=plate,mjd=mjd,fiber=fiber) + ' '+dirname+'/.'
#print mv_cmd
os.system(mv_cmd)
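# Editor's hedged usage example (not from the original source); the plate/MJD/fiber
# values below are placeholders, not real targets:
#
#   download_spectra(7512, 56777, 123, dirname='CLQsearch_spectra')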
def checkUniqueN():
data=fits.open('CrossMatch_DR12Q_spAll_v5_13_0.fits')[1].data
print 'Number of entries matching DR12 and spAll_v5-13-0:',len(data['SDSS_NAME'])
print 'Number of quasars matching DR12 and spAll_v5-13-0:',len(np.unique(data['SDSS_NAME']))
def MultipleEpochQSOs():
redux= '/uufs/chpc.utah.edu/common/home/sdss/ebosswork/eboss/spectro/redux/v5_13_0/spectra/lite'
data=fits.open('CrossMatch_DR12Q_spAll_v5_13_0.fits')[1].data
uname = np.unique(data['SDSS_NAME'])
count = 0
gcount = 0
out=open('Candidate_CLQ_search_DR16.txt','w')
name = [] ; ra=[] ; dec=[];zvi=[]
umag1 =[] ;gmag1=[];rmag1=[];imag1=[];zmag1=[]
umag2 =[] ;gmag2=[];rmag2=[];imag2=[];zmag2=[]
#umag1err =[] ;gmag1err=[];rmag1err=[];imag1err=[];zmag1err=[]
#umag2err =[] ;gmag2err=[];rmag2err=[];imag2err=[];zmag2err=[]
plate1 = [] ; mjd1=[];fiber1=[]
plate2 = [] ; mjd2=[];fiber2=[]
gdiff = [] ; rdiff = [] ; idiff = []
for i in range(len(uname)):
#for i in range(150):
xx=np.where(data['SDSS_NAME'] == uname[i])[0]
if len(xx)>1:
ndata = data[xx]
#print uname[i],len(xx),xx,data['PLATE_1'][xx[0]],data['MJD_1'][xx[0]],data['FIBERID_1'][xx[0]],data['PLATE_2'][xx[0]],data['MJD_2'][xx[0]],data['FIBERID_2'][xx[0]],data['PLATE_2'][xx[-1]],data['MJD_2'][xx[-1]],data['FIBERID_2'][xx[-1]],data['FIBERMAG'][xx[0],1],data['FIBER2MAG'][xx[-1],1],data['FIBERFLUX'][xx[0],2],data['FIBERFLUX'][xx[-1],2]
mjdl = ndata['MJD_2']
maxmjd = max(mjdl)
minmjd = min(mjdl)
xmax = np.where(mjdl == maxmjd)[0][0]
xmin = np.where(mjdl == minmjd)[0][0]
print mjdl,maxmjd,minmjd
print xmax,xmin,ndata['MJD_2'][xmax],ndata['PLATE_2'][xmax],ndata['FIBERID_2'][xmax],ndata['MJD_2'][xmin],ndata['PLATE_2'][xmin],ndata['FIBERID_2'][xmin]
#ksjhdf=raw_input()
#print 'Check', ndata['MJD_2'],ndata['SDSS_NAME'],ndata['PLATE_2'],ndata['FIBERID_2']
#plate1 = data['PLATE_2'][xx[0]] ; plate2 = data['PLATE_2'][xx[-1]]
#pmf1 = '{0:04d}-{1:05d}-{2:04d}'.format(data['PLATE_2'][xx[0]],data['MJD_2'][xx[0]],data['FIBERID_2'][xx[0]])
#pmf2 = '{0:04d}-{1:05d}-{2:04d}'.format(data['PLATE_2'][xx[-1]],data['MJD_2'][xx[-1]],data['FIBERID_2'][xx[-1]])
#data1 = fits.open(os.path.join(redux,plate1,'spec-'+pmf1+'.fits'))[1].data
gdiff.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,1])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,1])))
rdiff.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,2])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,2])))
idiff.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,3])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,3])))
if np.abs((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,1])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,1]))) > 1 :
print>>out,'{0}\t{1}\t{2:10.5f}\t{3:10.5f}\t{4:10.5f}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}'.format( uname[i],len(xx),ndata['RA_1'][0],ndata['DEC_1'][0],ndata['Z_VI'][0],ndata['PLATE_2'][xmin],ndata['MJD_2'][xmin],ndata['FIBERID_2'][xmin],ndata['PLATE_2'][xmax],ndata['MJD_2'][xmax],ndata['FIBERID_2'][xmax])
gcount +=1
name.append(ndata['SDSS_NAME'][0])
ra.append(ndata['RA_1'][0])
dec.append(ndata['DEC_1'][0])
zvi.append(ndata['Z_VI'][0])
plate1.append(ndata['PLATE_2'][xmin])
mjd1.append(ndata['MJD_2'][xmin])
fiber1.append(ndata['FIBERID_2'][xmin])
plate2.append(ndata['PLATE_2'][xmax])
mjd2.append(ndata['MJD_2'][xmax])
fiber2.append(ndata['FIBERID_2'][xmax])
umag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,0])))
gmag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,1])))
rmag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,2])))
imag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,3])))
zmag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,4])))
umag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,0])))
gmag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,1])))
rmag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,2])))
imag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,3])))
zmag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,4])))
count +=1
print str(i+1)+'/'+str(len(uname))+' Running and Found candidates: '+str(gcount)
np.savez('CLQsearch_MasterList_Plate-MJD-Fiber.npz',
name = np.array(name) ,
ra = np.array(ra) ,
dec = np.array(dec) ,
zvi = np.array(zvi) ,
plate1 = np.array(plate1) ,
mjd1 = np.array(mjd1) ,
fiber1 = np.array(fiber1) ,
plate2 = np.array(plate2) ,
mjd2 = np.array(mjd2) ,
fiber2 = np.array(fiber2) ,
umag1=np.array(umag1) ,
gmag1=np.array(gmag1) ,
rmag1=np.array(rmag1) ,
imag1=np.array(imag1) ,
zmag1=np.array(zmag1) ,
umag2=np.array(umag2) ,
gmag2=np.array(gmag2) ,
rmag2=np.array(rmag2) ,
imag2=np.array(imag2) ,
zmag2=np.array(zmag2) ,
)
#print count
#print gdiff
out.close()
gdiff=np.array(gdiff)
rdiff=np.array(rdiff)
idiff=np.array(idiff)
yy=np.where(np.abs(gdiff) > 1)[0]
fig,(ax,ax1,ax2)=plt.subplots(1,3,figsize=(10,5))
ax.plot(gdiff,idiff,'.',color='black',label='gmag vs imag')
nbins=20
#k = kde.gaussian_kde((gdiff,idiff))
#xi, yi = np.mgrid[gdiff.min():gdiff.max():nbins*1j, idiff.min():idiff.max():nbins*1j]
#zi = k(np.vstack([xi.flatten(), yi.flatten()]))
#ax.pcolormesh(xi, yi, zi.reshape(xi.shape), shading='gouraud', cmap=plt.cm.BuGn_r)
#ax.contour(xi, yi, zi.reshape(xi.shape) )
ax.plot(gdiff,idiff,'.',color='black',label='gmag vs imag')
ax1.plot(gdiff,rdiff,'.',color='red',label='gmag vs rmag')
ax2.plot(rdiff,idiff,'.',color='blue',label='rmag vs imag')
ax.set(xlabel='$\Delta$g-mag',ylabel='$\Delta$i-mag')
ax1.set(xlabel='$\Delta$g-mag',ylabel='$\Delta$r-mag')
ax2.set(xlabel='$\Delta$r-mag',ylabel='$\Delta$i-mag')
fig.tight_layout()
fig.savefig('Candidate_CLQsearch_DR16_color-magn.jpg')
plt.show()
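def _spectroflux_to_mag(flux_nmgy):
    """Editor's hedged helper (not in the original script): SPECTROFLUX is stored in
    nanomaggies, so the magnitudes compared in MultipleEpochQSOs are simply
    22.5 - 2.5*log10(flux). Added only to name the conversion repeated inline above."""
    return 22.5 - 2.5 * np.log10(flux_nmgy)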
def download_spectraCLQ(print_cmd=False):
clqC=np.genfromtxt('Candidate_CLQ_search_DR16c.txt',names=['name','nepoch','ra','dec','zvi','plate1','mjd1','fiber1','plate2','mjd2','fiber2'],dtype=('|S30',int,float,float,float,int,int,int,int,int,int))
if print_cmd :
cptxt = open('CLQcopyspectrumfromBOSS_SPECTRO_REDUX_v5_13.txt','w')
checkdownload=[]
specdir = 'CLQsearch_spectra'
for i in range(len(clqC['name'])):
#for i in range(10):
plates=[clqC['plate1'][i],clqC['plate2'][i]]
mjds=[clqC['mjd1'][i],clqC['mjd2'][i]]
fibers=[clqC['fiber1'][i],clqC['fiber2'][i]]
for j in range(2):
if plates[j] >= 10000:
FITS_FILENAME = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(plates[j],mjds[j],fibers[j])
else:
FITS_FILENAME = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(plates[j],mjds[j],fibers[j])
if not print_cmd:
if not ((os.path.isfile(FITS_FILENAME)) | (os.path.isfile(os.path.join(specdir,FITS_FILENAME)))):
download_spectra(plates[j],mjds[j],fibers[j],specdir)
else :
print 'Spectrum already downloaded'
if print_cmd:
print>>cptxt, 'cp /uufs/chpc.utah.edu/common/home/sdss/ebosswork/eboss/spectro/redux/{0}/spectra/lite/{1}/{2} ~/BOSS_BALDATA_CLQ/.'.format(spallversion,plates[j],FITS_FILENAME)
else :
if not ((os.path.isfile(FITS_FILENAME)) & (os.path.isfile(os.path.join(specdir,FITS_FILENAME)))):
checkdownload.append(FITS_FILENAME)
print '--'*31
# kfgh=raw_input()
print checkdownload
if print_cmd:
cptxt.close()
def smoothCRTS(mjd,mag,magerr):
mjd= np.array(mjd) ; mag = np.array(mag) ; magerr= np.array(magerr)
minmjd =min(mjd)
msort = np.argsort(mjd)
mjd = mjd[msort] ; mag = mag[msort] ; magerr = magerr[msort]
mjddiff = mjd[1:] - mjd[0:-1]
gp = np.where(mjddiff > 100)[0]
print type(gp)
ngp = np.insert(np.insert(gp,0,0),len(gp)+1,len(mjd)-1)
#fig,ax = plt.subplots(figsize=(10,5))
#ax.plot(mjd,mag,'ok',alpha=0.2,label=name)
#ax.set_xlabel('MJD')
#ax.set_ylabel('CRTS V-mag')
#ax.legend(loc=1)
medmjd = []
medmag = []
medmagerr = []
for ig,g in enumerate(ngp[0:-1]):
#print mjd[g]
if ig == 0:
xg = np.where((mjd >= mjd[ngp[ig]]-10) & (mjd <=mjd[ngp[ig+1]]+10))[0]
medmag.append(np.median(mag[xg]))
medmjd.append(np.mean(mjd[xg]))
medmagerr.append(np.std(mag[xg])/np.sqrt(len(xg)))
# ax.axvline(mjd[g]-10,ls='--',color='red')
else:
xg = np.where((mjd >= mjd[ngp[ig]]+10) & (mjd <=mjd[ngp[ig+1]]+10))[0]
medmag.append(np.median(mag[xg]))
medmjd.append(np.mean(mjd[xg]))
medmagerr.append(np.std(mag[xg])/np.sqrt(len(xg)))
return medmjd,medmag,medmagerr
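# Editor's hedged note (not from the original source): smoothCRTS splits the CRTS
# light curve at gaps longer than 100 days (observing seasons) and returns the
# per-season mean MJD, median magnitude, and its standard error, e.g.
#
#   med_mjd, med_mag, med_err = smoothCRTS(crts_mjd, crts_vmag, crts_verr)
#
# as applied to the CRTS photometry inside plot_spectra below.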
def plot_spectra():
text_font = {'fontname':'Times New Roman', 'size':'14'}
pp = PdfPages('CLQsearches_plot_spectra_sn1700gt6.pdf')
clqC=np.genfromtxt('Candidate_CLQ_search_DR16c.txt',names=['name','nepoch','ra','dec','zvi','plate1','mjd1','fiber1','plate2','mjd2','fiber2'],dtype=('|S30',int,float,float,float,int,int,int,int,int,int))
master =np.load('CLQsearch_MasterList_Plate-MJD-Fiber.npz')
data=fits.open('CrossMatch_DR12Q_spAll_v5_13_0.fits')[1].data
crts = pd.read_csv('CRTS_lc_CLQsearchSample_sn1700gt6.csv') | cptxt = open('copyCLQadditionalspectra.txt','w')
for i in range(len(clqC['name'])):
#for i in range(100):
print 'Working on ',i,' source'
xx = np.where((master['plate1'] == clqC['plate1'][i]) &(master['mjd1'] == clqC['mjd1'][i]) & (master['fiber1'] == clqC['fiber1'][i]))[0][0]
xy =np.where(data['SDSS_NAME'] == clqC['name'][i])[0]
ndata = data[xy]
print xx
if clqC['plate1'][i] >= 10000:
FITS_FILENAME1 = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(clqC['plate1'][i],clqC['mjd1'][i],clqC['fiber1'][i])
else:
FITS_FILENAME1 = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(clqC['plate1'][i],clqC['mjd1'][i],clqC['fiber1'][i])
if clqC['plate2'][i] >= 10000:
FITS_FILENAME2 = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(clqC['plate2'][i],clqC['mjd2'][i],clqC['fiber2'][i])
else:
FITS_FILENAME2 = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(clqC['plate2'][i],clqC['mjd2'][i],clqC['fiber2'][i])
data1 = fits.open(os.path.join(specdir,FITS_FILENAME1))[1].data
data2 = fits.open(os.path.join(specdir,FITS_FILENAME2))[1].data
gflux1 = (data1['flux']*(data1['and_mask'] == 0)).copy()
gflux2 = (data2['flux']*(data2['and_mask'] == 0)).copy()
zvi = clqC['zvi'][i]
sn1 =computeSN1700(10**data1['loglam']/(1.0+zvi),data1['flux'],1.0/np.sqrt(data1['ivar']))
sn2 =computeSN1700(10**data2['loglam']/(1.0+zvi),data2['flux'],1.0/np.sqrt(data2['ivar']))
if ((np.median(gflux1) != 0) & (np.median(gflux2) != 0) & (sn1 > 6) & (sn2 > 6)) :
fig=plt.figure(figsize=(18,8))
ax=plt.subplot2grid((2, 3), (0, 0), colspan=2,rowspan=2)
ax1 = plt.subplot2grid((2, 3), (0, 2))
ax.plot(10**data1['loglam']/(1.0+zvi),gaussian_filter1d(data1['flux'],2),color='black',alpha=0.5,label=FITS_FILENAME1.split('.')[0][5:])
ax.plot(10**data2['loglam']/(1.0+zvi),gaussian_filter1d(data2['flux'],2),color='red',alpha=0.5,label=FITS_FILENAME2.split('.')[0][5:])
ax.plot(10**data1['loglam']/(1.0+zvi),1.0/np.sqrt(data1['ivar']),color='black',alpha=0.1)
ax.plot(10**data2['loglam']/(1.0+zvi),1.0/np.sqrt(data2['ivar']),color='red',alpha=0.1)
string1 = 'SDSS J{0}\tZ\_VI: {1:4.4f}\tN$\_{{spec}}$: {2}'.format(clqC['name'][i],zvi,clqC['nepoch'][i])
string2 = 'RA: {0:4.4f}\tDEC: {1:4.4f}'.format(clqC['ra'][i],clqC['dec'][i])
string3 = '{0:20}- {1:3.2f} {2:3.2f} {3:3.2f} {4:3.2f} {5:3.2f}'.format(FITS_FILENAME1.split('.')[0][5:], master['umag1'][xx], master['gmag1'][xx], master['rmag1'][xx], master['imag1'][xx], master['zmag1'][xx])
string4 = '{0:20}- {1:3.2f} {2:3.2f} {3:3.2f} {4:3.2f} {5:3.2f}'.format(FITS_FILENAME2.split('.')[0][5:], master['umag2'][xx], master['gmag2'][xx], master['rmag2'][xx], master['imag2'][xx], master['zmag2'][xx])
string5 = '{0:20}- {1:3.2f} {2:3.2f} {3:3.2f} {4:3.2f} {5:3.2f}'.format('$\Delta m(2-1)$', master['umag2'][xx] -master['umag1'][xx] , master['gmag2'][xx]-master['gmag1'][xx], master['rmag2'][xx]-master['rmag1'][xx], master['imag2'][xx]-master['imag1'][xx], master['zmag2'][xx] - master['zmag1'][xx])
ax.set(xlabel='Rest wavelength ($\AA$)',ylabel='Flux',ylim=(-2,max(np.median(gflux1)+3*np.std(gflux1),np.median(gflux2)+3*np.std(gflux2) )))
xlim,ylim=ax.get_xlim(),ax.get_ylim()
print string1,xlim,ylim
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.05*(ylim[1] - ylim[0]), string1,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.09*(ylim[1] - ylim[0]), string2,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.13*(ylim[1] - ylim[0]), string3,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.17*(ylim[1] - ylim[0]), string4,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.21*(ylim[1] - ylim[0]), string5,fontsize=18)
obslambda = linelist['lambda']#*(1.+zvi)
x = np.where((obslambda > xlim[0]) & (obslambda < xlim[1]))[0]
plotlambda = obslambda[x]
plotname = linelist['Name'][x]
plota_e = linelist['a_e'][x]
#print plotlambda
for k in range(len(plotlambda)):
if plota_e[k].strip() == 'Abs.' :
ax.axvline(x=plotlambda[k], color='lawngreen', linestyle=':')
ax.text(plotlambda[k],ylim[0]+0.75*(ylim[1]-ylim[0]),plotname[k],color='Orange',ha='center',rotation=90,**text_font)
else :
ax.axvline(x=plotlambda[k], color='lightblue', linestyle=':')
ax.text(plotlambda[k],ylim[0]+0.75*(ylim[1]-ylim[0]),plotname[k],color='Brown',ha='center',rotation=90,**text_font)
#Download and plot the other epoch data
dupmjd=[]
if clqC['nepoch'][i] > 2:
xyz = np.where((ndata['MJD_2'] !=clqC['mjd1'][i]) & (ndata['MJD_2'] !=clqC['mjd2'][i]) )[0]
for k in range(len(xyz)):
if ndata['PLATE_2'][xyz[k]] >=10000 :
FITS_FILENAME = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(ndata['PLATE_2'][xyz[k]],ndata['MJD_2'][xyz[k]],ndata['FIBERID_2'][xyz[k]])
else:
FITS_FILENAME = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(ndata['PLATE_2'][xyz[k]],ndata['MJD_2'][xyz[k]],ndata['FIBERID_2'][xyz[k]])
if not ((os.path.isfile(FITS_FILENAME)) | (os.path.isfile(os.path.join(specdir,FITS_FILENAME)))):
download_spectra(ndata['PLATE_2'][xyz[k]],ndata['MJD_2'][xyz[k]],ndata['FIBERID_2'][xyz[k]],specdir)
print>>cptxt, 'cp /uufs/chpc.utah.edu/common/home/sdss/ebosswork/eboss/spectro/redux/{0}/spectra/lite/{1}/{2} ~/BOSS_BALDATA_CLQ/.'.format(spallversion,ndata['PLATE_2'][xyz[k]],FITS_FILENAME)
data0 = fits.open(os.path.join(specdir,FITS_FILENAME))[1].data
ax.plot(10**data0['loglam']/(1.0+zvi),gaussian_filter1d(data0['flux'],2),color=plt.cm.RdYlBu(k*300),alpha=0.5,label=FITS_FILENAME.split('.')[0][5:])
ax.plot(10**data0['loglam']/(1.0+zvi),1.0/np.sqrt(data0['ivar']),color=plt.cm.RdYlBu(k*300),alpha=0.1)
dupmjd.append(ndata['MJD_2'][xyz[k]])
ax.legend(loc=1)
crm = np.where(crts['InputID'] ==clqC['name'][i] )[0]
if len(crm) > 0 :
CRTS_Vmag = crts['Mag'][crm]
CRTS_Verr = crts['Magerr'][crm]
CRTS_MJD = crts['MJD'][crm]
ax1.errorbar(CRTS_MJD,CRTS_Vmag,yerr=CRTS_Verr,fmt='v',color='gold',label='CRTS',alpha=0.75)
CRTS_medmjd,CRTS_medmag,CRTS_medmagerr = smoothCRTS(CRTS_MJD,CRTS_Vmag,CRTS_Verr)
ax1.errorbar(CRTS_medmjd,CRTS_medmag,yerr=CRTS_medmagerr,fmt='v',color='brown',alpha=0.75)
ax1.set_ylim(ax1.get_ylim()[::-1])
ax1.set(xlabel='MJD',ylabel='V-mag')
ax1_ylim = ax1.get_ylim()
ax1.legend(loc=1)
ax1.axvline(clqC['mjd1'][i],ls='--',color='black',lw=3,zorder=-1,alpha=0.45)
ax1.text(clqC['mjd1'][i], ax1_ylim[0]+0.2*(ax1_ylim[1] - ax1_ylim[0]),str(clqC['mjd1'][i]),fontsize=12,rotation='vertical')
ax1.axvline(clqC['mjd2'][i],ls='--',color='red',lw=3,zorder=-1,alpha=0.45)
ax1.text(clqC['mjd2'][i], ax1_ylim[0]+0.2*(ax1_ylim[1] - ax1_ylim[0]),str(clqC['mjd2'][i]),fontsize=12,rotation='vertical')
for mm in dupmjd:
ax1.axvline(mm,ls='--',color='blue',lw=3,zorder=-1,alpha=0.45)
ax1.text(mm, ax1_ylim[0]+0.2*(ax1_ylim[1] - ax1_ylim[0]),str(mm),fontsize=12,rotation='vertical')
fig.tight_layout()
fig.savefig(pp,format='pdf')
print>>out, '{0}\t{1}\t{2:10.5f}\t{3:10.5f}\t{4:10.5f}\t{5:10.5f}\t{6:10.5f}\t{7:10.5f}\t{8:10.5f}\t{9:10.5f}'.format(clqC['name'][i],clqC['nepoch'][i],clqC['ra'][i],clqC['dec'][i],clqC['zvi'][i], master['umag2'][xx] -master['umag1'][xx] , master['gmag2'][xx]-master['gmag1'][xx], master['rmag2'][xx]-master['rmag1'][xx], master['imag2'][xx]-master['imag1'][xx], master['zmag2'][xx] - master['zmag1'][xx])
out.close()
pp.close()
def main():
print '-------'
#checkUniqueN()
#MultipleEpochQSOs()
#download_spectraCLQ()
plot_spectra()
if __name__== "__main__":
main() | specdir = 'CLQsearch_spectra'
linelist = np.genfromtxt('/Users/vzm83/Proposals/linelist_speccy.txt',usecols=(0,1,2),dtype=('|S10',float,'|S5'),names=True)
out = open('CLQsearch_deltamgs_sn1700gt6.txt','w') | random_line_split |
CLQ_search_new.py | import numpy as np
import astropy.io.fits as fits
import scipy as sp
import matplotlib.pyplot as plt
import astroML
from matplotlib.ticker import NullFormatter
import pandas as pd
import os
import urllib2
from scipy import stats
from pydl.pydl.pydlutils.spheregroup import *
from astroML.plotting import hist
from matplotlib.backends.backend_pdf import PdfPages
from scipy.interpolate import interp1d
from astropy.modeling.models import Voigt1D
from astropy import constants as const
from astropy import units as U
from astropy.coordinates import SkyCoord
from dustmaps.sfd import SFDQuery
from specutils import extinction
from matplotlib.ticker import MaxNLocator
import seaborn as sns
from statsmodels.stats.outliers_influence import summary_table
import statsmodels.formula.api as smf
import statsmodels.api as sm
from pydl.pydl.pydlutils import yanny
from astropy.table import Table
from astropy.time import Time
from scipy.stats import kde
from scipy.ndimage.filters import gaussian_filter1d,uniform_filter1d
"""
Program to explore the search for CLQs in the eBOSS quasar catalog
@author : Vivek M.
@date : 12/April/2019
@version: 1.0
"""
spallversion='v5_13_0'
params = {
'axes.labelsize': 18,
'axes.linewidth': 1.5,
#'text.fontsize': 8,
'legend.fontsize': 15,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True,
#'figure.figsize': [16, 5]
'legend.frameon': False,
'font.family': 'Times New Roman'
}
plt.rcParams.update(params)
hfont = {'fontname':'Times New Roman'}
def computeSN1700(wave,flux,err):
ww=np.where((wave >=1650) & (wave <= 1750))[0]
return np.median(flux[ww])/np.median(err[ww])
def download_spectra(plate, mjd, fiber, dirname='.'):
''' Downloads SDSS spectra from DR14 and puts them in dirname
Change the SDSS URL to download from a different location
'''
FITS_FILENAME = 'spec-%(plate)04i-%(mjd)05i-%(fiber)04i.fits'
try :
SDSS_URL = ('https://data.sdss.org/sas/dr14/eboss/spectro/redux/v5_10_0/spectra/%(plate)04i/'
'spec-%(plate)04i-%(mjd)05i-%(fiber)04i.fits')
urllib2.urlopen(SDSS_URL % dict(plate=plate,mjd=mjd,fiber=fiber))
print 'Downloading from dr14'
except urllib2.HTTPError as err:
if err.code == 404 :
SDSS_URL = ('https://data.sdss.org/sas/dr8/sdss/spectro/redux/26/spectra/%(plate)04i/'
'spec-%(plate)04i-%(mjd)05i-%(fiber)04i.fits')
print 'Downloading from dr8'
print SDSS_URL % dict(plate=plate,mjd=mjd,fiber=fiber)
download_url = 'wget '+SDSS_URL % dict(plate=plate,mjd=mjd,fiber=fiber)
print download_url
os.system(download_url)
mv_cmd='mv '+FITS_FILENAME % dict(plate=plate,mjd=mjd,fiber=fiber) + ' '+dirname+'/.'
#print mv_cmd
os.system(mv_cmd)
def checkUniqueN():
data=fits.open('CrossMatch_DR12Q_spAll_v5_13_0.fits')[1].data
print 'Number of entries matching DR12 and spAll_v5-13-0:',len(data['SDSS_NAME'])
print 'Number of quasars matching DR12 and spAll_v5-13-0:',len(np.unique(data['SDSS_NAME']))
def MultipleEpochQSOs():
redux= '/uufs/chpc.utah.edu/common/home/sdss/ebosswork/eboss/spectro/redux/v5_13_0/spectra/lite'
data=fits.open('CrossMatch_DR12Q_spAll_v5_13_0.fits')[1].data
uname = np.unique(data['SDSS_NAME'])
count = 0
gcount = 0
out=open('Candidate_CLQ_search_DR16.txt','w')
name = [] ; ra=[] ; dec=[];zvi=[]
umag1 =[] ;gmag1=[];rmag1=[];imag1=[];zmag1=[]
umag2 =[] ;gmag2=[];rmag2=[];imag2=[];zmag2=[]
#umag1err =[] ;gmag1err=[];rmag1err=[];imag1err=[];zmag1err=[]
#umag2err =[] ;gmag2err=[];rmag2err=[];imag2err=[];zmag2err=[]
plate1 = [] ; mjd1=[];fiber1=[]
plate2 = [] ; mjd2=[];fiber2=[]
gdiff = [] ; rdiff = [] ; idiff = []
for i in range(len(uname)):
#for i in range(150):
xx=np.where(data['SDSS_NAME'] == uname[i])[0]
if len(xx)>1:
ndata = data[xx]
#print uname[i],len(xx),xx,data['PLATE_1'][xx[0]],data['MJD_1'][xx[0]],data['FIBERID_1'][xx[0]],data['PLATE_2'][xx[0]],data['MJD_2'][xx[0]],data['FIBERID_2'][xx[0]],data['PLATE_2'][xx[-1]],data['MJD_2'][xx[-1]],data['FIBERID_2'][xx[-1]],data['FIBERMAG'][xx[0],1],data['FIBER2MAG'][xx[-1],1],data['FIBERFLUX'][xx[0],2],data['FIBERFLUX'][xx[-1],2]
mjdl = ndata['MJD_2']
maxmjd = max(mjdl)
minmjd = min(mjdl)
xmax = np.where(mjdl == maxmjd)[0][0]
xmin = np.where(mjdl == minmjd)[0][0]
print mjdl,maxmjd,minmjd
print xmax,xmin,ndata['MJD_2'][xmax],ndata['PLATE_2'][xmax],ndata['FIBERID_2'][xmax],ndata['MJD_2'][xmin],ndata['PLATE_2'][xmin],ndata['FIBERID_2'][xmin]
#ksjhdf=raw_input()
#print 'Check', ndata['MJD_2'],ndata['SDSS_NAME'],ndata['PLATE_2'],ndata['FIBERID_2']
#plate1 = data['PLATE_2'][xx[0]] ; plate2 = data['PLATE_2'][xx[-1]]
#pmf1 = '{0:04d}-{1:05d}-{2:04d}'.format(data['PLATE_2'][xx[0]],data['MJD_2'][xx[0]],data['FIBERID_2'][xx[0]])
#pmf2 = '{0:04d}-{1:05d}-{2:04d}'.format(data['PLATE_2'][xx[-1]],data['MJD_2'][xx[-1]],data['FIBERID_2'][xx[-1]])
#data1 = fits.open(os.path.join(redux,plate1,'spec-'+pmf1+'.fits'))[1].data
gdiff.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,1])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,1])))
rdiff.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,2])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,2])))
idiff.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,3])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,3])))
if np.abs((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,1])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,1]))) > 1 :
print>>out,'{0}\t{1}\t{2:10.5f}\t{3:10.5f}\t{4:10.5f}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}'.format( uname[i],len(xx),ndata['RA_1'][0],ndata['DEC_1'][0],ndata['Z_VI'][0],ndata['PLATE_2'][xmin],ndata['MJD_2'][xmin],ndata['FIBERID_2'][xmin],ndata['PLATE_2'][xmax],ndata['MJD_2'][xmax],ndata['FIBERID_2'][xmax])
gcount +=1
name.append(ndata['SDSS_NAME'][0])
ra.append(ndata['RA_1'][0])
dec.append(ndata['DEC_1'][0])
zvi.append(ndata['Z_VI'][0])
plate1.append(ndata['PLATE_2'][xmin])
mjd1.append(ndata['MJD_2'][xmin])
fiber1.append(ndata['FIBERID_2'][xmin])
plate2.append(ndata['PLATE_2'][xmax])
mjd2.append(ndata['MJD_2'][xmax])
fiber2.append(ndata['FIBERID_2'][xmax])
umag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,0])))
gmag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,1])))
rmag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,2])))
imag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,3])))
zmag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,4])))
umag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,0])))
gmag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,1])))
rmag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,2])))
imag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,3])))
zmag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,4])))
count +=1
print str(i+1)+'/'+str(len(uname))+' Running and Found candidates: '+str(gcount)
np.savez('CLQsearch_MasterList_Plate-MJD-Fiber.npz',
name = np.array(name) ,
ra = np.array(ra) ,
dec = np.array(dec) ,
zvi = np.array(zvi) ,
plate1 = np.array(plate1) ,
mjd1 = np.array(mjd1) ,
fiber1 = np.array(fiber1) ,
plate2 = np.array(plate2) ,
mjd2 = np.array(mjd2) ,
fiber2 = np.array(fiber2) ,
umag1=np.array(umag1) ,
gmag1=np.array(gmag1) ,
rmag1=np.array(rmag1) ,
imag1=np.array(imag1) ,
zmag1=np.array(zmag1) ,
umag2=np.array(umag2) ,
gmag2=np.array(gmag2) ,
rmag2=np.array(rmag2) ,
imag2=np.array(imag2) ,
zmag2=np.array(zmag2) ,
)
#print count
#print gdiff
out.close()
gdiff=np.array(gdiff)
rdiff=np.array(rdiff)
idiff=np.array(idiff)
yy=np.where(np.abs(gdiff) > 1)[0]
fig,(ax,ax1,ax2)=plt.subplots(1,3,figsize=(10,5))
ax.plot(gdiff,idiff,'.',color='black',label='gmag vs imag')
nbins=20
#k = kde.gaussian_kde((gdiff,idiff))
#xi, yi = np.mgrid[gdiff.min():gdiff.max():nbins*1j, idiff.min():idiff.max():nbins*1j]
#zi = k(np.vstack([xi.flatten(), yi.flatten()]))
#ax.pcolormesh(xi, yi, zi.reshape(xi.shape), shading='gouraud', cmap=plt.cm.BuGn_r)
#ax.contour(xi, yi, zi.reshape(xi.shape) )
ax.plot(gdiff,idiff,'.',color='black',label='gmag vs imag')
ax1.plot(gdiff,rdiff,'.',color='red',label='gmag vs rmag')
ax2.plot(rdiff,idiff,'.',color='blue',label='rmag vs imag')
ax.set(xlabel='$\Delta$g-mag',ylabel='$\Delta$i-mag')
ax1.set(xlabel='$\Delta$g-mag',ylabel='$\Delta$r-mag')
ax2.set(xlabel='$\Delta$r-mag',ylabel='$\Delta$i-mag')
fig.tight_layout()
fig.savefig('Candidate_CLQsearch_DR16_color-magn.jpg')
plt.show()
def download_spectraCLQ(print_cmd=False):
clqC=np.genfromtxt('Candidate_CLQ_search_DR16c.txt',names=['name','nepoch','ra','dec','zvi','plate1','mjd1','fiber1','plate2','mjd2','fiber2'],dtype=('|S30',int,float,float,float,int,int,int,int,int,int))
if print_cmd :
cptxt = open('CLQcopyspectrumfromBOSS_SPECTRO_REDUX_v5_13.txt','w')
checkdownload=[]
specdir = 'CLQsearch_spectra'
for i in range(len(clqC['name'])):
#for i in range(10):
plates=[clqC['plate1'][i],clqC['plate2'][i]]
mjds=[clqC['mjd1'][i],clqC['mjd2'][i]]
fibers=[clqC['fiber1'][i],clqC['fiber2'][i]]
for j in range(2):
if plates[j] >= 10000:
FITS_FILENAME = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(plates[j],mjds[j],fibers[j])
else:
FITS_FILENAME = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(plates[j],mjds[j],fibers[j])
if not print_cmd:
if not ((os.path.isfile(FITS_FILENAME)) | (os.path.isfile(os.path.join(specdir,FITS_FILENAME)))):
download_spectra(plates[j],mjds[j],fibers[j],specdir)
else :
print 'Spectrum already downloaded'
if print_cmd:
print>>cptxt, 'cp /uufs/chpc.utah.edu/common/home/sdss/ebosswork/eboss/spectro/redux/{0}/spectra/lite/{1}/{2} ~/BOSS_BALDATA_CLQ/.'.format(spallversion,plates[j],FITS_FILENAME)
else :
if not ((os.path.isfile(FITS_FILENAME)) & (os.path.isfile(os.path.join(specdir,FITS_FILENAME)))):
checkdownload.append(FITS_FILENAME)
print '--'*31
# kfgh=raw_input()
print checkdownload
if print_cmd:
cptxt.close()
def smoothCRTS(mjd,mag,magerr):
mjd= np.array(mjd) ; mag = np.array(mag) ; magerr= np.array(magerr)
minmjd =min(mjd)
msort = np.argsort(mjd)
mjd = mjd[msort] ; mag = mag[msort] ; magerr = magerr[msort]
mjddiff = mjd[1:] - mjd[0:-1]
gp = np.where(mjddiff > 100)[0]
print type(gp)
ngp = np.insert(np.insert(gp,0,0),len(gp)+1,len(mjd)-1)
#fig,ax = plt.subplots(figsize=(10,5))
#ax.plot(mjd,mag,'ok',alpha=0.2,label=name)
#ax.set_xlabel('MJD')
#ax.set_ylabel('CRTS V-mag')
#ax.legend(loc=1)
medmjd = []
medmag = []
medmagerr = []
for ig,g in enumerate(ngp[0:-1]):
#print mjd[g]
if ig == 0:
xg = np.where((mjd >= mjd[ngp[ig]]-10) & (mjd <=mjd[ngp[ig+1]]+10))[0]
medmag.append(np.median(mag[xg]))
medmjd.append(np.mean(mjd[xg]))
medmagerr.append(np.std(mag[xg])/np.sqrt(len(xg)))
# ax.axvline(mjd[g]-10,ls='--',color='red')
else:
xg = np.where((mjd >= mjd[ngp[ig]]+10) & (mjd <=mjd[ngp[ig+1]]+10))[0]
medmag.append(np.median(mag[xg]))
medmjd.append(np.mean(mjd[xg]))
medmagerr.append(np.std(mag[xg])/np.sqrt(len(xg)))
return medmjd,medmag,medmagerr
def | ():
text_font = {'fontname':'Times New Roman', 'size':'14'}
pp = PdfPages('CLQsearches_plot_spectra_sn1700gt6.pdf')
clqC=np.genfromtxt('Candidate_CLQ_search_DR16c.txt',names=['name','nepoch','ra','dec','zvi','plate1','mjd1','fiber1','plate2','mjd2','fiber2'],dtype=('|S30',int,float,float,float,int,int,int,int,int,int))
master =np.load('CLQsearch_MasterList_Plate-MJD-Fiber.npz')
data=fits.open('CrossMatch_DR12Q_spAll_v5_13_0.fits')[1].data
crts = pd.read_csv('CRTS_lc_CLQsearchSample_sn1700gt6.csv')
specdir = 'CLQsearch_spectra'
linelist = np.genfromtxt('/Users/vzm83/Proposals/linelist_speccy.txt',usecols=(0,1,2),dtype=('|S10',float,'|S5'),names=True)
out = open('CLQsearch_deltamgs_sn1700gt6.txt','w')
cptxt = open('copyCLQadditionalspectra.txt','w')
for i in range(len(clqC['name'])):
#for i in range(100):
print 'Working on ',i,' source'
xx = np.where((master['plate1'] == clqC['plate1'][i]) &(master['mjd1'] == clqC['mjd1'][i]) & (master['fiber1'] == clqC['fiber1'][i]))[0][0]
xy =np.where(data['SDSS_NAME'] == clqC['name'][i])[0]
ndata = data[xy]
print xx
if clqC['plate1'][i] >= 10000:
FITS_FILENAME1 = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(clqC['plate1'][i],clqC['mjd1'][i],clqC['fiber1'][i])
else:
FITS_FILENAME1 = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(clqC['plate1'][i],clqC['mjd1'][i],clqC['fiber1'][i])
if clqC['plate2'][i] >= 10000:
FITS_FILENAME2 = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(clqC['plate2'][i],clqC['mjd2'][i],clqC['fiber2'][i])
else:
FITS_FILENAME2 = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(clqC['plate2'][i],clqC['mjd2'][i],clqC['fiber2'][i])
data1 = fits.open(os.path.join(specdir,FITS_FILENAME1))[1].data
data2 = fits.open(os.path.join(specdir,FITS_FILENAME2))[1].data
gflux1 = (data1['flux']*(data1['and_mask'] == 0)).copy()
gflux2 = (data2['flux']*(data2['and_mask'] == 0)).copy()
zvi = clqC['zvi'][i]
sn1 =computeSN1700(10**data1['loglam']/(1.0+zvi),data1['flux'],1.0/np.sqrt(data1['ivar']))
sn2 =computeSN1700(10**data2['loglam']/(1.0+zvi),data2['flux'],1.0/np.sqrt(data2['ivar']))
if ((np.median(gflux1) != 0) & (np.median(gflux2) != 0) & (sn1 > 6) & (sn2 > 6)) :
fig=plt.figure(figsize=(18,8))
ax=plt.subplot2grid((2, 3), (0, 0), colspan=2,rowspan=2)
ax1 = plt.subplot2grid((2, 3), (0, 2))
ax.plot(10**data1['loglam']/(1.0+zvi),gaussian_filter1d(data1['flux'],2),color='black',alpha=0.5,label=FITS_FILENAME1.split('.')[0][5:])
ax.plot(10**data2['loglam']/(1.0+zvi),gaussian_filter1d(data2['flux'],2),color='red',alpha=0.5,label=FITS_FILENAME2.split('.')[0][5:])
ax.plot(10**data1['loglam']/(1.0+zvi),1.0/np.sqrt(data1['ivar']),color='black',alpha=0.1)
ax.plot(10**data2['loglam']/(1.0+zvi),1.0/np.sqrt(data2['ivar']),color='red',alpha=0.1)
string1 = 'SDSS J{0}\tZ\_VI: {1:4.4f}\tN$\_{{spec}}$: {2}'.format(clqC['name'][i],zvi,clqC['nepoch'][i])
string2 = 'RA: {0:4.4f}\tDEC: {1:4.4f}'.format(clqC['ra'][i],clqC['dec'][i])
string3 = '{0:20}- {1:3.2f} {2:3.2f} {3:3.2f} {4:3.2f} {5:3.2f}'.format(FITS_FILENAME1.split('.')[0][5:], master['umag1'][xx], master['gmag1'][xx], master['rmag1'][xx], master['imag1'][xx], master['zmag1'][xx])
string4 = '{0:20}- {1:3.2f} {2:3.2f} {3:3.2f} {4:3.2f} {5:3.2f}'.format(FITS_FILENAME2.split('.')[0][5:], master['umag2'][xx], master['gmag2'][xx], master['rmag2'][xx], master['imag2'][xx], master['zmag2'][xx])
string5 = '{0:20}- {1:3.2f} {2:3.2f} {3:3.2f} {4:3.2f} {5:3.2f}'.format('$\Delta m(2-1)$', master['umag2'][xx] -master['umag1'][xx] , master['gmag2'][xx]-master['gmag1'][xx], master['rmag2'][xx]-master['rmag1'][xx], master['imag2'][xx]-master['imag1'][xx], master['zmag2'][xx] - master['zmag1'][xx])
ax.set(xlabel='Rest wavelength ($\AA$)',ylabel='Flux',ylim=(-2,max(np.median(gflux1)+3*np.std(gflux1),np.median(gflux2)+3*np.std(gflux2) )))
xlim,ylim=ax.get_xlim(),ax.get_ylim()
print string1,xlim,ylim
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.05*(ylim[1] - ylim[0]), string1,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.09*(ylim[1] - ylim[0]), string2,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.13*(ylim[1] - ylim[0]), string3,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.17*(ylim[1] - ylim[0]), string4,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.21*(ylim[1] - ylim[0]), string5,fontsize=18)
obslambda = linelist['lambda']#*(1.+zvi)
x = np.where((obslambda > xlim[0]) & (obslambda < xlim[1]))[0]
plotlambda = obslambda[x]
plotname = linelist['Name'][x]
plota_e = linelist['a_e'][x]
#print plotlambda
for k in range(len(plotlambda)):
if plota_e[k].strip() == 'Abs.' :
ax.axvline(x=plotlambda[k], color='lawngreen', linestyle=':')
ax.text(plotlambda[k],ylim[0]+0.75*(ylim[1]-ylim[0]),plotname[k],color='Orange',ha='center',rotation=90,**text_font)
else :
ax.axvline(x=plotlambda[k], color='lightblue', linestyle=':')
ax.text(plotlambda[k],ylim[0]+0.75*(ylim[1]-ylim[0]),plotname[k],color='Brown',ha='center',rotation=90,**text_font)
#Download and plot the other epoch data
dupmjd=[]
if clqC['nepoch'][i] > 2:
xyz = np.where((ndata['MJD_2'] !=clqC['mjd1'][i]) & (ndata['MJD_2'] !=clqC['mjd2'][i]) )[0]
for k in range(len(xyz)):
if ndata['PLATE_2'][xyz[k]] >=10000 :
FITS_FILENAME = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(ndata['PLATE_2'][xyz[k]],ndata['MJD_2'][xyz[k]],ndata['FIBERID_2'][xyz[k]])
else:
FITS_FILENAME = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(ndata['PLATE_2'][xyz[k]],ndata['MJD_2'][xyz[k]],ndata['FIBERID_2'][xyz[k]])
if not ((os.path.isfile(FITS_FILENAME)) | (os.path.isfile(os.path.join(specdir,FITS_FILENAME)))):
download_spectra(ndata['PLATE_2'][xyz[k]],ndata['MJD_2'][xyz[k]],ndata['FIBERID_2'][xyz[k]],specdir)
print>>cptxt, 'cp /uufs/chpc.utah.edu/common/home/sdss/ebosswork/eboss/spectro/redux/{0}/spectra/lite/{1}/{2} ~/BOSS_BALDATA_CLQ/.'.format(spallversion,ndata['PLATE_2'][xyz[k]],FITS_FILENAME)
data0 = fits.open(os.path.join(specdir,FITS_FILENAME))[1].data
ax.plot(10**data0['loglam']/(1.0+zvi),gaussian_filter1d(data0['flux'],2),color=plt.cm.RdYlBu(k*300),alpha=0.5,label=FITS_FILENAME.split('.')[0][5:])
ax.plot(10**data0['loglam']/(1.0+zvi),1.0/np.sqrt(data0['ivar']),color=plt.cm.RdYlBu(k*300),alpha=0.1)
dupmjd.append(ndata['MJD_2'][xyz[k]])
ax.legend(loc=1)
crm = np.where(crts['InputID'] ==clqC['name'][i] )[0]
if len(crm) > 0 :
CRTS_Vmag = crts['Mag'][crm]
CRTS_Verr = crts['Magerr'][crm]
CRTS_MJD = crts['MJD'][crm]
ax1.errorbar(CRTS_MJD,CRTS_Vmag,yerr=CRTS_Verr,fmt='v',color='gold',label='CRTS',alpha=0.75)
CRTS_medmjd,CRTS_medmag,CRTS_medmagerr = smoothCRTS(CRTS_MJD,CRTS_Vmag,CRTS_Verr)
ax1.errorbar(CRTS_medmjd,CRTS_medmag,yerr=CRTS_medmagerr,fmt='v',color='brown',alpha=0.75)
ax1.set_ylim(ax1.get_ylim()[::-1])
ax1.set(xlabel='MJD',ylabel='V-mag')
ax1_ylim = ax1.get_ylim()
ax1.legend(loc=1)
ax1.axvline(clqC['mjd1'][i],ls='--',color='black',lw=3,zorder=-1,alpha=0.45)
ax1.text(clqC['mjd1'][i], ax1_ylim[0]+0.2*(ax1_ylim[1] - ax1_ylim[0]),str(clqC['mjd1'][i]),fontsize=12,rotation='vertical')
ax1.axvline(clqC['mjd2'][i],ls='--',color='red',lw=3,zorder=-1,alpha=0.45)
ax1.text(clqC['mjd2'][i], ax1_ylim[0]+0.2*(ax1_ylim[1] - ax1_ylim[0]),str(clqC['mjd2'][i]),fontsize=12,rotation='vertical')
for mm in dupmjd:
ax1.axvline(mm,ls='--',color='blue',lw=3,zorder=-1,alpha=0.45)
ax1.text(mm, ax1_ylim[0]+0.2*(ax1_ylim[1] - ax1_ylim[0]),str(mm),fontsize=12,rotation='vertical')
fig.tight_layout()
fig.savefig(pp,format='pdf')
print>>out, '{0}\t{1}\t{2:10.5f}\t{3:10.5f}\t{4:10.5f}\t{5:10.5f}\t{6:10.5f}\t{7:10.5f}\t{8:10.5f}\t{9:10.5f}'.format(clqC['name'][i],clqC['nepoch'][i],clqC['ra'][i],clqC['dec'][i],clqC['zvi'][i], master['umag2'][xx] -master['umag1'][xx] , master['gmag2'][xx]-master['gmag1'][xx], master['rmag2'][xx]-master['rmag1'][xx], master['imag2'][xx]-master['imag1'][xx], master['zmag2'][xx] - master['zmag1'][xx])
out.close()
pp.close()
def main():
print '-------'
#checkUniqueN()
#MultipleEpochQSOs()
#download_spectraCLQ()
plot_spectra()
if __name__== "__main__":
main()
| plot_spectra | identifier_name |
CLQ_search_new.py | import numpy as np
import astropy.io.fits as fits
import scipy as sp
import matplotlib.pyplot as plt
import astroML
from matplotlib.ticker import NullFormatter
import pandas as pd
import os
import urllib2
from scipy import stats
from pydl.pydl.pydlutils.spheregroup import *
from astroML.plotting import hist
from matplotlib.backends.backend_pdf import PdfPages
from scipy.interpolate import interp1d
from astropy.modeling.models import Voigt1D
from astropy import constants as const
from astropy import units as U
from astropy.coordinates import SkyCoord
from dustmaps.sfd import SFDQuery
from specutils import extinction
from matplotlib.ticker import MaxNLocator
import seaborn as sns
from statsmodels.stats.outliers_influence import summary_table
import statsmodels.formula.api as smf
import statsmodels.api as sm
from pydl.pydl.pydlutils import yanny
from astropy.table import Table
from astropy.time import Time
from scipy.stats import kde
from scipy.ndimage.filters import gaussian_filter1d,uniform_filter1d
"""
Program to explore the search for CLQs in the eBOSS quasar catalog
@author : Vivek M.
@date : 12/April/2019
@version: 1.0
"""
spallversion='v5_13_0'
params = {
'axes.labelsize': 18,
'axes.linewidth': 1.5,
#'text.fontsize': 8,
'legend.fontsize': 15,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True,
#'figure.figsize': [16, 5]
'legend.frameon': False,
'font.family': 'Times New Roman'
}
plt.rcParams.update(params)
hfont = {'fontname':'Times New Roman'}
def computeSN1700(wave,flux,err):
ww=np.where((wave >=1650) & (wave <= 1750))[0]
return np.median(flux[ww])/np.median(err[ww])
def download_spectra(plate, mjd, fiber, dirname='.'):
''' Downloads SDSS spectra from DR14 and puts them in dirname
Change the SDSS URL to download from a different location
'''
FITS_FILENAME = 'spec-%(plate)04i-%(mjd)05i-%(fiber)04i.fits'
try :
SDSS_URL = ('https://data.sdss.org/sas/dr14/eboss/spectro/redux/v5_10_0/spectra/%(plate)04i/'
'spec-%(plate)04i-%(mjd)05i-%(fiber)04i.fits')
urllib2.urlopen(SDSS_URL % dict(plate=plate,mjd=mjd,fiber=fiber))
print 'Downloading from dr14'
except urllib2.HTTPError as err:
if err.code == 404 :
SDSS_URL = ('https://data.sdss.org/sas/dr8/sdss/spectro/redux/26/spectra/%(plate)04i/'
'spec-%(plate)04i-%(mjd)05i-%(fiber)04i.fits')
print 'Downloading from dr8'
print SDSS_URL % dict(plate=plate,mjd=mjd,fiber=fiber)
download_url = 'wget '+SDSS_URL % dict(plate=plate,mjd=mjd,fiber=fiber)
print download_url
os.system(download_url)
mv_cmd='mv '+FITS_FILENAME % dict(plate=plate,mjd=mjd,fiber=fiber) + ' '+dirname+'/.'
#print mv_cmd
os.system(mv_cmd)
def checkUniqueN():
data=fits.open('CrossMatch_DR12Q_spAll_v5_13_0.fits')[1].data
print 'Number of entries matching DR12 and spAll_v5-13-0:',len(data['SDSS_NAME'])
print 'Number of quasars matching DR12 and spAll_v5-13-0:',len(np.unique(data['SDSS_NAME']))
def MultipleEpochQSOs():
|
def download_spectraCLQ(print_cmd=False):
clqC=np.genfromtxt('Candidate_CLQ_search_DR16c.txt',names=['name','nepoch','ra','dec','zvi','plate1','mjd1','fiber1','plate2','mjd2','fiber2'],dtype=('|S30',int,float,float,float,int,int,int,int,int,int))
if print_cmd :
cptxt = open('CLQcopyspectrumfromBOSS_SPECTRO_REDUX_v5_13.txt','w')
checkdownload=[]
specdir = 'CLQsearch_spectra'
for i in range(len(clqC['name'])):
#for i in range(10):
plates=[clqC['plate1'][i],clqC['plate2'][i]]
mjds=[clqC['mjd1'][i],clqC['mjd2'][i]]
fibers=[clqC['fiber1'][i],clqC['fiber2'][i]]
for j in range(2):
if plates[j] >= 10000:
FITS_FILENAME = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(plates[j],mjds[j],fibers[j])
else:
FITS_FILENAME = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(plates[j],mjds[j],fibers[j])
if not print_cmd:
if not ((os.path.isfile(FITS_FILENAME)) | (os.path.isfile(os.path.join(specdir,FITS_FILENAME)))):
download_spectra(plates[j],mjds[j],fibers[j],specdir)
else :
print 'Spectrum already downloaded'
if print_cmd:
print>>cptxt, 'cp /uufs/chpc.utah.edu/common/home/sdss/ebosswork/eboss/spectro/redux/{0}/spectra/lite/{1}/{2} ~/BOSS_BALDATA_CLQ/.'.format(spallversion,plates[j],FITS_FILENAME)
else :
if not ((os.path.isfile(FITS_FILENAME)) & (os.path.isfile(os.path.join(specdir,FITS_FILENAME)))):
checkdownload.append(FITS_FILENAME)
print '--'*31
# kfgh=raw_input()
print checkdownload
if print_cmd:
cptxt.close()
def smoothCRTS(mjd,mag,magerr):
mjd= np.array(mjd) ; mag = np.array(mag) ; magerr= np.array(magerr)
minmjd =min(mjd)
msort = np.argsort(mjd)
mjd = mjd[msort] ; mag = mag[msort] ; magerr = magerr[msort]
mjddiff = mjd[1:] - mjd[0:-1]
gp = np.where(mjddiff > 100)[0]
print type(gp)
ngp = np.insert(np.insert(gp,0,0),len(gp)+1,len(mjd)-1)
#fig,ax = plt.subplots(figsize=(10,5))
#ax.plot(mjd,mag,'ok',alpha=0.2,label=name)
#ax.set_xlabel('MJD')
#ax.set_ylabel('CRTS V-mag')
#ax.legend(loc=1)
medmjd = []
medmag = []
medmagerr = []
for ig,g in enumerate(ngp[0:-1]):
#print mjd[g]
if ig == 0:
xg = np.where((mjd >= mjd[ngp[ig]]-10) & (mjd <=mjd[ngp[ig+1]]+10))[0]
medmag.append(np.median(mag[xg]))
medmjd.append(np.mean(mjd[xg]))
medmagerr.append(np.std(mag[xg])/np.sqrt(len(xg)))
# ax.axvline(mjd[g]-10,ls='--',color='red')
else:
xg = np.where((mjd >= mjd[ngp[ig]]+10) & (mjd <=mjd[ngp[ig+1]]+10))[0]
medmag.append(np.median(mag[xg]))
medmjd.append(np.mean(mjd[xg]))
medmagerr.append(np.std(mag[xg])/np.sqrt(len(xg)))
return medmjd,medmag,medmagerr
def plot_spectra():
text_font = {'fontname':'Times New Roman', 'size':'14'}
pp = PdfPages('CLQsearches_plot_spectra_sn1700gt6.pdf')
clqC=np.genfromtxt('Candidate_CLQ_search_DR16c.txt',names=['name','nepoch','ra','dec','zvi','plate1','mjd1','fiber1','plate2','mjd2','fiber2'],dtype=('|S30',int,float,float,float,int,int,int,int,int,int))
master =np.load('CLQsearch_MasterList_Plate-MJD-Fiber.npz')
data=fits.open('CrossMatch_DR12Q_spAll_v5_13_0.fits')[1].data
crts = pd.read_csv('CRTS_lc_CLQsearchSample_sn1700gt6.csv')
specdir = 'CLQsearch_spectra'
linelist = np.genfromtxt('/Users/vzm83/Proposals/linelist_speccy.txt',usecols=(0,1,2),dtype=('|S10',float,'|S5'),names=True)
out = open('CLQsearch_deltamgs_sn1700gt6.txt','w')
cptxt = open('copyCLQadditionalspectra.txt','w')
for i in range(len(clqC['name'])):
#for i in range(100):
print 'Working on ',i,' source'
xx = np.where((master['plate1'] == clqC['plate1'][i]) &(master['mjd1'] == clqC['mjd1'][i]) & (master['fiber1'] == clqC['fiber1'][i]))[0][0]
xy =np.where(data['SDSS_NAME'] == clqC['name'][i])[0]
ndata = data[xy]
print xx
if clqC['plate1'][i] >= 10000:
FITS_FILENAME1 = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(clqC['plate1'][i],clqC['mjd1'][i],clqC['fiber1'][i])
else:
FITS_FILENAME1 = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(clqC['plate1'][i],clqC['mjd1'][i],clqC['fiber1'][i])
if clqC['plate2'][i] >= 10000:
FITS_FILENAME2 = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(clqC['plate2'][i],clqC['mjd2'][i],clqC['fiber2'][i])
else:
FITS_FILENAME2 = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(clqC['plate2'][i],clqC['mjd2'][i],clqC['fiber2'][i])
data1 = fits.open(os.path.join(specdir,FITS_FILENAME1))[1].data
data2 = fits.open(os.path.join(specdir,FITS_FILENAME2))[1].data
gflux1 = (data1['flux']*(data1['and_mask'] == 0)).copy()
gflux2 = (data2['flux']*(data2['and_mask'] == 0)).copy()
zvi = clqC['zvi'][i]
sn1 =computeSN1700(10**data1['loglam']/(1.0+zvi),data1['flux'],1.0/np.sqrt(data1['ivar']))
sn2 =computeSN1700(10**data2['loglam']/(1.0+zvi),data2['flux'],1.0/np.sqrt(data2['ivar']))
if ((np.median(gflux1) != 0) & (np.median(gflux2) != 0) & (sn1 > 6) & (sn2 > 6)) :
fig=plt.figure(figsize=(18,8))
ax=plt.subplot2grid((2, 3), (0, 0), colspan=2,rowspan=2)
ax1 = plt.subplot2grid((2, 3), (0, 2))
ax.plot(10**data1['loglam']/(1.0+zvi),gaussian_filter1d(data1['flux'],2),color='black',alpha=0.5,label=FITS_FILENAME1.split('.')[0][5:])
ax.plot(10**data2['loglam']/(1.0+zvi),gaussian_filter1d(data2['flux'],2),color='red',alpha=0.5,label=FITS_FILENAME2.split('.')[0][5:])
ax.plot(10**data1['loglam']/(1.0+zvi),1.0/np.sqrt(data1['ivar']),color='black',alpha=0.1)
ax.plot(10**data2['loglam']/(1.0+zvi),1.0/np.sqrt(data2['ivar']),color='red',alpha=0.1)
string1 = 'SDSS J{0}\tZ\_VI: {1:4.4f}\tN$\_{{spec}}$: {2}'.format(clqC['name'][i],zvi,clqC['nepoch'][i])
string2 = 'RA: {0:4.4f}\tDEC: {1:4.4f}'.format(clqC['ra'][i],clqC['dec'][i])
string3 = '{0:20}- {1:3.2f} {2:3.2f} {3:3.2f} {4:3.2f} {5:3.2f}'.format(FITS_FILENAME1.split('.')[0][5:], master['umag1'][xx], master['gmag1'][xx], master['rmag1'][xx], master['imag1'][xx], master['zmag1'][xx])
string4 = '{0:20}- {1:3.2f} {2:3.2f} {3:3.2f} {4:3.2f} {5:3.2f}'.format(FITS_FILENAME2.split('.')[0][5:], master['umag2'][xx], master['gmag2'][xx], master['rmag2'][xx], master['imag2'][xx], master['zmag2'][xx])
string5 = '{0:20}- {1:3.2f} {2:3.2f} {3:3.2f} {4:3.2f} {5:3.2f}'.format('$\Delta m(2-1)$', master['umag2'][xx] -master['umag1'][xx] , master['gmag2'][xx]-master['gmag1'][xx], master['rmag2'][xx]-master['rmag1'][xx], master['imag2'][xx]-master['imag1'][xx], master['zmag2'][xx] - master['zmag1'][xx])
ax.set(xlabel='Rest wavelength ($\AA$)',ylabel='Flux',ylim=(-2,max(np.median(gflux1)+3*np.std(gflux1),np.median(gflux2)+3*np.std(gflux2) )))
xlim,ylim=ax.get_xlim(),ax.get_ylim()
print string1,xlim,ylim
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.05*(ylim[1] - ylim[0]), string1,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.09*(ylim[1] - ylim[0]), string2,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.13*(ylim[1] - ylim[0]), string3,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.17*(ylim[1] - ylim[0]), string4,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.21*(ylim[1] - ylim[0]), string5,fontsize=18)
obslambda = linelist['lambda']#*(1.+zvi)
x = np.where((obslambda > xlim[0]) & (obslambda < xlim[1]))[0]
plotlambda = obslambda[x]
plotname = linelist['Name'][x]
plota_e = linelist['a_e'][x]
#print plotlambda
for k in range(len(plotlambda)):
if plota_e[k].strip() == 'Abs.' :
ax.axvline(x=plotlambda[k], color='lawngreen', linestyle=':')
ax.text(plotlambda[k],ylim[0]+0.75*(ylim[1]-ylim[0]),plotname[k],color='Orange',ha='center',rotation=90,**text_font)
else :
ax.axvline(x=plotlambda[k], color='lightblue', linestyle=':')
ax.text(plotlambda[k],ylim[0]+0.75*(ylim[1]-ylim[0]),plotname[k],color='Brown',ha='center',rotation=90,**text_font)
#Download and plot the other epoch data
dupmjd=[]
if clqC['nepoch'][i] > 2:
xyz = np.where((ndata['MJD_2'] !=clqC['mjd1'][i]) & (ndata['MJD_2'] !=clqC['mjd2'][i]) )[0]
for k in range(len(xyz)):
if ndata['PLATE_2'][xyz[k]] >=10000 :
FITS_FILENAME = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(ndata['PLATE_2'][xyz[k]],ndata['MJD_2'][xyz[k]],ndata['FIBERID_2'][xyz[k]])
else:
FITS_FILENAME = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(ndata['PLATE_2'][xyz[k]],ndata['MJD_2'][xyz[k]],ndata['FIBERID_2'][xyz[k]])
if not ((os.path.isfile(FITS_FILENAME)) | (os.path.isfile(os.path.join(specdir,FITS_FILENAME)))):
download_spectra(ndata['PLATE_2'][xyz[k]],ndata['MJD_2'][xyz[k]],ndata['FIBERID_2'][xyz[k]],specdir)
print>>cptxt, 'cp /uufs/chpc.utah.edu/common/home/sdss/ebosswork/eboss/spectro/redux/{0}/spectra/lite/{1}/{2} ~/BOSS_BALDATA_CLQ/.'.format(spallversion,ndata['PLATE_2'][xyz[k]],FITS_FILENAME)
data0 = fits.open(os.path.join(specdir,FITS_FILENAME))[1].data
ax.plot(10**data0['loglam']/(1.0+zvi),gaussian_filter1d(data0['flux'],2),color=plt.cm.RdYlBu(k*300),alpha=0.5,label=FITS_FILENAME.split('.')[0][5:])
ax.plot(10**data0['loglam']/(1.0+zvi),1.0/np.sqrt(data0['ivar']),color=plt.cm.RdYlBu(k*300),alpha=0.1)
dupmjd.append(ndata['MJD_2'][xyz[k]])
ax.legend(loc=1)
crm = np.where(crts['InputID'] ==clqC['name'][i] )[0]
if len(crm) > 0 :
CRTS_Vmag = crts['Mag'][crm]
CRTS_Verr = crts['Magerr'][crm]
CRTS_MJD = crts['MJD'][crm]
ax1.errorbar(CRTS_MJD,CRTS_Vmag,yerr=CRTS_Verr,fmt='v',color='gold',label='CRTS',alpha=0.75)
CRTS_medmjd,CRTS_medmag,CRTS_medmagerr = smoothCRTS(CRTS_MJD,CRTS_Vmag,CRTS_Verr)
ax1.errorbar(CRTS_medmjd,CRTS_medmag,yerr=CRTS_medmagerr,fmt='v',color='brown',alpha=0.75)
ax1.set_ylim(ax1.get_ylim()[::-1])
ax1.set(xlabel='MJD',ylabel='V-mag')
ax1_ylim = ax1.get_ylim()
ax1.legend(loc=1)
ax1.axvline(clqC['mjd1'][i],ls='--',color='black',lw=3,zorder=-1,alpha=0.45)
ax1.text(clqC['mjd1'][i], ax1_ylim[0]+0.2*(ax1_ylim[1] - ax1_ylim[0]),str(clqC['mjd1'][i]),fontsize=12,rotation='vertical')
ax1.axvline(clqC['mjd2'][i],ls='--',color='red',lw=3,zorder=-1,alpha=0.45)
ax1.text(clqC['mjd2'][i], ax1_ylim[0]+0.2*(ax1_ylim[1] - ax1_ylim[0]),str(clqC['mjd2'][i]),fontsize=12,rotation='vertical')
for mm in dupmjd:
ax1.axvline(mm,ls='--',color='blue',lw=3,zorder=-1,alpha=0.45)
ax1.text(mm, ax1_ylim[0]+0.2*(ax1_ylim[1] - ax1_ylim[0]),str(mm),fontsize=12,rotation='vertical')
fig.tight_layout()
fig.savefig(pp,format='pdf')
print>>out, '{0}\t{1}\t{2:10.5f}\t{3:10.5f}\t{4:10.5f}\t{5:10.5f}\t{6:10.5f}\t{7:10.5f}\t{8:10.5f}\t{9:10.5f}'.format(clqC['name'][i],clqC['nepoch'][i],clqC['ra'][i],clqC['dec'][i],clqC['zvi'][i], master['umag2'][xx] -master['umag1'][xx] , master['gmag2'][xx]-master['gmag1'][xx], master['rmag2'][xx]-master['rmag1'][xx], master['imag2'][xx]-master['imag1'][xx], master['zmag2'][xx] - master['zmag1'][xx])
out.close()
pp.close()
def main():
print '-------'
#checkUniqueN()
#MultipleEpochQSOs()
#download_spectraCLQ()
plot_spectra()
if __name__== "__main__":
main()
| redux= '/uufs/chpc.utah.edu/common/home/sdss/ebosswork/eboss/spectro/redux/v5_13_0/spectra/lite'
data=fits.open('CrossMatch_DR12Q_spAll_v5_13_0.fits')[1].data
uname = np.unique(data['SDSS_NAME'])
count = 0
gcount = 0
out=open('Candidate_CLQ_search_DR16.txt','w')
name = [] ; ra=[] ; dec=[];zvi=[]
umag1 =[] ;gmag1=[];rmag1=[];imag1=[];zmag1=[]
umag2 =[] ;gmag2=[];rmag2=[];imag2=[];zmag2=[]
#umag1err =[] ;gmag1err=[];rmag1err=[];imag1err=[];zmag1err=[]
#umag2err =[] ;gmag2err=[];rmag2err=[];imag2err=[];zmag2err=[]
plate1 = [] ; mjd1=[];fiber1=[]
plate2 = [] ; mjd2=[];fiber2=[]
gdiff = [] ; rdiff = [] ; idiff = []
for i in range(len(uname)):
#for i in range(150):
xx=np.where(data['SDSS_NAME'] == uname[i])[0]
if len(xx)>1:
ndata = data[xx]
#print uname[i],len(xx),xx,data['PLATE_1'][xx[0]],data['MJD_1'][xx[0]],data['FIBERID_1'][xx[0]],data['PLATE_2'][xx[0]],data['MJD_2'][xx[0]],data['FIBERID_2'][xx[0]],data['PLATE_2'][xx[-1]],data['MJD_2'][xx[-1]],data['FIBERID_2'][xx[-1]],data['FIBERMAG'][xx[0],1],data['FIBER2MAG'][xx[-1],1],data['FIBERFLUX'][xx[0],2],data['FIBERFLUX'][xx[-1],2]
mjdl = ndata['MJD_2']
maxmjd = max(mjdl)
minmjd = min(mjdl)
xmax = np.where(mjdl == maxmjd)[0][0]
xmin = np.where(mjdl == minmjd)[0][0]
print mjdl,maxmjd,minmjd
print xmax,xmin,ndata['MJD_2'][xmax],ndata['PLATE_2'][xmax],ndata['FIBERID_2'][xmax],ndata['MJD_2'][xmin],ndata['PLATE_2'][xmin],ndata['FIBERID_2'][xmin]
#ksjhdf=raw_input()
#print 'Check', ndata['MJD_2'],ndata['SDSS_NAME'],ndata['PLATE_2'],ndata['FIBERID_2']
#plate1 = data['PLATE_2'][xx[0]] ; plate2 = data['PLATE_2'][xx[-1]]
#pmf1 = '{0:04d}-{1:05d}-{2:04d}'.format(data['PLATE_2'][xx[0]],data['MJD_2'][xx[0]],data['FIBERID_2'][xx[0]])
#pmf2 = '{0:04d}-{1:05d}-{2:04d}'.format(data['PLATE_2'][xx[-1]],data['MJD_2'][xx[-1]],data['FIBERID_2'][xx[-1]])
#data1 = fits.open(os.path.join(redux,plate1,'spec-'+pmf1+'.fits'))[1].data
gdiff.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,1])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,1])))
rdiff.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,2])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,2])))
idiff.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,3])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,3])))
if np.abs((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,1])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,1]))) > 1 :
print>>out,'{0}\t{1}\t{2:10.5f}\t{3:10.5f}\t{4:10.5f}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}'.format( uname[i],len(xx),ndata['RA_1'][0],ndata['DEC_1'][0],ndata['Z_VI'][0],ndata['PLATE_2'][xmin],ndata['MJD_2'][xmin],ndata['FIBERID_2'][xmin],ndata['PLATE_2'][xmax],ndata['MJD_2'][xmax],ndata['FIBERID_2'][xmax])
gcount +=1
name.append(ndata['SDSS_NAME'][0])
ra.append(ndata['RA_1'][0])
dec.append(ndata['DEC_1'][0])
zvi.append(ndata['Z_VI'][0])
plate1.append(ndata['PLATE_2'][xmin])
mjd1.append(ndata['MJD_2'][xmin])
fiber1.append(ndata['FIBERID_2'][xmin])
plate2.append(ndata['PLATE_2'][xmax])
mjd2.append(ndata['MJD_2'][xmax])
fiber2.append(ndata['FIBERID_2'][xmax])
umag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,0])))
gmag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,1])))
rmag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,2])))
imag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,3])))
zmag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,4])))
umag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,0])))
gmag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,1])))
rmag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,2])))
imag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,3])))
zmag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,4])))
count +=1
print str(i+1)+'/'+str(len(uname))+' Running and Found candidates: '+str(gcount)
np.savez('CLQsearch_MasterList_Plate-MJD-Fiber.npz',
name = np.array(name) ,
ra = np.array(ra) ,
dec = np.array(dec) ,
zvi = np.array(zvi) ,
plate1 = np.array(plate1) ,
mjd1 = np.array(mjd1) ,
fiber1 = np.array(fiber1) ,
plate2 = np.array(plate2) ,
mjd2 = np.array(mjd2) ,
fiber2 = np.array(fiber2) ,
umag1=np.array(umag1) ,
gmag1=np.array(gmag1) ,
rmag1=np.array(rmag1) ,
imag1=np.array(imag1) ,
zmag1=np.array(zmag1) ,
umag2=np.array(umag2) ,
gmag2=np.array(gmag2) ,
rmag2=np.array(rmag2) ,
imag2=np.array(imag2) ,
zmag2=np.array(zmag2) ,
)
#print count
#print gdiff
out.close()
gdiff=np.array(gdiff)
rdiff=np.array(rdiff)
idiff=np.array(idiff)
yy=np.where(np.abs(gdiff) > 1)[0]
fig,(ax,ax1,ax2)=plt.subplots(1,3,figsize=(10,5))
ax.plot(gdiff,idiff,'.',color='black',label='gmag vs imag')
nbins=20
#k = kde.gaussian_kde((gdiff,idiff))
#xi, yi = np.mgrid[gdiff.min():gdiff.max():nbins*1j, idiff.min():idiff.max():nbins*1j]
#zi = k(np.vstack([xi.flatten(), yi.flatten()]))
#ax.pcolormesh(xi, yi, zi.reshape(xi.shape), shading='gouraud', cmap=plt.cm.BuGn_r)
#ax.contour(xi, yi, zi.reshape(xi.shape) )
ax.plot(gdiff,idiff,'.',color='black',label='gmag vs imag')
ax1.plot(gdiff,rdiff,'.',color='red',label='gmag vs rmag')
ax2.plot(rdiff,idiff,'.',color='blue',label='rmag vs imag')
ax.set(xlabel='$\Delta$g-mag',ylabel='$\Delta$i-mag')
ax1.set(xlabel='$\Delta$g-mag',ylabel='$\Delta$r-mag')
ax2.set(xlabel='$\Delta$r-mag',ylabel='$\Delta$i-mag')
fig.tight_layout()
fig.savefig('Candidate_CLQsearch_DR16_color-magn.jpg')
plt.show() | identifier_body |
CLQ_search_new.py | import numpy as np
import astropy.io.fits as fits
import scipy as sp
import matplotlib.pyplot as plt
import astroML
from matplotlib.ticker import NullFormatter
import pandas as pd
import os
import urllib2
from scipy import stats
from pydl.pydl.pydlutils.spheregroup import *
from astroML.plotting import hist
from matplotlib.backends.backend_pdf import PdfPages
from scipy.interpolate import interp1d
from astropy.modeling.models import Voigt1D
from astropy import constants as const
from astropy import units as U
from astropy.coordinates import SkyCoord
from dustmaps.sfd import SFDQuery
from specutils import extinction
from matplotlib.ticker import MaxNLocator
import seaborn as sns
from statsmodels.stats.outliers_influence import summary_table
import statsmodels.formula.api as smf
import statsmodels.api as sm
from pydl.pydl.pydlutils import yanny
from astropy.table import Table
from astropy.time import Time
from scipy.stats import kde
from scipy.ndimage.filters import gaussian_filter1d,uniform_filter1d
"""
Program to explore the search for CLQs in the eBOSS quasar catalog
@author : Vivek M.
@date : 12/April/2019
@version: 1.0
"""
spallversion='v5_13_0'
params = {
'axes.labelsize': 18,
'axes.linewidth': 1.5,
#'text.fontsize': 8,
'legend.fontsize': 15,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True,
#'figure.figsize': [16, 5]
'legend.frameon': False,
'font.family': 'Times New Roman'
}
plt.rcParams.update(params)
hfont = {'fontname':'Times New Roman'}
def computeSN1700(wave,flux,err):
ww=np.where((wave >=1650) & (wave <= 1750))[0]
return np.median(flux[ww])/np.median(err[ww])
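# computeSN1700 returns the median signal-to-noise ratio in the rest-frame
# 1650-1750 Angstrom window; plot_spectra() below calls it as
#   computeSN1700(10**data1['loglam']/(1.0+zvi), data1['flux'], 1.0/np.sqrt(data1['ivar']))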
def download_spectra(plate, mjd, fiber, dirname='.'):
    ''' Downloads an SDSS spectrum from DR14 and puts it in dirname
Change the SDSS URL to download from a different location
'''
FITS_FILENAME = 'spec-%(plate)04i-%(mjd)05i-%(fiber)04i.fits'
try :
SDSS_URL = ('https://data.sdss.org/sas/dr14/eboss/spectro/redux/v5_10_0/spectra/%(plate)04i/'
'spec-%(plate)04i-%(mjd)05i-%(fiber)04i.fits')
urllib2.urlopen(SDSS_URL % dict(plate=plate,mjd=mjd,fiber=fiber))
        print 'Downloading from dr14'
except urllib2.HTTPError as err:
if err.code == 404 :
SDSS_URL = ('https://data.sdss.org/sas/dr8/sdss/spectro/redux/26/spectra/%(plate)04i/'
'spec-%(plate)04i-%(mjd)05i-%(fiber)04i.fits')
            print 'Downloading from dr8'
print SDSS_URL % dict(plate=plate,mjd=mjd,fiber=fiber)
download_url = 'wget '+SDSS_URL % dict(plate=plate,mjd=mjd,fiber=fiber)
print download_url
os.system(download_url)
mv_cmd='mv '+FITS_FILENAME % dict(plate=plate,mjd=mjd,fiber=fiber) + ' '+dirname+'/.'
#print mv_cmd
os.system(mv_cmd)
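# Illustrative call (the plate/MJD/fiber values here are made up):
# download_spectra(7665, 57328, 452, dirname='CLQsearch_spectra')
# would fetch spec-7665-57328-0452.fits and move it into CLQsearch_spectra/.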
def checkUniqueN():
data=fits.open('CrossMatch_DR12Q_spAll_v5_13_0.fits')[1].data
print 'Number of entries matching DR12 and spAll_v5-13-0:',len(data['SDSS_NAME'])
print 'Number of quasars matching DR12 and spAll_v5-13-0:',len(np.unique(data['SDSS_NAME']))
def MultipleEpochQSOs():
redux= '/uufs/chpc.utah.edu/common/home/sdss/ebosswork/eboss/spectro/redux/v5_13_0/spectra/lite'
data=fits.open('CrossMatch_DR12Q_spAll_v5_13_0.fits')[1].data
uname = np.unique(data['SDSS_NAME'])
count = 0
gcount = 0
out=open('Candidate_CLQ_search_DR16.txt','w')
name = [] ; ra=[] ; dec=[];zvi=[]
umag1 =[] ;gmag1=[];rmag1=[];imag1=[];zmag1=[]
umag2 =[] ;gmag2=[];rmag2=[];imag2=[];zmag2=[]
#umag1err =[] ;gmag1err=[];rmag1err=[];imag1err=[];zmag1err=[]
#umag2err =[] ;gmag2err=[];rmag2err=[];imag2err=[];zmag2err=[]
plate1 = [] ; mjd1=[];fiber1=[]
plate2 = [] ; mjd2=[];fiber2=[]
gdiff = [] ; rdiff = [] ; idiff = []
for i in range(len(uname)):
#for i in range(150):
xx=np.where(data['SDSS_NAME'] == uname[i])[0]
if len(xx)>1:
ndata = data[xx]
#print uname[i],len(xx),xx,data['PLATE_1'][xx[0]],data['MJD_1'][xx[0]],data['FIBERID_1'][xx[0]],data['PLATE_2'][xx[0]],data['MJD_2'][xx[0]],data['FIBERID_2'][xx[0]],data['PLATE_2'][xx[-1]],data['MJD_2'][xx[-1]],data['FIBERID_2'][xx[-1]],data['FIBERMAG'][xx[0],1],data['FIBER2MAG'][xx[-1],1],data['FIBERFLUX'][xx[0],2],data['FIBERFLUX'][xx[-1],2]
mjdl = ndata['MJD_2']
maxmjd = max(mjdl)
minmjd = min(mjdl)
xmax = np.where(mjdl == maxmjd)[0][0]
xmin = np.where(mjdl == minmjd)[0][0]
print mjdl,maxmjd,minmjd
print xmax,xmin,ndata['MJD_2'][xmax],ndata['PLATE_2'][xmax],ndata['FIBERID_2'][xmax],ndata['MJD_2'][xmin],ndata['PLATE_2'][xmin],ndata['FIBERID_2'][xmin]
#ksjhdf=raw_input()
#print 'Check', ndata['MJD_2'],ndata['SDSS_NAME'],ndata['PLATE_2'],ndata['FIBERID_2']
#plate1 = data['PLATE_2'][xx[0]] ; plate2 = data['PLATE_2'][xx[-1]]
#pmf1 = '{0:04d}-{1:05d}-{2:04d}'.format(data['PLATE_2'][xx[0]],data['MJD_2'][xx[0]],data['FIBERID_2'][xx[0]])
#pmf2 = '{0:04d}-{1:05d}-{2:04d}'.format(data['PLATE_2'][xx[-1]],data['MJD_2'][xx[-1]],data['FIBERID_2'][xx[-1]])
#data1 = fits.open(os.path.join(redux,plate1,'spec-'+pmf1+'.fits'))[1].data
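            # SPECTROFLUX is in nanomaggies, so 22.5 - 2.5*log10(flux) is the
            # corresponding magnitude; the differences below compare the earliest
            # (xmin) and latest (xmax) spectroscopic epochs.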
gdiff.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,1])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,1])))
rdiff.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,2])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,2])))
idiff.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,3])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,3])))
if np.abs((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,1])) - (22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,1]))) > 1 :
print>>out,'{0}\t{1}\t{2:10.5f}\t{3:10.5f}\t{4:10.5f}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}'.format( uname[i],len(xx),ndata['RA_1'][0],ndata['DEC_1'][0],ndata['Z_VI'][0],ndata['PLATE_2'][xmin],ndata['MJD_2'][xmin],ndata['FIBERID_2'][xmin],ndata['PLATE_2'][xmax],ndata['MJD_2'][xmax],ndata['FIBERID_2'][xmax])
gcount +=1
name.append(ndata['SDSS_NAME'][0])
ra.append(ndata['RA_1'][0])
dec.append(ndata['DEC_1'][0])
zvi.append(ndata['Z_VI'][0])
plate1.append(ndata['PLATE_2'][xmin])
mjd1.append(ndata['MJD_2'][xmin])
fiber1.append(ndata['FIBERID_2'][xmin])
plate2.append(ndata['PLATE_2'][xmax])
mjd2.append(ndata['MJD_2'][xmax])
fiber2.append(ndata['FIBERID_2'][xmax])
umag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,0])))
gmag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,1])))
rmag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,2])))
imag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,3])))
zmag1.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmin,4])))
umag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,0])))
gmag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,1])))
rmag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,2])))
imag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,3])))
zmag2.append((22.5 - 2.5*np.log10(ndata['SPECTROFLUX'][xmax,4])))
count +=1
print str(i+1)+'/'+str(len(uname))+' Running and Found candidates: '+str(gcount)
np.savez('CLQsearch_MasterList_Plate-MJD-Fiber.npz',
name = np.array(name) ,
ra = np.array(ra) ,
dec = np.array(dec) ,
zvi = np.array(zvi) ,
plate1 = np.array(plate1) ,
mjd1 = np.array(mjd1) ,
fiber1 = np.array(fiber1) ,
plate2 = np.array(plate2) ,
mjd2 = np.array(mjd2) ,
fiber2 = np.array(fiber2) ,
umag1=np.array(umag1) ,
gmag1=np.array(gmag1) ,
rmag1=np.array(rmag1) ,
imag1=np.array(imag1) ,
zmag1=np.array(zmag1) ,
umag2=np.array(umag2) ,
gmag2=np.array(gmag2) ,
rmag2=np.array(rmag2) ,
imag2=np.array(imag2) ,
zmag2=np.array(zmag2) ,
)
#print count
#print gdiff
out.close()
gdiff=np.array(gdiff)
rdiff=np.array(rdiff)
idiff=np.array(idiff)
yy=np.where(np.abs(gdiff) > 1)[0]
fig,(ax,ax1,ax2)=plt.subplots(1,3,figsize=(10,5))
ax.plot(gdiff,idiff,'.',color='black',label='gmag vs imag')
nbins=20
#k = kde.gaussian_kde((gdiff,idiff))
#xi, yi = np.mgrid[gdiff.min():gdiff.max():nbins*1j, idiff.min():y.max():nbins*1j]
#zi = k(np.vstack([xi.flatten(), yi.flatten()]))
#ax.pcolormesh(xi, yi, zi.reshape(xi.shape), shading='gouraud', cmap=plt.cm.BuGn_r)
#ax.contour(xi, yi, zi.reshape(xi.shape) )
ax.plot(gdiff,idiff,'.',color='black',label='gmag vs imag')
ax1.plot(gdiff,rdiff,'.',color='red',label='gmag vs rmag')
ax2.plot(rdiff,idiff,'.',color='blue',label='rmag vs imag')
ax.set(xlabel='$\Delta$g-mag',ylabel='$\Delta$i-mag')
ax1.set(xlabel='$\Delta$g-mag',ylabel='$\Delta$r-mag')
ax2.set(xlabel='$\Delta$r-mag',ylabel='$\Delta$i-mag')
fig.tight_layout()
fig.savefig('Candidate_CLQsearch_DR16_color-magn.jpg')
plt.show()
def download_spectraCLQ(print_cmd=False):
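    # Two modes: with print_cmd=True, only write 'cp' commands pointing at the
    # Utah v5_13_0 redux tree into a text file; otherwise download any spectrum
    # that is not already present locally or in CLQsearch_spectra/.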
clqC=np.genfromtxt('Candidate_CLQ_search_DR16c.txt',names=['name','nepoch','ra','dec','zvi','plate1','mjd1','fiber1','plate2','mjd2','fiber2'],dtype=('|S30',int,float,float,float,int,int,int,int,int,int))
if print_cmd :
cptxt = open('CLQcopyspectrumfromBOSS_SPECTRO_REDUX_v5_13.txt','w')
checkdownload=[]
specdir = 'CLQsearch_spectra'
for i in range(len(clqC['name'])):
#for i in range(10):
plates=[clqC['plate1'][i],clqC['plate2'][i]]
mjds=[clqC['mjd1'][i],clqC['mjd2'][i]]
fibers=[clqC['fiber1'][i],clqC['fiber2'][i]]
for j in range(2):
if plates[j] >= 10000:
FITS_FILENAME = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(plates[j],mjds[j],fibers[j])
else:
|
if not print_cmd:
if not ((os.path.isfile(FITS_FILENAME)) | (os.path.isfile(os.path.join(specdir,FITS_FILENAME)))):
download_spectra(plates[j],mjds[j],fibers[j],specdir)
else :
print 'Spectrum already downloaded'
if print_cmd:
print>>cptxt, 'cp /uufs/chpc.utah.edu/common/home/sdss/ebosswork/eboss/spectro/redux/{0}/spectra/lite/{1}/{2} ~/BOSS_BALDATA_CLQ/.'.format(spallversion,plates[j],FITS_FILENAME)
else :
if not ((os.path.isfile(FITS_FILENAME)) & (os.path.isfile(os.path.join(specdir,FITS_FILENAME)))):
checkdownload.append(FITS_FILENAME)
print '--'*31
# kfgh=raw_input()
print checkdownload
if print_cmd:
cptxt.close()
def smoothCRTS(mjd,mag,magerr):
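    # Bin the CRTS light curve into observing seasons: gaps longer than 100 days
    # start a new group, and each group is reduced to its median magnitude, mean
    # MJD and standard error (std/sqrt(N)).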
mjd= np.array(mjd) ; mag = np.array(mag) ; magerr= np.array(magerr)
minmjd =min(mjd)
msort = np.argsort(mjd)
mjd = mjd[msort] ; mag = mag[msort] ; magerr = magerr[msort]
mjddiff = mjd[1:] - mjd[0:-1]
gp = np.where(mjddiff > 100)[0]
print type(gp)
ngp = np.insert(np.insert(gp,0,0),len(gp)+1,len(mjd)-1)
#fig,ax = plt.subplots(figsize=(10,5))
#ax.plot(mjd,mag,'ok',alpha=0.2,label=name)
#ax.set_xlabel('MJD')
#ax.set_ylabel('CRTS V-mag')
#ax.legend(loc=1)
medmjd = []
medmag = []
medmagerr = []
for ig,g in enumerate(ngp[0:-1]):
#print mjd[g]
if ig == 0:
xg = np.where((mjd >= mjd[ngp[ig]]-10) & (mjd <=mjd[ngp[ig+1]]+10))[0]
medmag.append(np.median(mag[xg]))
medmjd.append(np.mean(mjd[xg]))
medmagerr.append(np.std(mag[xg])/np.sqrt(len(xg)))
# ax.axvline(mjd[g]-10,ls='--',color='red')
else:
xg = np.where((mjd >= mjd[ngp[ig]]+10) & (mjd <=mjd[ngp[ig+1]]+10))[0]
medmag.append(np.median(mag[xg]))
medmjd.append(np.mean(mjd[xg]))
medmagerr.append(np.std(mag[xg])/np.sqrt(len(xg)))
return medmjd,medmag,medmagerr
def plot_spectra():
text_font = {'fontname':'Times New Roman', 'size':'14'}
pp = PdfPages('CLQsearches_plot_spectra_sn1700gt6.pdf')
clqC=np.genfromtxt('Candidate_CLQ_search_DR16c.txt',names=['name','nepoch','ra','dec','zvi','plate1','mjd1','fiber1','plate2','mjd2','fiber2'],dtype=('|S30',int,float,float,float,int,int,int,int,int,int))
master =np.load('CLQsearch_MasterList_Plate-MJD-Fiber.npz')
data=fits.open('CrossMatch_DR12Q_spAll_v5_13_0.fits')[1].data
crts = pd.read_csv('CRTS_lc_CLQsearchSample_sn1700gt6.csv')
specdir = 'CLQsearch_spectra'
linelist = np.genfromtxt('/Users/vzm83/Proposals/linelist_speccy.txt',usecols=(0,1,2),dtype=('|S10',float,'|S5'),names=True)
out = open('CLQsearch_deltamgs_sn1700gt6.txt','w')
cptxt = open('copyCLQadditionalspectra.txt','w')
for i in range(len(clqC['name'])):
#for i in range(100):
print 'Working on ',i,' source'
xx = np.where((master['plate1'] == clqC['plate1'][i]) &(master['mjd1'] == clqC['mjd1'][i]) & (master['fiber1'] == clqC['fiber1'][i]))[0][0]
xy =np.where(data['SDSS_NAME'] == clqC['name'][i])[0]
ndata = data[xy]
print xx
if clqC['plate1'][i] >= 10000:
FITS_FILENAME1 = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(clqC['plate1'][i],clqC['mjd1'][i],clqC['fiber1'][i])
else:
FITS_FILENAME1 = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(clqC['plate1'][i],clqC['mjd1'][i],clqC['fiber1'][i])
if clqC['plate2'][i] >= 10000:
FITS_FILENAME2 = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(clqC['plate2'][i],clqC['mjd2'][i],clqC['fiber2'][i])
else:
FITS_FILENAME2 = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(clqC['plate2'][i],clqC['mjd2'][i],clqC['fiber2'][i])
data1 = fits.open(os.path.join(specdir,FITS_FILENAME1))[1].data
data2 = fits.open(os.path.join(specdir,FITS_FILENAME2))[1].data
gflux1 = (data1['flux']*(data1['and_mask'] == 0)).copy()
gflux2 = (data2['flux']*(data2['and_mask'] == 0)).copy()
zvi = clqC['zvi'][i]
sn1 =computeSN1700(10**data1['loglam']/(1.0+zvi),data1['flux'],1.0/np.sqrt(data1['ivar']))
sn2 =computeSN1700(10**data2['loglam']/(1.0+zvi),data2['flux'],1.0/np.sqrt(data2['ivar']))
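        # Keep only pairs where both epochs have non-zero median unmasked flux
        # and a continuum S/N > 6 around rest-frame 1700 Angstrom.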
if ((np.median(gflux1) != 0) & (np.median(gflux2) != 0) & (sn1 > 6) & (sn2 > 6)) :
fig=plt.figure(figsize=(18,8))
ax=plt.subplot2grid((2, 3), (0, 0), colspan=2,rowspan=2)
ax1 = plt.subplot2grid((2, 3), (0, 2))
ax.plot(10**data1['loglam']/(1.0+zvi),gaussian_filter1d(data1['flux'],2),color='black',alpha=0.5,label=FITS_FILENAME1.split('.')[0][5:])
ax.plot(10**data2['loglam']/(1.0+zvi),gaussian_filter1d(data2['flux'],2),color='red',alpha=0.5,label=FITS_FILENAME2.split('.')[0][5:])
ax.plot(10**data1['loglam']/(1.0+zvi),1.0/np.sqrt(data1['ivar']),color='black',alpha=0.1)
ax.plot(10**data2['loglam']/(1.0+zvi),1.0/np.sqrt(data2['ivar']),color='red',alpha=0.1)
string1 = 'SDSS J{0}\tZ\_VI: {1:4.4f}\tN$\_{{spec}}$: {2}'.format(clqC['name'][i],zvi,clqC['nepoch'][i])
string2 = 'RA: {0:4.4f}\tDEC: {1:4.4f}'.format(clqC['ra'][i],clqC['dec'][i])
string3 = '{0:20}- {1:3.2f} {2:3.2f} {3:3.2f} {4:3.2f} {5:3.2f}'.format(FITS_FILENAME1.split('.')[0][5:], master['umag1'][xx], master['gmag1'][xx], master['rmag1'][xx], master['imag1'][xx], master['zmag1'][xx])
string4 = '{0:20}- {1:3.2f} {2:3.2f} {3:3.2f} {4:3.2f} {5:3.2f}'.format(FITS_FILENAME2.split('.')[0][5:], master['umag2'][xx], master['gmag2'][xx], master['rmag2'][xx], master['imag2'][xx], master['zmag2'][xx])
string5 = '{0:20}- {1:3.2f} {2:3.2f} {3:3.2f} {4:3.2f} {5:3.2f}'.format('$\Delta m(2-1)$', master['umag2'][xx] -master['umag1'][xx] , master['gmag2'][xx]-master['gmag1'][xx], master['rmag2'][xx]-master['rmag1'][xx], master['imag2'][xx]-master['imag1'][xx], master['zmag2'][xx] - master['zmag1'][xx])
ax.set(xlabel='Rest wavelength ($\AA$)',ylabel='Flux',ylim=(-2,max(np.median(gflux1)+3*np.std(gflux1),np.median(gflux2)+3*np.std(gflux2) )))
xlim,ylim=ax.get_xlim(),ax.get_ylim()
print string1,xlim,ylim
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.05*(ylim[1] - ylim[0]), string1,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.09*(ylim[1] - ylim[0]), string2,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.13*(ylim[1] - ylim[0]), string3,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.17*(ylim[1] - ylim[0]), string4,fontsize=18)
ax.text(xlim[0]+0.05*(xlim[1] - xlim[0]),ylim[1]-0.21*(ylim[1] - ylim[0]), string5,fontsize=18)
obslambda = linelist['lambda']#*(1.+zvi)
x = np.where((obslambda > xlim[0]) & (obslambda < xlim[1]))[0]
plotlambda = obslambda[x]
plotname = linelist['Name'][x]
plota_e = linelist['a_e'][x]
#print plotlambda
for k in range(len(plotlambda)):
if plota_e[k].strip() == 'Abs.' :
ax.axvline(x=plotlambda[k], color='lawngreen', linestyle=':')
ax.text(plotlambda[k],ylim[0]+0.75*(ylim[1]-ylim[0]),plotname[k],color='Orange',ha='center',rotation=90,**text_font)
else :
ax.axvline(x=plotlambda[k], color='lightblue', linestyle=':')
ax.text(plotlambda[k],ylim[0]+0.75*(ylim[1]-ylim[0]),plotname[k],color='Brown',ha='center',rotation=90,**text_font)
#Download and plot the other epoch data
dupmjd=[]
if clqC['nepoch'][i] > 2:
xyz = np.where((ndata['MJD_2'] !=clqC['mjd1'][i]) & (ndata['MJD_2'] !=clqC['mjd2'][i]) )[0]
for k in range(len(xyz)):
if ndata['PLATE_2'][xyz[k]] >=10000 :
FITS_FILENAME = 'spec-{0:05d}-{1:05d}-{2:04d}.fits'.format(ndata['PLATE_2'][xyz[k]],ndata['MJD_2'][xyz[k]],ndata['FIBERID_2'][xyz[k]])
else:
FITS_FILENAME = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(ndata['PLATE_2'][xyz[k]],ndata['MJD_2'][xyz[k]],ndata['FIBERID_2'][xyz[k]])
if not ((os.path.isfile(FITS_FILENAME)) | (os.path.isfile(os.path.join(specdir,FITS_FILENAME)))):
download_spectra(ndata['PLATE_2'][xyz[k]],ndata['MJD_2'][xyz[k]],ndata['FIBERID_2'][xyz[k]],specdir)
print>>cptxt, 'cp /uufs/chpc.utah.edu/common/home/sdss/ebosswork/eboss/spectro/redux/{0}/spectra/lite/{1}/{2} ~/BOSS_BALDATA_CLQ/.'.format(spallversion,ndata['PLATE_2'][xyz[k]],FITS_FILENAME)
data0 = fits.open(os.path.join(specdir,FITS_FILENAME))[1].data
ax.plot(10**data0['loglam']/(1.0+zvi),gaussian_filter1d(data0['flux'],2),color=plt.cm.RdYlBu(k*300),alpha=0.5,label=FITS_FILENAME.split('.')[0][5:])
ax.plot(10**data0['loglam']/(1.0+zvi),1.0/np.sqrt(data0['ivar']),color=plt.cm.RdYlBu(k*300),alpha=0.1)
dupmjd.append(ndata['MJD_2'][xyz[k]])
ax.legend(loc=1)
crm = np.where(crts['InputID'] ==clqC['name'][i] )[0]
if len(crm) > 0 :
CRTS_Vmag = crts['Mag'][crm]
CRTS_Verr = crts['Magerr'][crm]
CRTS_MJD = crts['MJD'][crm]
ax1.errorbar(CRTS_MJD,CRTS_Vmag,yerr=CRTS_Verr,fmt='v',color='gold',label='CRTS',alpha=0.75)
CRTS_medmjd,CRTS_medmag,CRTS_medmagerr = smoothCRTS(CRTS_MJD,CRTS_Vmag,CRTS_Verr)
ax1.errorbar(CRTS_medmjd,CRTS_medmag,yerr=CRTS_medmagerr,fmt='v',color='brown',alpha=0.75)
ax1.set_ylim(ax1.get_ylim()[::-1])
ax1.set(xlabel='MJD',ylabel='V-mag')
ax1_ylim = ax1.get_ylim()
ax1.legend(loc=1)
ax1.axvline(clqC['mjd1'][i],ls='--',color='black',lw=3,zorder=-1,alpha=0.45)
ax1.text(clqC['mjd1'][i], ax1_ylim[0]+0.2*(ax1_ylim[1] - ax1_ylim[0]),str(clqC['mjd1'][i]),fontsize=12,rotation='vertical')
ax1.axvline(clqC['mjd2'][i],ls='--',color='red',lw=3,zorder=-1,alpha=0.45)
ax1.text(clqC['mjd2'][i], ax1_ylim[0]+0.2*(ax1_ylim[1] - ax1_ylim[0]),str(clqC['mjd2'][i]),fontsize=12,rotation='vertical')
for mm in dupmjd:
ax1.axvline(mm,ls='--',color='blue',lw=3,zorder=-1,alpha=0.45)
ax1.text(mm, ax1_ylim[0]+0.2*(ax1_ylim[1] - ax1_ylim[0]),str(mm),fontsize=12,rotation='vertical')
fig.tight_layout()
fig.savefig(pp,format='pdf')
print>>out, '{0}\t{1}\t{2:10.5f}\t{3:10.5f}\t{4:10.5f}\t{5:10.5f}\t{6:10.5f}\t{7:10.5f}\t{8:10.5f}\t{9:10.5f}'.format(clqC['name'][i],clqC['nepoch'][i],clqC['ra'][i],clqC['dec'][i],clqC['zvi'][i], master['umag2'][xx] -master['umag1'][xx] , master['gmag2'][xx]-master['gmag1'][xx], master['rmag2'][xx]-master['rmag1'][xx], master['imag2'][xx]-master['imag1'][xx], master['zmag2'][xx] - master['zmag1'][xx])
out.close()
pp.close()
def main():
print '-------'
#checkUniqueN()
#MultipleEpochQSOs()
#download_spectraCLQ()
plot_spectra()
if __name__== "__main__":
main()
| FITS_FILENAME = 'spec-{0:04d}-{1:05d}-{2:04d}.fits'.format(plates[j],mjds[j],fibers[j]) | conditional_block |
lib.rs | /*!
Generate lexicographically-evenly-spaced strings between two strings
from pre-defined alphabets.
This is a rewrite of [mudderjs](https://github.com/fasiha/mudderjs); thanks
to the author and their contributors for the original work!
## Usage
Add a dependency in your Cargo.toml:
```toml
mudders = "0.0.4"
```
Now you can generate lexicographically-spaced strings in a few different ways:
```
use mudders::SymbolTable;
// The mudder method takes a NonZeroUsize as the amount,
// so you cannot pass in an invalid value.
use std::num::NonZeroUsize;
// You can use the included alphabet table
let table = SymbolTable::alphabet();
// SymbolTable::mudder() returns a Vec containing `amount` Strings.
let result = table.mudder_one("a", "z").unwrap();
// These strings are always lexicographically placed between `start` and `end`.
let one_str = result.as_str();
assert!(one_str > "a");
assert!(one_str < "z");
// You can also define your own symbol tables
let table = SymbolTable::from_chars(&['a', 'b']).unwrap();
let result = table.mudder("a", "b", NonZeroUsize::new(2).unwrap()).unwrap();
assert_eq!(result.len(), 2);
assert!(result[0].as_str() > "a" && result[1].as_str() > "a");
assert!(result[0].as_str() < "b" && result[1].as_str() < "b");
// The strings *should* be evenly-spaced and as short as they can be.
let table = SymbolTable::alphabet();
let result = table.mudder("anhui", "azazel", NonZeroUsize::new(3).unwrap()).unwrap();
assert_eq!(result.len(), 3);
assert_eq!(vec!["aq", "as", "av"], result);
```
## Notes
The most notable difference to Mudder.js is that currently, mudders only
supports ASCII characters (because 127 characters ought to be enough for
everyone™). Our default `::alphabet()` also only has lowercase letters.
*/
use core::num::NonZeroUsize;
use std::{convert::TryFrom, str::FromStr};
#[macro_use]
pub mod error;
use error::*;
/// The functionality of the crate lives here.
///
/// A symbol table is, internally, a vector of valid ASCII bytes that are used
/// to generate lexicographically evenly-spaced strings.
#[derive(Clone, Debug)]
pub struct SymbolTable(Vec<u8>);
impl SymbolTable {
/// Creates a new symbol table from the given byte slice.
/// The slice is internally sorted using `.sort()`.
///
/// An error is returned if one of the given bytes is out of ASCII range.
pub fn new(source: &[u8]) -> Result<Self, CreationError> {
ensure! { !source.is_empty(), CreationError::EmptySlice }
ensure! { all_chars_ascii(&source), NonAsciiError::NonAsciiU8 }
// Copy the values, we need to own them anyways...
let mut vec: Vec<_> = source.iter().copied().collect();
// Sort them so they're actually in order.
// (You can pass in ['b', 'a'], but that's not usable internally I think.)
vec.sort();
vec.dedup();
Ok(Self(vec))
}
/// Creates a new symbol table from the given characters.
/// The slice is internally sorted using `.sort()`.
///
/// An error is returned if one of the given characters is not ASCII.
pub fn from_chars(source: &[char]) -> Result<Self, CreationError> {
let inner: Box<[u8]> = source
.iter()
.map(|c| try_ascii_u8_from_char(*c))
.collect::<Result<_, _>>()?;
Ok(Self::new(&inner)?)
}
/// Returns a SymbolTable which contains the lowercase latin alphabet (`[a-z]`).
#[allow(clippy::char_lit_as_u8)]
pub fn alphabet() -> Self {
Self::new(&('a' as u8..='z' as u8).collect::<Box<[_]>>()).unwrap()
}
/// Generate `amount` strings that lexicographically sort between `start` and `end`.
/// The algorithm will try to make them as evenly-spaced as possible.
///
/// When both parameters are empty strings, `amount` new strings that are
/// in lexicographical order are returned.
///
/// If parameter `b` is lexicographically before `a`, they are swapped internally.
///
/// ```
/// # use mudders::SymbolTable;
/// # use std::num::NonZeroUsize;
/// // Using the included alphabet table
/// let table = SymbolTable::alphabet();
/// // Generate 10 strings from scratch
/// let results = table.mudder("", "", NonZeroUsize::new(10).unwrap()).unwrap();
/// assert!(results.len() == 10);
/// // results should look something like ["b", "d", "f", ..., "r", "t"]
/// ```
pub fn mudder(
&self,
a: &str,
b: &str,
amount: NonZeroUsize,
) -> Result<Vec<String>, GenerationError> {
use error::InternalError::*;
use GenerationError::*;
ensure! { all_chars_ascii(a), NonAsciiError::NonAsciiU8 }
ensure! { all_chars_ascii(b), NonAsciiError::NonAsciiU8 }
ensure! { self.contains_all_chars(a), UnknownCharacters(a.to_string()) }
ensure! { self.contains_all_chars(b), UnknownCharacters(b.to_string()) }
let (a, b) = if a.is_empty() || b.is_empty() {
// If an argument is empty, keep the order
(a, b)
} else if b < a {
// If they're not empty and b is lexicographically prior to a, swap them
(b, a)
} else {
// You can't generate values between two matching strings.
ensure! { a != b, MatchingStrings(a.to_string()) }
// In any other case, keep the order
(a, b)
};
// TODO: Check for lexicographical adjacency!
//ensure! { !lex_adjacent(a, b), LexAdjacentStrings(a.to_string(), b.to_string()) }
// Count the characters start and end have in common.
let matching_count: usize = {
// Iterate through the chars of both given inputs...
let (mut start_chars, mut end_chars) = (a.chars(), b.chars());
// We need to keep track of this, because:
// In the case of `a` == `"a"` and `b` == `"aab"`,
// we actually need to compare `""` to `"b"` later on, not `""` to `"a"`.
let mut last_start_char = '\0';
// Counting to get the index.
let mut i: usize = 0;
loop {
// Advance the iterators...
match (start_chars.next(), end_chars.next()) {
// As long as there's two characters that match, increment i.
(Some(sc), Some(ec)) if sc == ec => {
last_start_char = sc;
i += 1;
continue;
}
// If start_chars have run out, but end_chars haven't, check
// if the current end char matches the last start char.
// If it does, we still need to increment our counter.
(None, Some(ec)) if ec == last_start_char => {
i += 1;
continue;
}
// break with i as soon as any mismatch happens or both iterators run out.
// matching_count will either be 0, indicating that there's
// no leading common pattern, or something other than 0, in
// that case it's the count of common characters.
(None, None) | (Some(_), None) | (None, Some(_)) | (Some(_), Some(_)) => {
break i
}
}
}
};
        // Count the number to add to the total requested amount.
// If a or b is empty, we need one item less in the pool;
// two items less if both are empty.
let non_empty_input_count = [a, b].iter().filter(|s| !s.is_empty()).count();
// For convenience
let computed_amount = || amount.get() + non_empty_input_count;
// Calculate the distance between the first non-matching characters.
// If matching_count is greater than 0, we have leading common chars,
// so we skip those, but add the amount to the depth base.
let branching_factor = self.distance_between_first_chars(
// v--- matching_count might be higher than a.len()
// vvv because we might count past a's end
&a[std::cmp::min(matching_count, a.len())..],
&b[matching_count..],
)?;
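        // Worked example: for a = "anhui", b = "azazel" the inputs share the
        // leading "a" (matching_count = 1); the next characters are 'n' and 'z',
        // so the branching factor is ('z' - 'n') + 1 = 13.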
// We also add matching_count to the depth because if we're starting
// with a common prefix, we have at least x leading characters that
// will be the same for all substrings.
let mut depth =
depth_for(dbg!(branching_factor), dbg!(computed_amount())) + dbg!(matching_count);
// if branching_factor == 1 {
// // This should only be the case when we have an input like `"z", ""`.
// // In this case, we can generate strings after the z, but we need
// // to go one level deeper in any case.
// depth += 1;
// }
// TODO: Maybe keeping this as an iterator would be more efficient,
// but it would have to be cloned at least once to get the pool length.
let pool: Vec<String> = self.traverse("".into(), a, b, dbg!(depth)).collect();
let pool = if (pool.len() as isize).saturating_sub(non_empty_input_count as isize)
< amount.get() as isize
{
depth += depth_for(branching_factor, computed_amount() + pool.len());
dbg!(self.traverse("".into(), a, b, dbg!(depth)).collect())
} else {
pool
};
if (pool.len() as isize).saturating_sub(non_empty_input_count as isize)
< amount.get() as isize
{
// We still don't have enough items, so bail
panic!(
"Internal error: Failed to calculate the correct tree depth!
This is a bug. Please report it at: https://github.com/Follpvosten/mudders/issues
and make sure to include the following information:
Symbols in table: {symbols:?}
Given inputs: {a:?}, {b:?}, amount: {amount}
matching_count: {m_count}
non_empty_input_count: {ne_input_count}
required pool length (computed amount): {comp_amount}
branching_factor: {b_factor}
final depth: {depth}
pool: {pool:?} (length: {pool_len})",
symbols = self.0.iter().map(|i| *i as char).collect::<Box<[_]>>(),
a = a,
b = b,
amount = amount,
m_count = matching_count,
ne_input_count = non_empty_input_count,
comp_amount = computed_amount(),
b_factor = branching_factor,
depth = depth,
pool = pool,
pool_len = pool.len(),
)
}
Ok(if amount.get() == 1 {
pool.get(pool.len() / 2)
.map(|item| vec![item.clone()])
.ok_or_else(|| FailedToGetMiddle)?
} else {
let step = computed_amount() as f64 / pool.len() as f64;
let mut counter = 0f64;
let mut last_value = 0;
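            // Walk the pool with a fractional stride so that the kept items are
            // spread as evenly as possible: an item is kept whenever the
            // accumulated counter crosses the next integer value.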
let result: Vec<_> = pool
.into_iter()
.filter(|_| {
counter += step;
let new_value = counter.floor() as usize;
if new_value > last_value {
last_value = new_value;
true
} else {
false
}
})
.take(amount.into())
.collect();
ensure! { result.len() == amount.get(), NotEnoughItemsInPool };
result
})
}
/// Convenience wrapper around `mudder` to generate exactly one string.
///
/// # Safety
/// This function calls `NonZeroUsize::new_unchecked(1)`.
pub fn mudder_one(&self, a: &str, b: &str) -> Result<String, GenerationError> {
self.mudder(a, b, unsafe { NonZeroUsize::new_unchecked(1) })
.map(|mut vec| vec.remove(0))
}
/// Convenience wrapper around `mudder` to generate an amount of fresh strings.
///
/// `SymbolTable.generate(amount)` is equivalent to `SymbolTable.mudder("", "", amount)`.
pub fn generate(&self, amount: NonZeroUsize) -> Result<Vec<String>, GenerationError> {
self.mudder("", "", amount)
}
/// Traverses a virtual tree of strings to the given depth.
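    /// For example, with the alphabet table, `traverse(String::new(), "a", "d", 1)`
    /// yields `["a", "b", "c", "d"]` (see the `traverse_alphabet` test below).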
fn traverse<'a>(
&'a self,
curr_key: String,
start: &'a str,
end: &'a str,
depth: usize,
) -> Box<dyn Iterator<Item = String> + 'a> {
if depth == 0 {
            // If we've reached depth 0, we don't go further.
Box::new(std::iter::empty())
} else {
// Generate all possible mutations on the current depth
Box::new(
self.0
.iter()
.filter_map(move |c| -> Option<Box<dyn Iterator<Item = String>>> {
// TODO: Performance - this probably still isn't the best option.
let key = {
let the_char = *c as char;
let mut string =
String::with_capacity(curr_key.len() + the_char.len_utf8());
string.push_str(&curr_key);
string.push(the_char);
string
};
// After the end key, we definitely do not continue.
if key.as_str() > end && !end.is_empty() {
None
} else if key.as_str() < start {
// If we're prior to the start key...
// ...and the start key is a subkey of the current key...
if start.starts_with(&key) {
// ...only traverse the subtree, ignoring the key itself.
Some(Box::new(self.traverse(key, start, end, depth - 1)))
} else {
None
}
} else {
// Traverse normally, returning both the parent and sub key,
// in all other cases.
if key.len() < 2 {
let iter = std::iter::once(key.clone());
Some(if key == end {
Box::new(iter)
} else {
Box::new(iter.chain(self.traverse(key, start, end, depth - 1)))
})
} else {
let first = key.chars().next().unwrap();
Some(if key.chars().all(|c| c == first) {
// If our characters are all the same,
// don't add key to the list, only the subtree.
Box::new(self.traverse(key, start, end, depth - 1))
} else {
Box::new(std::iter::once(key.clone()).chain(self.traverse(
key,
start,
end,
depth - 1,
)))
})
}
}
})
.flatten(),
)
}
}
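    /// Inclusive distance between the first characters of `start` and `end`,
    /// falling back to the table's last/first symbol when either side is empty;
    /// e.g. with the alphabet table, ("a", "b") gives 2 and ("", "") gives 26.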
fn distance_between_first_chars(
&self,
start: &str,
end: &str,
) -> Result<usize, GenerationError> {
use InternalError::WrongCharOrder;
// check the first character of both strings...
Ok(match (start.chars().next(), end.chars().next()) {
// if both have a first char, compare them.
(Some(start_char), Some(end_char)) => {
ensure! { start_char < end_char, WrongCharOrder(start_char, end_char) }
let distance =
try_ascii_u8_from_char(end_char)? - try_ascii_u8_from_char(start_char)?;
distance as usize + 1
}
// if only the start has a first char, compare it to our last possible symbol.
(Some(start_char), None) => {
let end_u8 = self.0.last().unwrap();
// In this case, we allow the start and end char to be equal.
// This is because you can generate something after the last char,
// but not before the first char.
// vv
ensure! { start_char <= *end_u8 as char, WrongCharOrder(start_char, *end_u8 as char) }
let distance = end_u8 - try_ascii_u8_from_char(start_char)?;
if distance == 0 {
2
} else {
distance as usize + 1
}
}
// if only the end has a first char, compare it to our first possible symbol.
(None, Some(end_char)) => {
let start_u8 = self.0.first().unwrap();
ensure! { *start_u8 <= end_char as u8, WrongCharOrder(*start_u8 as char, end_char) }
let distance = try_ascii_u8_from_char(end_char)? - start_u8;
if distance == 0 {
2
} else {
distance as usize + 1
}
}
// if there's no characters given, the whole symboltable is our range.
_ => self.0.len(),
})
}
fn contains_all_chars(&self, chars: impl AsRef<[u8]>) -> bool {
chars.as_ref().iter().all(|c| self.0.contains(c))
}
}
/// Calculate the required depth for the given values.
///
/// `branching_factor` is used as the logarithm base, `n_elements` as the
/// value, and the result is rounded up and cast to usize.
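/// For example, `depth_for(26, 10)` is `ceil(log_26(10)) = 1`, while
/// `depth_for(2, 5)` is `ceil(log_2(5)) = 3`.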
fn depth_for(branching_factor: usize, n_elements: usize) -> usize {
f64::log(n_elements as f64, branching_factor as f64).ceil() as usize
}
fn try_ascii_u8_from_char(c: char) -> Result<u8, NonAsciiError> {
u8::try_from(c as u32).map_err(NonAsciiError::from)
}
fn all_chars_ascii(chars: impl AsRef<[u8]>) -> bool {
chars.as_ref().iter().all(|i| i.is_ascii())
}
impl FromStr for SymbolTable {
type Err = CreationError;
fn from_str(s: &str) -> Result<Self, CreationError> {
Self::from_chars(&s.chars().collect::<Box<[_]>>())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::num::NonZeroUsize;
/// Create and unwrap a NonZeroUsize from the given usize.
fn n(n: usize) -> NonZeroUsize {
NonZeroUsize::new(n).unwrap()
}
// Public API tests:
#[test]
#[allow(clippy::char_lit_as_u8)]
fn valid_tables_work() {
assert!(SymbolTable::new(&[1, 2, 3, 4, 5]).is_ok());
assert!(SymbolTable::new(&[125, 126, 127]).is_ok());
// Possible, but to be discouraged
assert!(SymbolTable::new(&['a' as u8, 'f' as u8]).is_ok());
assert!(SymbolTable::from_chars(&['a', 'b', 'c']).is_ok());
assert!(SymbolTable::from_str("0123").is_ok());
}
#[test]
fn invalid_tables_error() {
assert!(SymbolTable::from_str("🍅😂👶🏻").is_err());
assert!(SymbolTable::from_chars(&['🍌', '🍣', '⛈']).is_err());
assert!(SymbolTable::new(&[128, 129, 130]).is_err());
assert!(SymbolTable::new(&[]).is_err());
assert!(SymbolTable::from_chars(&[]).is_err());
assert!(SymbolTable::from_str("").is_err());
}
#[test]
fn unknown_chars_error() {
use error::GenerationError::UnknownCharacters;
// You cannot pass in strings with characters not in the SymbolTable:
let table = SymbolTable::alphabet();
assert_eq!(
table.mudder_one("123", "()/"),
Err(UnknownCharacters("123".into()))
);
assert_eq!(
table.mudder_one("a", "123"),
Err(UnknownCharacters("123".into()))
);
assert_eq!(
table.mudder_one("0)(", "b"),
Err(UnknownCharacters("0)(".into()))
);
let table = SymbolTable::from_str("123").unwrap();
assert_eq!(
table.mudder_one("a", "b"),
Err(UnknownCharacters("a".into()))
);
assert_eq!(
table.mudder_one("456", "1"),
Err(UnknownCharacters("456".into()))
);
assert_eq!(
table.mudder_one("2", "abc"),
Err(UnknownCharacters("abc".into()))
);
}
#[test]
fn equal_strings_error() {
use error::GenerationError::MatchingStrings;
let table = SymbolTable::alphabet();
assert_eq!(
table.mudder_one("abc", "abc"),
Err(MatchingStrings("abc".into()))
);
assert_eq!(
table.mudder_one("xyz", "xyz"),
Err(MatchingStrings("xyz".into()))
);
}
// TODO: Make this test work.
// I need to find out how to tell if two strings are lexicographically inseparable.
// #[test]
// fn lexicographically_adjacent_strings_error() {
// assert!(SymbolTable::alphabet().mudder("ba", "baa", n(1)).is_err());
// }
#[test]
fn reasonable_values() {
let table = | utputs_more_or_less_match_mudderjs() {
let table = SymbolTable::from_str("abc").unwrap();
let result = table.mudder_one("a", "b").unwrap();
assert_eq!(result, "ac");
let table = SymbolTable::alphabet();
let result = table.mudder("anhui", "azazel", n(3)).unwrap();
assert_eq!(result.len(), 3);
assert_eq!(vec!["aq", "as", "av"], result);
}
#[test]
fn empty_start() {
let table = SymbolTable::from_str("abc").unwrap();
let result = table.mudder("", "c", n(2)).unwrap();
assert_eq!(result.len(), 2);
}
#[test]
fn empty_end() {
let table = SymbolTable::from_str("abc").unwrap();
let result = table.mudder("b", "", n(2)).unwrap();
assert_eq!(result.len(), 2);
}
#[test]
fn generate_before_ax() {
// While you can't generate anything before 'a' with alphabet(), you
// should be able to generate something before "a" + something else.
let table = SymbolTable::alphabet();
let result = table.mudder("", "axxx", n(10)).unwrap();
assert_eq!(result.len(), 10);
assert!(result.iter().all(|k| k.as_str() > "a"));
// Some more to be sure
assert!(table.mudder_one("", "ab").is_ok());
assert!(table.mudder_one("", "abc").is_ok());
}
#[test]
fn generate_after_z() {
let table = SymbolTable::alphabet();
let result = table.mudder("z", "", n(10)).unwrap();
assert_eq!(result.len(), 10);
assert!(result.iter().all(|k| k.as_str() > "z"));
}
#[test]
fn only_amount() {
let table = SymbolTable::alphabet();
let result = table.generate(n(10)).unwrap();
assert_eq!(result.len(), 10);
}
#[test]
fn values_sorting_correct() {
        let mut iter = SymbolTable::alphabet().generate(n(12)).unwrap().into_iter();
while let (Some(one), Some(two)) = (iter.next(), iter.next()) {
assert!(one < two);
}
}
#[test]
fn differing_input_lengths() {
let table = SymbolTable::alphabet();
let result = table.mudder_one("a", "ab").unwrap();
assert!(result.starts_with('a'));
}
#[test]
fn values_consistently_between_start_and_end() {
let table = SymbolTable::alphabet();
{
// From z to a
let mut right = String::from("z");
for _ in 0..500 {
let new_val = dbg!(table.mudder_one("a", &right).unwrap());
assert!(new_val < right);
assert!(new_val.as_str() > "a");
right = new_val;
}
}
{
// And from a to z
let mut left = String::from("a");
// TODO: vv this test fails for higher numbers. FIXME!
for _ in 0..17 {
let new_val = dbg!(table.mudder_one(&left, "z").unwrap());
assert!(new_val > left);
assert!(new_val.as_str() < "z");
left = new_val;
}
}
}
// Internal/private method tests:
#[test]
fn traverse_alphabet() {
fn traverse_alphabet(a: &str, b: &str, depth: usize) -> Vec<String> {
SymbolTable::alphabet()
.traverse("".into(), a, b, depth)
.collect()
}
assert_eq!(traverse_alphabet("a", "d", 1), vec!["a", "b", "c", "d"]);
assert_eq!(
traverse_alphabet("a", "z", 1),
('a' as u32 as u8..='z' as u32 as u8)
.map(|c| (c as char).to_string())
.collect::<Vec<_>>()
);
assert_eq!(
traverse_alphabet("a", "b", 2),
vec![
"a", "ab", "ac", "ad", "ae", "af", "ag", "ah", "ai", "aj", "ak", "al", "am", "an",
"ao", "ap", "aq", "ar", "as", "at", "au", "av", "aw", "ax", "ay", "az", "b"
]
)
}
#[test]
fn traverse_custom() {
fn traverse(table: &str, a: &str, b: &str, depth: usize) -> Vec<String> {
let table = SymbolTable::from_str(table).unwrap();
table.traverse("".into(), a, b, depth).collect()
}
assert_eq!(traverse("abc", "a", "c", 1), vec!["a", "b", "c"]);
assert_eq!(
traverse("abc", "a", "c", 2),
vec!["a", "ab", "ac", "b", "ba", "bc", "c"]
);
assert_eq!(
traverse("0123456789", "1", "2", 2),
vec!["1", "10", "12", "13", "14", "15", "16", "17", "18", "19", "2"]
);
}
#[test]
fn distance_between_first_chars_correct() {
let table = SymbolTable::alphabet();
assert_eq!(table.distance_between_first_chars("a", "b").unwrap(), 2);
assert_eq!(table.distance_between_first_chars("a", "z").unwrap(), 26);
assert_eq!(table.distance_between_first_chars("", "").unwrap(), 26);
assert_eq!(table.distance_between_first_chars("n", "").unwrap(), 13);
assert_eq!(table.distance_between_first_chars("", "n").unwrap(), 14);
assert_eq!(table.distance_between_first_chars("y", "z").unwrap(), 2);
assert_eq!(table.distance_between_first_chars("a", "y").unwrap(), 25);
assert_eq!(
table.distance_between_first_chars("aaaa", "zzzz").unwrap(),
table.distance_between_first_chars("aa", "zz").unwrap()
);
let table = SymbolTable::from_str("12345").unwrap();
assert_eq!(table.distance_between_first_chars("1", "2").unwrap(), 2);
assert_eq!(table.distance_between_first_chars("1", "3").unwrap(), 3);
assert_eq!(table.distance_between_first_chars("2", "3").unwrap(), 2);
}
}
| SymbolTable::from_str("ab").unwrap();
let result = table.mudder_one("a", "b").unwrap();
assert_eq!(result, "ab");
let table = SymbolTable::from_str("0123456789").unwrap();
let result = table.mudder_one("1", "2").unwrap();
assert_eq!(result, "15");
}
#[test]
fn o | identifier_body |
lib.rs | /*!
Generate lexicographically-evenly-spaced strings between two strings
from pre-defined alphabets.
This is a rewrite of [mudderjs](https://github.com/fasiha/mudderjs); thanks
to the author and their contributors for the original work!
## Usage
Add a dependency in your Cargo.toml:
```toml
mudders = "0.0.4"
```
Now you can generate lexicographically-spaced strings in a few different ways:
```
use mudders::SymbolTable;
// The mudder method takes a NonZeroUsize as the amount, | // SymbolTable::mudder() returns a Vec containing `amount` Strings.
let result = table.mudder_one("a", "z").unwrap();
// These strings are always lexicographically placed between `start` and `end`.
let one_str = result.as_str();
assert!(one_str > "a");
assert!(one_str < "z");
// You can also define your own symbol tables
let table = SymbolTable::from_chars(&['a', 'b']).unwrap();
let result = table.mudder("a", "b", NonZeroUsize::new(2).unwrap()).unwrap();
assert_eq!(result.len(), 2);
assert!(result[0].as_str() > "a" && result[1].as_str() > "a");
assert!(result[0].as_str() < "b" && result[1].as_str() < "b");
// The strings *should* be evenly-spaced and as short as they can be.
let table = SymbolTable::alphabet();
let result = table.mudder("anhui", "azazel", NonZeroUsize::new(3).unwrap()).unwrap();
assert_eq!(result.len(), 3);
assert_eq!(vec!["aq", "as", "av"], result);
```
## Notes
The most notable difference to Mudder.js is that currently, mudders only
supports ASCII characters (because 127 characters ought to be enough for
everyone™). Our default `::alphabet()` also only has lowercase letters.
*/
use core::num::NonZeroUsize;
use std::{convert::TryFrom, str::FromStr};
#[macro_use]
pub mod error;
use error::*;
/// The functionality of the crate lives here.
///
/// A symbol table is, internally, a vector of valid ASCII bytes that are used
/// to generate lexicographically evenly-spaced strings.
#[derive(Clone, Debug)]
pub struct SymbolTable(Vec<u8>);
impl SymbolTable {
/// Creates a new symbol table from the given byte slice.
/// The slice is internally sorted using `.sort()`.
///
/// An error is returned if one of the given bytes is out of ASCII range.
pub fn new(source: &[u8]) -> Result<Self, CreationError> {
ensure! { !source.is_empty(), CreationError::EmptySlice }
ensure! { all_chars_ascii(&source), NonAsciiError::NonAsciiU8 }
// Copy the values, we need to own them anyways...
let mut vec: Vec<_> = source.iter().copied().collect();
// Sort them so they're actually in order.
// (You can pass in ['b', 'a'], but that's not usable internally I think.)
vec.sort();
vec.dedup();
Ok(Self(vec))
}
/// Creates a new symbol table from the given characters.
/// The slice is internally sorted using `.sort()`.
///
/// An error is returned if one of the given characters is not ASCII.
pub fn from_chars(source: &[char]) -> Result<Self, CreationError> {
let inner: Box<[u8]> = source
.iter()
.map(|c| try_ascii_u8_from_char(*c))
.collect::<Result<_, _>>()?;
Ok(Self::new(&inner)?)
}
/// Returns a SymbolTable which contains the lowercase latin alphabet (`[a-z]`).
#[allow(clippy::char_lit_as_u8)]
pub fn alphabet() -> Self {
Self::new(&('a' as u8..='z' as u8).collect::<Box<[_]>>()).unwrap()
}
/// Generate `amount` strings that lexicographically sort between `start` and `end`.
/// The algorithm will try to make them as evenly-spaced as possible.
///
/// When both parameters are empty strings, `amount` new strings that are
/// in lexicographical order are returned.
///
/// If parameter `b` is lexicographically before `a`, they are swapped internally.
///
/// ```
/// # use mudders::SymbolTable;
/// # use std::num::NonZeroUsize;
/// // Using the included alphabet table
/// let table = SymbolTable::alphabet();
/// // Generate 10 strings from scratch
/// let results = table.mudder("", "", NonZeroUsize::new(10).unwrap()).unwrap();
/// assert!(results.len() == 10);
/// // results should look something like ["b", "d", "f", ..., "r", "t"]
/// ```
pub fn mudder(
&self,
a: &str,
b: &str,
amount: NonZeroUsize,
) -> Result<Vec<String>, GenerationError> {
use error::InternalError::*;
use GenerationError::*;
ensure! { all_chars_ascii(a), NonAsciiError::NonAsciiU8 }
ensure! { all_chars_ascii(b), NonAsciiError::NonAsciiU8 }
ensure! { self.contains_all_chars(a), UnknownCharacters(a.to_string()) }
ensure! { self.contains_all_chars(b), UnknownCharacters(b.to_string()) }
let (a, b) = if a.is_empty() || b.is_empty() {
// If an argument is empty, keep the order
(a, b)
} else if b < a {
// If they're not empty and b is lexicographically prior to a, swap them
(b, a)
} else {
// You can't generate values between two matching strings.
ensure! { a != b, MatchingStrings(a.to_string()) }
// In any other case, keep the order
(a, b)
};
// TODO: Check for lexicographical adjacency!
//ensure! { !lex_adjacent(a, b), LexAdjacentStrings(a.to_string(), b.to_string()) }
// Count the characters start and end have in common.
let matching_count: usize = {
// Iterate through the chars of both given inputs...
let (mut start_chars, mut end_chars) = (a.chars(), b.chars());
// We need to keep track of this, because:
// In the case of `a` == `"a"` and `b` == `"aab"`,
// we actually need to compare `""` to `"b"` later on, not `""` to `"a"`.
let mut last_start_char = '\0';
// Counting to get the index.
let mut i: usize = 0;
loop {
// Advance the iterators...
match (start_chars.next(), end_chars.next()) {
// As long as there's two characters that match, increment i.
(Some(sc), Some(ec)) if sc == ec => {
last_start_char = sc;
i += 1;
continue;
}
// If start_chars have run out, but end_chars haven't, check
// if the current end char matches the last start char.
// If it does, we still need to increment our counter.
(None, Some(ec)) if ec == last_start_char => {
i += 1;
continue;
}
// break with i as soon as any mismatch happens or both iterators run out.
// matching_count will either be 0, indicating that there's
// no leading common pattern, or something other than 0, in
// that case it's the count of common characters.
(None, None) | (Some(_), None) | (None, Some(_)) | (Some(_), Some(_)) => {
break i
}
}
}
};
        // Count the number to add to the total requested amount.
// If a or b is empty, we need one item less in the pool;
// two items less if both are empty.
let non_empty_input_count = [a, b].iter().filter(|s| !s.is_empty()).count();
// For convenience
let computed_amount = || amount.get() + non_empty_input_count;
// Calculate the distance between the first non-matching characters.
// If matching_count is greater than 0, we have leading common chars,
// so we skip those, but add the amount to the depth base.
let branching_factor = self.distance_between_first_chars(
// v--- matching_count might be higher than a.len()
// vvv because we might count past a's end
&a[std::cmp::min(matching_count, a.len())..],
&b[matching_count..],
)?;
// We also add matching_count to the depth because if we're starting
// with a common prefix, we have at least x leading characters that
// will be the same for all substrings.
let mut depth =
depth_for(dbg!(branching_factor), dbg!(computed_amount())) + dbg!(matching_count);
// if branching_factor == 1 {
// // This should only be the case when we have an input like `"z", ""`.
// // In this case, we can generate strings after the z, but we need
// // to go one level deeper in any case.
// depth += 1;
// }
// TODO: Maybe keeping this as an iterator would be more efficient,
// but it would have to be cloned at least once to get the pool length.
let pool: Vec<String> = self.traverse("".into(), a, b, dbg!(depth)).collect();
let pool = if (pool.len() as isize).saturating_sub(non_empty_input_count as isize)
< amount.get() as isize
{
depth += depth_for(branching_factor, computed_amount() + pool.len());
dbg!(self.traverse("".into(), a, b, dbg!(depth)).collect())
} else {
pool
};
if (pool.len() as isize).saturating_sub(non_empty_input_count as isize)
< amount.get() as isize
{
// We still don't have enough items, so bail
panic!(
"Internal error: Failed to calculate the correct tree depth!
This is a bug. Please report it at: https://github.com/Follpvosten/mudders/issues
and make sure to include the following information:
Symbols in table: {symbols:?}
Given inputs: {a:?}, {b:?}, amount: {amount}
matching_count: {m_count}
non_empty_input_count: {ne_input_count}
required pool length (computed amount): {comp_amount}
branching_factor: {b_factor}
final depth: {depth}
pool: {pool:?} (length: {pool_len})",
symbols = self.0.iter().map(|i| *i as char).collect::<Box<[_]>>(),
a = a,
b = b,
amount = amount,
m_count = matching_count,
ne_input_count = non_empty_input_count,
comp_amount = computed_amount(),
b_factor = branching_factor,
depth = depth,
pool = pool,
pool_len = pool.len(),
)
}
Ok(if amount.get() == 1 {
pool.get(pool.len() / 2)
.map(|item| vec![item.clone()])
.ok_or_else(|| FailedToGetMiddle)?
} else {
let step = computed_amount() as f64 / pool.len() as f64;
let mut counter = 0f64;
let mut last_value = 0;
let result: Vec<_> = pool
.into_iter()
.filter(|_| {
counter += step;
let new_value = counter.floor() as usize;
if new_value > last_value {
last_value = new_value;
true
} else {
false
}
})
.take(amount.into())
.collect();
ensure! { result.len() == amount.get(), NotEnoughItemsInPool };
result
})
}
/// Convenience wrapper around `mudder` to generate exactly one string.
///
/// # Safety
/// This function calls `NonZeroUsize::new_unchecked(1)`.
pub fn mudder_one(&self, a: &str, b: &str) -> Result<String, GenerationError> {
self.mudder(a, b, unsafe { NonZeroUsize::new_unchecked(1) })
.map(|mut vec| vec.remove(0))
}
/// Convenience wrapper around `mudder` to generate an amount of fresh strings.
///
/// `SymbolTable.generate(amount)` is equivalent to `SymbolTable.mudder("", "", amount)`.
pub fn generate(&self, amount: NonZeroUsize) -> Result<Vec<String>, GenerationError> {
self.mudder("", "", amount)
}
/// Traverses a virtual tree of strings to the given depth.
fn traverse<'a>(
&'a self,
curr_key: String,
start: &'a str,
end: &'a str,
depth: usize,
) -> Box<dyn Iterator<Item = String> + 'a> {
if depth == 0 {
            // If we've reached depth 0, we don't go further.
Box::new(std::iter::empty())
} else {
// Generate all possible mutations on the current depth
Box::new(
self.0
.iter()
.filter_map(move |c| -> Option<Box<dyn Iterator<Item = String>>> {
// TODO: Performance - this probably still isn't the best option.
let key = {
let the_char = *c as char;
let mut string =
String::with_capacity(curr_key.len() + the_char.len_utf8());
string.push_str(&curr_key);
string.push(the_char);
string
};
// After the end key, we definitely do not continue.
if key.as_str() > end && !end.is_empty() {
None
} else if key.as_str() < start {
// If we're prior to the start key...
// ...and the start key is a subkey of the current key...
if start.starts_with(&key) {
// ...only traverse the subtree, ignoring the key itself.
Some(Box::new(self.traverse(key, start, end, depth - 1)))
} else {
None
}
} else {
// Traverse normally, returning both the parent and sub key,
// in all other cases.
if key.len() < 2 {
let iter = std::iter::once(key.clone());
Some(if key == end {
Box::new(iter)
} else {
Box::new(iter.chain(self.traverse(key, start, end, depth - 1)))
})
} else {
let first = key.chars().next().unwrap();
Some(if key.chars().all(|c| c == first) {
// If our characters are all the same,
// don't add key to the list, only the subtree.
Box::new(self.traverse(key, start, end, depth - 1))
} else {
Box::new(std::iter::once(key.clone()).chain(self.traverse(
key,
start,
end,
depth - 1,
)))
})
}
}
})
.flatten(),
)
}
}
fn distance_between_first_chars(
&self,
start: &str,
end: &str,
) -> Result<usize, GenerationError> {
use InternalError::WrongCharOrder;
// check the first character of both strings...
Ok(match (start.chars().next(), end.chars().next()) {
// if both have a first char, compare them.
(Some(start_char), Some(end_char)) => {
ensure! { start_char < end_char, WrongCharOrder(start_char, end_char) }
let distance =
try_ascii_u8_from_char(end_char)? - try_ascii_u8_from_char(start_char)?;
distance as usize + 1
}
// if only the start has a first char, compare it to our last possible symbol.
(Some(start_char), None) => {
let end_u8 = self.0.last().unwrap();
// In this case, we allow the start and end char to be equal.
// This is because you can generate something after the last char,
// but not before the first char.
// vv
ensure! { start_char <= *end_u8 as char, WrongCharOrder(start_char, *end_u8 as char) }
let distance = end_u8 - try_ascii_u8_from_char(start_char)?;
if distance == 0 {
2
} else {
distance as usize + 1
}
}
// if only the end has a first char, compare it to our first possible symbol.
(None, Some(end_char)) => {
let start_u8 = self.0.first().unwrap();
ensure! { *start_u8 <= end_char as u8, WrongCharOrder(*start_u8 as char, end_char) }
let distance = try_ascii_u8_from_char(end_char)? - start_u8;
if distance == 0 {
2
} else {
distance as usize + 1
}
}
// if there's no characters given, the whole symboltable is our range.
_ => self.0.len(),
})
}
fn contains_all_chars(&self, chars: impl AsRef<[u8]>) -> bool {
chars.as_ref().iter().all(|c| self.0.contains(c))
}
}
/// Calculate the required depth for the given values.
///
/// `branching_factor` is used as the logarithm base, `n_elements` as the
/// value, and the result is rounded up and cast to usize.
fn depth_for(branching_factor: usize, n_elements: usize) -> usize {
f64::log(n_elements as f64, branching_factor as f64).ceil() as usize
}
fn try_ascii_u8_from_char(c: char) -> Result<u8, NonAsciiError> {
u8::try_from(c as u32).map_err(NonAsciiError::from)
}
fn all_chars_ascii(chars: impl AsRef<[u8]>) -> bool {
chars.as_ref().iter().all(|i| i.is_ascii())
}
impl FromStr for SymbolTable {
type Err = CreationError;
fn from_str(s: &str) -> Result<Self, CreationError> {
Self::from_chars(&s.chars().collect::<Box<[_]>>())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::num::NonZeroUsize;
/// Create and unwrap a NonZeroUsize from the given usize.
fn n(n: usize) -> NonZeroUsize {
NonZeroUsize::new(n).unwrap()
}
// Public API tests:
#[test]
#[allow(clippy::char_lit_as_u8)]
fn valid_tables_work() {
assert!(SymbolTable::new(&[1, 2, 3, 4, 5]).is_ok());
assert!(SymbolTable::new(&[125, 126, 127]).is_ok());
// Possible, but to be discouraged
assert!(SymbolTable::new(&['a' as u8, 'f' as u8]).is_ok());
assert!(SymbolTable::from_chars(&['a', 'b', 'c']).is_ok());
assert!(SymbolTable::from_str("0123").is_ok());
}
#[test]
fn invalid_tables_error() {
assert!(SymbolTable::from_str("🍅😂👶🏻").is_err());
assert!(SymbolTable::from_chars(&['🍌', '🍣', '⛈']).is_err());
assert!(SymbolTable::new(&[128, 129, 130]).is_err());
assert!(SymbolTable::new(&[]).is_err());
assert!(SymbolTable::from_chars(&[]).is_err());
assert!(SymbolTable::from_str("").is_err());
}
#[test]
fn unknown_chars_error() {
use error::GenerationError::UnknownCharacters;
// You cannot pass in strings with characters not in the SymbolTable:
let table = SymbolTable::alphabet();
assert_eq!(
table.mudder_one("123", "()/"),
Err(UnknownCharacters("123".into()))
);
assert_eq!(
table.mudder_one("a", "123"),
Err(UnknownCharacters("123".into()))
);
assert_eq!(
table.mudder_one("0)(", "b"),
Err(UnknownCharacters("0)(".into()))
);
let table = SymbolTable::from_str("123").unwrap();
assert_eq!(
table.mudder_one("a", "b"),
Err(UnknownCharacters("a".into()))
);
assert_eq!(
table.mudder_one("456", "1"),
Err(UnknownCharacters("456".into()))
);
assert_eq!(
table.mudder_one("2", "abc"),
Err(UnknownCharacters("abc".into()))
);
}
#[test]
fn equal_strings_error() {
use error::GenerationError::MatchingStrings;
let table = SymbolTable::alphabet();
assert_eq!(
table.mudder_one("abc", "abc"),
Err(MatchingStrings("abc".into()))
);
assert_eq!(
table.mudder_one("xyz", "xyz"),
Err(MatchingStrings("xyz".into()))
);
}
// TODO: Make this test work.
// I need to find out how to tell if two strings are lexicographically inseparable.
// #[test]
// fn lexicographically_adjacent_strings_error() {
// assert!(SymbolTable::alphabet().mudder("ba", "baa", n(1)).is_err());
// }
#[test]
fn reasonable_values() {
let table = SymbolTable::from_str("ab").unwrap();
let result = table.mudder_one("a", "b").unwrap();
assert_eq!(result, "ab");
let table = SymbolTable::from_str("0123456789").unwrap();
let result = table.mudder_one("1", "2").unwrap();
assert_eq!(result, "15");
}
#[test]
fn outputs_more_or_less_match_mudderjs() {
let table = SymbolTable::from_str("abc").unwrap();
let result = table.mudder_one("a", "b").unwrap();
assert_eq!(result, "ac");
let table = SymbolTable::alphabet();
let result = table.mudder("anhui", "azazel", n(3)).unwrap();
assert_eq!(result.len(), 3);
assert_eq!(vec!["aq", "as", "av"], result);
}
#[test]
fn empty_start() {
let table = SymbolTable::from_str("abc").unwrap();
let result = table.mudder("", "c", n(2)).unwrap();
assert_eq!(result.len(), 2);
}
#[test]
fn empty_end() {
let table = SymbolTable::from_str("abc").unwrap();
let result = table.mudder("b", "", n(2)).unwrap();
assert_eq!(result.len(), 2);
}
#[test]
fn generate_before_ax() {
// While you can't generate anything before 'a' with alphabet(), you
// should be able to generate something before "a" + something else.
let table = SymbolTable::alphabet();
let result = table.mudder("", "axxx", n(10)).unwrap();
assert_eq!(result.len(), 10);
assert!(result.iter().all(|k| k.as_str() > "a"));
// Some more to be sure
assert!(table.mudder_one("", "ab").is_ok());
assert!(table.mudder_one("", "abc").is_ok());
}
#[test]
fn generate_after_z() {
let table = SymbolTable::alphabet();
let result = table.mudder("z", "", n(10)).unwrap();
assert_eq!(result.len(), 10);
assert!(result.iter().all(|k| k.as_str() > "z"));
}
#[test]
fn only_amount() {
let table = SymbolTable::alphabet();
let result = table.generate(n(10)).unwrap();
assert_eq!(result.len(), 10);
}
#[test]
fn values_sorting_correct() {
let mut iter = SymbolTable::alphabet().generate(n(12)).into_iter();
while let (Some(one), Some(two)) = (iter.next(), iter.next()) {
assert!(one < two);
}
}
#[test]
fn differing_input_lengths() {
let table = SymbolTable::alphabet();
let result = table.mudder_one("a", "ab").unwrap();
assert!(result.starts_with('a'));
}
#[test]
fn values_consistently_between_start_and_end() {
let table = SymbolTable::alphabet();
{
// From z to a
let mut right = String::from("z");
for _ in 0..500 {
let new_val = dbg!(table.mudder_one("a", &right).unwrap());
assert!(new_val < right);
assert!(new_val.as_str() > "a");
right = new_val;
}
}
{
// And from a to z
let mut left = String::from("a");
// TODO: vv this test fails for higher numbers. FIXME!
for _ in 0..17 {
let new_val = dbg!(table.mudder_one(&left, "z").unwrap());
assert!(new_val > left);
assert!(new_val.as_str() < "z");
left = new_val;
}
}
}
// Internal/private method tests:
#[test]
fn traverse_alphabet() {
fn traverse_alphabet(a: &str, b: &str, depth: usize) -> Vec<String> {
SymbolTable::alphabet()
.traverse("".into(), a, b, depth)
.collect()
}
assert_eq!(traverse_alphabet("a", "d", 1), vec!["a", "b", "c", "d"]);
assert_eq!(
traverse_alphabet("a", "z", 1),
('a' as u32 as u8..='z' as u32 as u8)
.map(|c| (c as char).to_string())
.collect::<Vec<_>>()
);
assert_eq!(
traverse_alphabet("a", "b", 2),
vec![
"a", "ab", "ac", "ad", "ae", "af", "ag", "ah", "ai", "aj", "ak", "al", "am", "an",
"ao", "ap", "aq", "ar", "as", "at", "au", "av", "aw", "ax", "ay", "az", "b"
]
)
}
#[test]
fn traverse_custom() {
fn traverse(table: &str, a: &str, b: &str, depth: usize) -> Vec<String> {
let table = SymbolTable::from_str(table).unwrap();
table.traverse("".into(), a, b, depth).collect()
}
assert_eq!(traverse("abc", "a", "c", 1), vec!["a", "b", "c"]);
assert_eq!(
traverse("abc", "a", "c", 2),
vec!["a", "ab", "ac", "b", "ba", "bc", "c"]
);
assert_eq!(
traverse("0123456789", "1", "2", 2),
vec!["1", "10", "12", "13", "14", "15", "16", "17", "18", "19", "2"]
);
}
#[test]
fn distance_between_first_chars_correct() {
let table = SymbolTable::alphabet();
assert_eq!(table.distance_between_first_chars("a", "b").unwrap(), 2);
assert_eq!(table.distance_between_first_chars("a", "z").unwrap(), 26);
assert_eq!(table.distance_between_first_chars("", "").unwrap(), 26);
assert_eq!(table.distance_between_first_chars("n", "").unwrap(), 13);
assert_eq!(table.distance_between_first_chars("", "n").unwrap(), 14);
assert_eq!(table.distance_between_first_chars("y", "z").unwrap(), 2);
assert_eq!(table.distance_between_first_chars("a", "y").unwrap(), 25);
assert_eq!(
table.distance_between_first_chars("aaaa", "zzzz").unwrap(),
table.distance_between_first_chars("aa", "zz").unwrap()
);
let table = SymbolTable::from_str("12345").unwrap();
assert_eq!(table.distance_between_first_chars("1", "2").unwrap(), 2);
assert_eq!(table.distance_between_first_chars("1", "3").unwrap(), 3);
assert_eq!(table.distance_between_first_chars("2", "3").unwrap(), 2);
}
} | // so you cannot pass in an invalid value.
use std::num::NonZeroUsize;
// You can use the included alphabet table
let table = SymbolTable::alphabet(); | random_line_split |
lib.rs | /*!
Generate lexicographically-evenly-spaced strings between two strings
from pre-defined alphabets.
This is a rewrite of [mudderjs](https://github.com/fasiha/mudderjs); thanks
for the original work of the author and their contributors!
## Usage
Add a dependency in your Cargo.toml:
```toml
mudders = "0.0.4"
```
Now you can generate lexicographically-spaced strings in a few different ways:
```
use mudders::SymbolTable;
// The mudder method takes a NonZeroUsize as the amount,
// so you cannot pass in an invalid value.
use std::num::NonZeroUsize;
// You can use the included alphabet table
let table = SymbolTable::alphabet();
// SymbolTable::mudder() returns a Vec containing `amount` Strings.
let result = table.mudder_one("a", "z").unwrap();
// These strings are always lexicographically placed between `start` and `end`.
let one_str = result.as_str();
assert!(one_str > "a");
assert!(one_str < "z");
// You can also define your own symbol tables
let table = SymbolTable::from_chars(&['a', 'b']).unwrap();
let result = table.mudder("a", "b", NonZeroUsize::new(2).unwrap()).unwrap();
assert_eq!(result.len(), 2);
assert!(result[0].as_str() > "a" && result[1].as_str() > "a");
assert!(result[0].as_str() < "b" && result[1].as_str() < "b");
// The strings *should* be evenly-spaced and as short as they can be.
let table = SymbolTable::alphabet();
let result = table.mudder("anhui", "azazel", NonZeroUsize::new(3).unwrap()).unwrap();
assert_eq!(result.len(), 3);
assert_eq!(vec!["aq", "as", "av"], result);
```
## Notes
The most notable difference to Mudder.js is that currently, mudders only
supports ASCII characters (because 127 characters ought to be enough for
everyone™). Our default `::alphabet()` also only has lowercase letters.
*/
use core::num::NonZeroUsize;
use std::{convert::TryFrom, str::FromStr};
#[macro_use]
pub mod error;
use error::*;
/// The functionality of the crate lives here.
///
/// A symbol table is, internally, a vector of valid ASCII bytes that are used
/// to generate lexicographically evenly-spaced strings.
#[derive(Clone, Debug)]
pub struct SymbolTable(Vec<u8>);
impl SymbolTable {
/// Creates a new symbol table from the given byte slice.
/// The slice is internally sorted using `.sort()`.
///
/// An error is returned if one of the given bytes is out of ASCII range.
pub fn new(source: &[u8]) -> Result<Self, CreationError> {
ensure! { !source.is_empty(), CreationError::EmptySlice }
ensure! { all_chars_ascii(&source), NonAsciiError::NonAsciiU8 }
// Copy the values, we need to own them anyways...
let mut vec: Vec<_> = source.iter().copied().collect();
// Sort them so they're actually in order.
// (You can pass in ['b', 'a'], but that's not usable internally I think.)
vec.sort();
vec.dedup();
Ok(Self(vec))
}
/// Creates a new symbol table from the given characters.
/// The slice is internally sorted using `.sort()`.
///
/// An error is returned if one of the given characters is not ASCII.
pub fn from_chars(source: &[char]) -> Result<Self, CreationError> {
let inner: Box<[u8]> = source
.iter()
.map(|c| try_ascii_u8_from_char(*c))
.collect::<Result<_, _>>()?;
Ok(Self::new(&inner)?)
}
/// Returns a SymbolTable which contains the lowercase latin alphabet (`[a-z]`).
#[allow(clippy::char_lit_as_u8)]
pub fn alphabet() -> Self {
Self::new(&('a' as u8..='z' as u8).collect::<Box<[_]>>()).unwrap()
}
/// Generate `amount` strings that lexicographically sort between `start` and `end`.
/// The algorithm will try to make them as evenly-spaced as possible.
///
/// When both parameters are empty strings, `amount` new strings that are
/// in lexicographical order are returned.
///
/// If parameter `b` is lexicographically before `a`, they are swapped internally.
///
/// ```
/// # use mudders::SymbolTable;
/// # use std::num::NonZeroUsize;
/// // Using the included alphabet table
/// let table = SymbolTable::alphabet();
/// // Generate 10 strings from scratch
/// let results = table.mudder("", "", NonZeroUsize::new(10).unwrap()).unwrap();
/// assert!(results.len() == 10);
/// // results should look something like ["b", "d", "f", ..., "r", "t"]
/// ```
pub fn mudder(
&self,
a: &str,
b: &str,
amount: NonZeroUsize,
) -> Result<Vec<String>, GenerationError> {
use error::InternalError::*;
use GenerationError::*;
ensure! { all_chars_ascii(a), NonAsciiError::NonAsciiU8 }
ensure! { all_chars_ascii(b), NonAsciiError::NonAsciiU8 }
ensure! { self.contains_all_chars(a), UnknownCharacters(a.to_string()) }
ensure! { self.contains_all_chars(b), UnknownCharacters(b.to_string()) }
let (a, b) = if a.is_empty() || b.is_empty() {
// If an argument is empty, keep the order
(a, b)
} else if b < a {
// If they're not empty and b is lexicographically prior to a, swap them
(b, a)
} else {
// You can't generate values between two matching strings.
ensure! { a != b, MatchingStrings(a.to_string()) }
// In any other case, keep the order
(a, b)
};
// TODO: Check for lexicographical adjacency!
//ensure! { !lex_adjacent(a, b), LexAdjacentStrings(a.to_string(), b.to_string()) }
// Count the characters start and end have in common.
let matching_count: usize = {
// Iterate through the chars of both given inputs...
let (mut start_chars, mut end_chars) = (a.chars(), b.chars());
// We need to keep track of this, because:
// In the case of `a` == `"a"` and `b` == `"aab"`,
// we actually need to compare `""` to `"b"` later on, not `""` to `"a"`.
let mut last_start_char = '\0';
// Counting to get the index.
let mut i: usize = 0;
loop {
// Advance the iterators...
match (start_chars.next(), end_chars.next()) {
// As long as there's two characters that match, increment i.
(Some(sc), Some(ec)) if sc == ec => {
last_start_char = sc;
i += 1;
continue;
}
// If start_chars have run out, but end_chars haven't, check
// if the current end char matches the last start char.
// If it does, we still need to increment our counter.
(None, Some(ec)) if ec == last_start_char => {
i += 1;
continue;
}
// break with i as soon as any mismatch happens or both iterators run out.
// matching_count will either be 0, indicating that there's
// no leading common pattern, or something other than 0, in
// that case it's the count of common characters.
(None, None) | (Some(_), None) | (None, Some(_)) | (Some(_), Some(_)) => {
break i
}
}
}
};
// Count the number to add to the total requests amount.
// If a or b is empty, we need one fewer item in the pool;
// two fewer items if both are empty.
let non_empty_input_count = [a, b].iter().filter(|s| !s.is_empty()).count();
// For convenience
let computed_amount = || amount.get() + non_empty_input_count;
// Calculate the distance between the first non-matching characters.
// If matching_count is greater than 0, we have leading common chars,
// so we skip those, but add the amount to the depth base.
let branching_factor = self.distance_between_first_chars(
// v--- matching_count might be higher than a.len()
// vvv because we might count past a's end
&a[std::cmp::min(matching_count, a.len())..],
&b[matching_count..],
)?;
// We also add matching_count to the depth because if we're starting
// with a common prefix, we have at least x leading characters that
// will be the same for all substrings.
let mut depth =
depth_for(dbg!(branching_factor), dbg!(computed_amount())) + dbg!(matching_count);
// if branching_factor == 1 {
// // This should only be the case when we have an input like `"z", ""`.
// // In this case, we can generate strings after the z, but we need
// // to go one level deeper in any case.
// depth += 1;
// }
// TODO: Maybe keeping this as an iterator would be more efficient,
// but it would have to be cloned at least once to get the pool length.
let pool: Vec<String> = self.traverse("".into(), a, b, dbg!(depth)).collect();
let pool = if (pool.len() as isize).saturating_sub(non_empty_input_count as isize)
< amount.get() as isize
{
depth += depth_for(branching_factor, computed_amount() + pool.len());
dbg!(self.traverse("".into(), a, b, dbg!(depth)).collect())
} else {
pool
};
if (pool.len() as isize).saturating_sub(non_empty_input_count as isize)
< amount.get() as isize
{
| Ok(if amount.get() == 1 {
pool.get(pool.len() / 2)
.map(|item| vec![item.clone()])
.ok_or_else(|| FailedToGetMiddle)?
} else {
let step = computed_amount() as f64 / pool.len() as f64;
let mut counter = 0f64;
let mut last_value = 0;
let result: Vec<_> = pool
.into_iter()
.filter(|_| {
counter += step;
let new_value = counter.floor() as usize;
if new_value > last_value {
last_value = new_value;
true
} else {
false
}
})
.take(amount.into())
.collect();
ensure! { result.len() == amount.get(), NotEnoughItemsInPool };
result
})
}
/// Convenience wrapper around `mudder` to generate exactly one string.
///
/// # Safety
/// This function calls `NonZeroUsize::new_unchecked(1)`.
pub fn mudder_one(&self, a: &str, b: &str) -> Result<String, GenerationError> {
self.mudder(a, b, unsafe { NonZeroUsize::new_unchecked(1) })
.map(|mut vec| vec.remove(0))
}
/// Convenience wrapper around `mudder` to generate an amount of fresh strings.
///
/// `SymbolTable.generate(amount)` is equivalent to `SymbolTable.mudder("", "", amount)`.
pub fn generate(&self, amount: NonZeroUsize) -> Result<Vec<String>, GenerationError> {
self.mudder("", "", amount)
}
/// Traverses a virtual tree of strings to the given depth.
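/// Keys are yielded in lexicographic order; `start` and `end` act as lower and
/// upper bounds, and an empty string disables the corresponding bound.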
fn traverse<'a>(
&'a self,
curr_key: String,
start: &'a str,
end: &'a str,
depth: usize,
) -> Box<dyn Iterator<Item = String> + 'a> {
if depth == 0 {
// If we've reached depth 0, we don't go further.
Box::new(std::iter::empty())
} else {
// Generate all possible mutations on the current depth
Box::new(
self.0
.iter()
.filter_map(move |c| -> Option<Box<dyn Iterator<Item = String>>> {
// TODO: Performance - this probably still isn't the best option.
let key = {
let the_char = *c as char;
let mut string =
String::with_capacity(curr_key.len() + the_char.len_utf8());
string.push_str(&curr_key);
string.push(the_char);
string
};
// After the end key, we definitely do not continue.
if key.as_str() > end && !end.is_empty() {
None
} else if key.as_str() < start {
// If we're prior to the start key...
// ...and the current key is a prefix of the start key...
if start.starts_with(&key) {
// ...only traverse the subtree, ignoring the key itself.
Some(Box::new(self.traverse(key, start, end, depth - 1)))
} else {
None
}
} else {
// Traverse normally, returning both the parent and sub key,
// in all other cases.
if key.len() < 2 {
let iter = std::iter::once(key.clone());
Some(if key == end {
Box::new(iter)
} else {
Box::new(iter.chain(self.traverse(key, start, end, depth - 1)))
})
} else {
let first = key.chars().next().unwrap();
Some(if key.chars().all(|c| c == first) {
// If our characters are all the same,
// don't add key to the list, only the subtree.
Box::new(self.traverse(key, start, end, depth - 1))
} else {
Box::new(std::iter::once(key.clone()).chain(self.traverse(
key,
start,
end,
depth - 1,
)))
})
}
}
})
.flatten(),
)
}
}
fn distance_between_first_chars(
&self,
start: &str,
end: &str,
) -> Result<usize, GenerationError> {
use InternalError::WrongCharOrder;
// check the first character of both strings...
Ok(match (start.chars().next(), end.chars().next()) {
// if both have a first char, compare them.
(Some(start_char), Some(end_char)) => {
ensure! { start_char < end_char, WrongCharOrder(start_char, end_char) }
let distance =
try_ascii_u8_from_char(end_char)? - try_ascii_u8_from_char(start_char)?;
distance as usize + 1
}
// if only the start has a first char, compare it to our last possible symbol.
(Some(start_char), None) => {
let end_u8 = self.0.last().unwrap();
// In this case, we allow the start and end char to be equal.
// This is because you can generate something after the last char,
// but not before the first char.
// vv
ensure! { start_char <= *end_u8 as char, WrongCharOrder(start_char, *end_u8 as char) }
let distance = end_u8 - try_ascii_u8_from_char(start_char)?;
if distance == 0 {
2
} else {
distance as usize + 1
}
}
// if only the end has a first char, compare it to our first possible symbol.
(None, Some(end_char)) => {
let start_u8 = self.0.first().unwrap();
ensure! { *start_u8 <= end_char as u8, WrongCharOrder(*start_u8 as char, end_char) }
let distance = try_ascii_u8_from_char(end_char)? - start_u8;
if distance == 0 {
2
} else {
distance as usize + 1
}
}
// if no characters are given, the whole symbol table is our range.
_ => self.0.len(),
})
}
fn contains_all_chars(&self, chars: impl AsRef<[u8]>) -> bool {
chars.as_ref().iter().all(|c| self.0.contains(c))
}
}
/// Calculate the required depth for the given values.
///
/// `branching_factor` is used as the logarithm base, `n_elements` as the
/// value, and the result is rounded up and cast to usize.
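/// For example, `depth_for(26, 10)` is 1 and `depth_for(2, 10)` is 4.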
fn depth_for(branching_factor: usize, n_elements: usize) -> usize {
f64::log(n_elements as f64, branching_factor as f64).ceil() as usize
}
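/// Tries to convert a char into a single byte, erroring if the code point does not fit in a `u8`.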
fn try_ascii_u8_from_char(c: char) -> Result<u8, NonAsciiError> {
u8::try_from(c as u32).map_err(NonAsciiError::from)
}
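/// Returns true if every byte of the input is within the ASCII range.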
fn all_chars_ascii(chars: impl AsRef<[u8]>) -> bool {
chars.as_ref().iter().all(|i| i.is_ascii())
}
impl FromStr for SymbolTable {
type Err = CreationError;
fn from_str(s: &str) -> Result<Self, CreationError> {
Self::from_chars(&s.chars().collect::<Box<[_]>>())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::num::NonZeroUsize;
/// Create and unwrap a NonZeroUsize from the given usize.
fn n(n: usize) -> NonZeroUsize {
NonZeroUsize::new(n).unwrap()
}
// Public API tests:
#[test]
#[allow(clippy::char_lit_as_u8)]
fn valid_tables_work() {
assert!(SymbolTable::new(&[1, 2, 3, 4, 5]).is_ok());
assert!(SymbolTable::new(&[125, 126, 127]).is_ok());
// Possible, but to be discouraged
assert!(SymbolTable::new(&['a' as u8, 'f' as u8]).is_ok());
assert!(SymbolTable::from_chars(&['a', 'b', 'c']).is_ok());
assert!(SymbolTable::from_str("0123").is_ok());
}
#[test]
fn invalid_tables_error() {
assert!(SymbolTable::from_str("🍅😂👶🏻").is_err());
assert!(SymbolTable::from_chars(&['🍌', '🍣', '⛈']).is_err());
assert!(SymbolTable::new(&[128, 129, 130]).is_err());
assert!(SymbolTable::new(&[]).is_err());
assert!(SymbolTable::from_chars(&[]).is_err());
assert!(SymbolTable::from_str("").is_err());
}
#[test]
fn unknown_chars_error() {
use error::GenerationError::UnknownCharacters;
// You cannot pass in strings with characters not in the SymbolTable:
let table = SymbolTable::alphabet();
assert_eq!(
table.mudder_one("123", "()/"),
Err(UnknownCharacters("123".into()))
);
assert_eq!(
table.mudder_one("a", "123"),
Err(UnknownCharacters("123".into()))
);
assert_eq!(
table.mudder_one("0)(", "b"),
Err(UnknownCharacters("0)(".into()))
);
let table = SymbolTable::from_str("123").unwrap();
assert_eq!(
table.mudder_one("a", "b"),
Err(UnknownCharacters("a".into()))
);
assert_eq!(
table.mudder_one("456", "1"),
Err(UnknownCharacters("456".into()))
);
assert_eq!(
table.mudder_one("2", "abc"),
Err(UnknownCharacters("abc".into()))
);
}
#[test]
fn equal_strings_error() {
use error::GenerationError::MatchingStrings;
let table = SymbolTable::alphabet();
assert_eq!(
table.mudder_one("abc", "abc"),
Err(MatchingStrings("abc".into()))
);
assert_eq!(
table.mudder_one("xyz", "xyz"),
Err(MatchingStrings("xyz".into()))
);
}
// TODO: Make this test work.
// I need to find out how to tell if two strings are lexicographically inseparable.
// #[test]
// fn lexicographically_adjacent_strings_error() {
// assert!(SymbolTable::alphabet().mudder("ba", "baa", n(1)).is_err());
// }
#[test]
fn reasonable_values() {
let table = SymbolTable::from_str("ab").unwrap();
let result = table.mudder_one("a", "b").unwrap();
assert_eq!(result, "ab");
let table = SymbolTable::from_str("0123456789").unwrap();
let result = table.mudder_one("1", "2").unwrap();
assert_eq!(result, "15");
}
#[test]
fn outputs_more_or_less_match_mudderjs() {
let table = SymbolTable::from_str("abc").unwrap();
let result = table.mudder_one("a", "b").unwrap();
assert_eq!(result, "ac");
let table = SymbolTable::alphabet();
let result = table.mudder("anhui", "azazel", n(3)).unwrap();
assert_eq!(result.len(), 3);
assert_eq!(vec!["aq", "as", "av"], result);
}
#[test]
fn empty_start() {
let table = SymbolTable::from_str("abc").unwrap();
let result = table.mudder("", "c", n(2)).unwrap();
assert_eq!(result.len(), 2);
}
#[test]
fn empty_end() {
let table = SymbolTable::from_str("abc").unwrap();
let result = table.mudder("b", "", n(2)).unwrap();
assert_eq!(result.len(), 2);
}
#[test]
fn generate_before_ax() {
// While you can't generate anything before 'a' with alphabet(), you
// should be able to generate something before "a" + something else.
let table = SymbolTable::alphabet();
let result = table.mudder("", "axxx", n(10)).unwrap();
assert_eq!(result.len(), 10);
assert!(result.iter().all(|k| k.as_str() > "a"));
// Some more to be sure
assert!(table.mudder_one("", "ab").is_ok());
assert!(table.mudder_one("", "abc").is_ok());
}
#[test]
fn generate_after_z() {
let table = SymbolTable::alphabet();
let result = table.mudder("z", "", n(10)).unwrap();
assert_eq!(result.len(), 10);
assert!(result.iter().all(|k| k.as_str() > "z"));
}
#[test]
fn only_amount() {
let table = SymbolTable::alphabet();
let result = table.generate(n(10)).unwrap();
assert_eq!(result.len(), 10);
}
#[test]
fn values_sorting_correct() {
let mut iter = SymbolTable::alphabet().generate(n(12)).into_iter();
while let (Some(one), Some(two)) = (iter.next(), iter.next()) {
assert!(one < two);
}
}
#[test]
fn differing_input_lengths() {
let table = SymbolTable::alphabet();
let result = table.mudder_one("a", "ab").unwrap();
assert!(result.starts_with('a'));
}
#[test]
fn values_consistently_between_start_and_end() {
let table = SymbolTable::alphabet();
{
// From z to a
let mut right = String::from("z");
for _ in 0..500 {
let new_val = dbg!(table.mudder_one("a", &right).unwrap());
assert!(new_val < right);
assert!(new_val.as_str() > "a");
right = new_val;
}
}
{
// And from a to z
let mut left = String::from("a");
// TODO: vv this test fails for higher numbers. FIXME!
for _ in 0..17 {
let new_val = dbg!(table.mudder_one(&left, "z").unwrap());
assert!(new_val > left);
assert!(new_val.as_str() < "z");
left = new_val;
}
}
}
// Internal/private method tests:
#[test]
fn traverse_alphabet() {
fn traverse_alphabet(a: &str, b: &str, depth: usize) -> Vec<String> {
SymbolTable::alphabet()
.traverse("".into(), a, b, depth)
.collect()
}
assert_eq!(traverse_alphabet("a", "d", 1), vec!["a", "b", "c", "d"]);
assert_eq!(
traverse_alphabet("a", "z", 1),
('a' as u32 as u8..='z' as u32 as u8)
.map(|c| (c as char).to_string())
.collect::<Vec<_>>()
);
assert_eq!(
traverse_alphabet("a", "b", 2),
vec![
"a", "ab", "ac", "ad", "ae", "af", "ag", "ah", "ai", "aj", "ak", "al", "am", "an",
"ao", "ap", "aq", "ar", "as", "at", "au", "av", "aw", "ax", "ay", "az", "b"
]
)
}
#[test]
fn traverse_custom() {
fn traverse(table: &str, a: &str, b: &str, depth: usize) -> Vec<String> {
let table = SymbolTable::from_str(table).unwrap();
table.traverse("".into(), a, b, depth).collect()
}
assert_eq!(traverse("abc", "a", "c", 1), vec!["a", "b", "c"]);
assert_eq!(
traverse("abc", "a", "c", 2),
vec!["a", "ab", "ac", "b", "ba", "bc", "c"]
);
assert_eq!(
traverse("0123456789", "1", "2", 2),
vec!["1", "10", "12", "13", "14", "15", "16", "17", "18", "19", "2"]
);
}
#[test]
fn distance_between_first_chars_correct() {
let table = SymbolTable::alphabet();
assert_eq!(table.distance_between_first_chars("a", "b").unwrap(), 2);
assert_eq!(table.distance_between_first_chars("a", "z").unwrap(), 26);
assert_eq!(table.distance_between_first_chars("", "").unwrap(), 26);
assert_eq!(table.distance_between_first_chars("n", "").unwrap(), 13);
assert_eq!(table.distance_between_first_chars("", "n").unwrap(), 14);
assert_eq!(table.distance_between_first_chars("y", "z").unwrap(), 2);
assert_eq!(table.distance_between_first_chars("a", "y").unwrap(), 25);
assert_eq!(
table.distance_between_first_chars("aaaa", "zzzz").unwrap(),
table.distance_between_first_chars("aa", "zz").unwrap()
);
let table = SymbolTable::from_str("12345").unwrap();
assert_eq!(table.distance_between_first_chars("1", "2").unwrap(), 2);
assert_eq!(table.distance_between_first_chars("1", "3").unwrap(), 3);
assert_eq!(table.distance_between_first_chars("2", "3").unwrap(), 2);
}
}
| // We still don't have enough items, so bail
panic!(
"Internal error: Failed to calculate the correct tree depth!
This is a bug. Please report it at: https://github.com/Follpvosten/mudders/issues
and make sure to include the following information:
Symbols in table: {symbols:?}
Given inputs: {a:?}, {b:?}, amount: {amount}
matching_count: {m_count}
non_empty_input_count: {ne_input_count}
required pool length (computed amount): {comp_amount}
branching_factor: {b_factor}
final depth: {depth}
pool: {pool:?} (length: {pool_len})",
symbols = self.0.iter().map(|i| *i as char).collect::<Box<[_]>>(),
a = a,
b = b,
amount = amount,
m_count = matching_count,
ne_input_count = non_empty_input_count,
comp_amount = computed_amount(),
b_factor = branching_factor,
depth = depth,
pool = pool,
pool_len = pool.len(),
)
}
| conditional_block |
lib.rs | /*!
Generate lexicographically-evenly-spaced strings between two strings
from pre-defined alphabets.
This is a rewrite of [mudderjs](https://github.com/fasiha/mudderjs); thanks
for the original work of the author and their contributors!
## Usage
Add a dependency in your Cargo.toml:
```toml
mudders = "0.0.4"
```
Now you can generate lexicographically-spaced strings in a few different ways:
```
use mudders::SymbolTable;
// The mudder method takes a NonZeroUsize as the amount,
// so you cannot pass in an invalid value.
use std::num::NonZeroUsize;
// You can use the included alphabet table
let table = SymbolTable::alphabet();
// SymbolTable::mudder() returns a Vec containing `amount` Strings.
let result = table.mudder_one("a", "z").unwrap();
// These strings are always lexicographically placed between `start` and `end`.
let one_str = result.as_str();
assert!(one_str > "a");
assert!(one_str < "z");
// You can also define your own symbol tables
let table = SymbolTable::from_chars(&['a', 'b']).unwrap();
let result = table.mudder("a", "b", NonZeroUsize::new(2).unwrap()).unwrap();
assert_eq!(result.len(), 2);
assert!(result[0].as_str() > "a" && result[1].as_str() > "a");
assert!(result[0].as_str() < "b" && result[1].as_str() < "b");
// The strings *should* be evenly-spaced and as short as they can be.
let table = SymbolTable::alphabet();
let result = table.mudder("anhui", "azazel", NonZeroUsize::new(3).unwrap()).unwrap();
assert_eq!(result.len(), 3);
assert_eq!(vec!["aq", "as", "av"], result);
```
## Notes
The most notable difference to Mudder.js is that currently, mudders only
supports ASCII characters (because 127 characters ought to be enough for
everyone™). Our default `::alphabet()` also only has lowercase letters.
*/
use core::num::NonZeroUsize;
use std::{convert::TryFrom, str::FromStr};
#[macro_use]
pub mod error;
use error::*;
/// The functionality of the crate lives here.
///
/// A symbol table is, internally, a vector of valid ASCII bytes that are used
/// to generate lexicographically evenly-spaced strings.
#[derive(Clone, Debug)]
pub struct SymbolTable(Vec<u8>);
impl SymbolTable {
/// Creates a new symbol table from the given byte slice.
/// The slice is internally sorted using `.sort()`.
///
/// An error is returned if one of the given bytes is out of ASCII range.
pub fn new(source: &[u8]) -> Result<Self, CreationError> {
ensure! { !source.is_empty(), CreationError::EmptySlice }
ensure! { all_chars_ascii(&source), NonAsciiError::NonAsciiU8 }
// Copy the values, we need to own them anyways...
let mut vec: Vec<_> = source.iter().copied().collect();
// Sort them so they're actually in order.
// (You can pass in ['b', 'a'], but that's not usable internally I think.)
vec.sort();
vec.dedup();
Ok(Self(vec))
}
/// Creates a new symbol table from the given characters.
/// The slice is internally sorted using `.sort()`.
///
/// An error is returned if one of the given characters is not ASCII.
pub fn from_chars(source: &[char]) -> Result<Self, CreationError> {
let inner: Box<[u8]> = source
.iter()
.map(|c| try_ascii_u8_from_char(*c))
.collect::<Result<_, _>>()?;
Ok(Self::new(&inner)?)
}
/// Returns a SymbolTable which contains the lowercase latin alphabet (`[a-z]`).
#[allow(clippy::char_lit_as_u8)]
pub fn al | -> Self {
Self::new(&('a' as u8..='z' as u8).collect::<Box<[_]>>()).unwrap()
}
/// Generate `amount` strings that lexicographically sort between `start` and `end`.
/// The algorithm will try to make them as evenly-spaced as possible.
///
/// When both parameters are empty strings, `amount` new strings that are
/// in lexicographical order are returned.
///
/// If parameter `b` is lexicographically before `a`, they are swapped internally.
///
/// ```
/// # use mudders::SymbolTable;
/// # use std::num::NonZeroUsize;
/// // Using the included alphabet table
/// let table = SymbolTable::alphabet();
/// // Generate 10 strings from scratch
/// let results = table.mudder("", "", NonZeroUsize::new(10).unwrap()).unwrap();
/// assert!(results.len() == 10);
/// // results should look something like ["b", "d", "f", ..., "r", "t"]
/// ```
pub fn mudder(
&self,
a: &str,
b: &str,
amount: NonZeroUsize,
) -> Result<Vec<String>, GenerationError> {
use error::InternalError::*;
use GenerationError::*;
ensure! { all_chars_ascii(a), NonAsciiError::NonAsciiU8 }
ensure! { all_chars_ascii(b), NonAsciiError::NonAsciiU8 }
ensure! { self.contains_all_chars(a), UnknownCharacters(a.to_string()) }
ensure! { self.contains_all_chars(b), UnknownCharacters(b.to_string()) }
let (a, b) = if a.is_empty() || b.is_empty() {
// If an argument is empty, keep the order
(a, b)
} else if b < a {
// If they're not empty and b is lexicographically prior to a, swap them
(b, a)
} else {
// You can't generate values between two matching strings.
ensure! { a != b, MatchingStrings(a.to_string()) }
// In any other case, keep the order
(a, b)
};
// TODO: Check for lexicographical adjacency!
//ensure! { !lex_adjacent(a, b), LexAdjacentStrings(a.to_string(), b.to_string()) }
// Count the characters start and end have in common.
let matching_count: usize = {
// Iterate through the chars of both given inputs...
let (mut start_chars, mut end_chars) = (a.chars(), b.chars());
// We need to keep track of this, because:
// In the case of `a` == `"a"` and `b` == `"aab"`,
// we actually need to compare `""` to `"b"` later on, not `""` to `"a"`.
let mut last_start_char = '\0';
// Counting to get the index.
let mut i: usize = 0;
loop {
// Advance the iterators...
match (start_chars.next(), end_chars.next()) {
// As long as there's two characters that match, increment i.
(Some(sc), Some(ec)) if sc == ec => {
last_start_char = sc;
i += 1;
continue;
}
// If start_chars have run out, but end_chars haven't, check
// if the current end char matches the last start char.
// If it does, we still need to increment our counter.
(None, Some(ec)) if ec == last_start_char => {
i += 1;
continue;
}
// break with i as soon as any mismatch happens or both iterators run out.
// matching_count will either be 0, indicating that there's
// no leading common pattern, or something other than 0, in
// that case it's the count of common characters.
(None, None) | (Some(_), None) | (None, Some(_)) | (Some(_), Some(_)) => {
break i
}
}
}
};
// Count the number to add to the total requests amount.
// If a or b is empty, we need one fewer item in the pool;
// two fewer items if both are empty.
let non_empty_input_count = [a, b].iter().filter(|s| !s.is_empty()).count();
// For convenience
let computed_amount = || amount.get() + non_empty_input_count;
// Calculate the distance between the first non-matching characters.
// If matching_count is greater than 0, we have leading common chars,
// so we skip those, but add the amount to the depth base.
let branching_factor = self.distance_between_first_chars(
// v--- matching_count might be higher than a.len()
// vvv because we might count past a's end
&a[std::cmp::min(matching_count, a.len())..],
&b[matching_count..],
)?;
// We also add matching_count to the depth because if we're starting
// with a common prefix, we have at least x leading characters that
// will be the same for all substrings.
let mut depth =
depth_for(dbg!(branching_factor), dbg!(computed_amount())) + dbg!(matching_count);
// if branching_factor == 1 {
// // This should only be the case when we have an input like `"z", ""`.
// // In this case, we can generate strings after the z, but we need
// // to go one level deeper in any case.
// depth += 1;
// }
// TODO: Maybe keeping this as an iterator would be more efficient,
// but it would have to be cloned at least once to get the pool length.
let pool: Vec<String> = self.traverse("".into(), a, b, dbg!(depth)).collect();
let pool = if (pool.len() as isize).saturating_sub(non_empty_input_count as isize)
< amount.get() as isize
{
depth += depth_for(branching_factor, computed_amount() + pool.len());
dbg!(self.traverse("".into(), a, b, dbg!(depth)).collect())
} else {
pool
};
if (pool.len() as isize).saturating_sub(non_empty_input_count as isize)
< amount.get() as isize
{
// We still don't have enough items, so bail
panic!(
"Internal error: Failed to calculate the correct tree depth!
This is a bug. Please report it at: https://github.com/Follpvosten/mudders/issues
and make sure to include the following information:
Symbols in table: {symbols:?}
Given inputs: {a:?}, {b:?}, amount: {amount}
matching_count: {m_count}
non_empty_input_count: {ne_input_count}
required pool length (computed amount): {comp_amount}
branching_factor: {b_factor}
final depth: {depth}
pool: {pool:?} (length: {pool_len})",
symbols = self.0.iter().map(|i| *i as char).collect::<Box<[_]>>(),
a = a,
b = b,
amount = amount,
m_count = matching_count,
ne_input_count = non_empty_input_count,
comp_amount = computed_amount(),
b_factor = branching_factor,
depth = depth,
pool = pool,
pool_len = pool.len(),
)
}
Ok(if amount.get() == 1 {
pool.get(pool.len() / 2)
.map(|item| vec![item.clone()])
.ok_or_else(|| FailedToGetMiddle)?
} else {
let step = computed_amount() as f64 / pool.len() as f64;
let mut counter = 0f64;
let mut last_value = 0;
let result: Vec<_> = pool
.into_iter()
.filter(|_| {
counter += step;
let new_value = counter.floor() as usize;
if new_value > last_value {
last_value = new_value;
true
} else {
false
}
})
.take(amount.into())
.collect();
ensure! { result.len() == amount.get(), NotEnoughItemsInPool };
result
})
}
/// Convenience wrapper around `mudder` to generate exactly one string.
///
/// # Safety
/// This function calls `NonZeroUsize::new_unchecked(1)`.
pub fn mudder_one(&self, a: &str, b: &str) -> Result<String, GenerationError> {
self.mudder(a, b, unsafe { NonZeroUsize::new_unchecked(1) })
.map(|mut vec| vec.remove(0))
}
/// Convenience wrapper around `mudder` to generate an amount of fresh strings.
///
/// `SymbolTable.generate(amount)` is equivalent to `SymbolTable.mudder("", "", amount)`.
pub fn generate(&self, amount: NonZeroUsize) -> Result<Vec<String>, GenerationError> {
self.mudder("", "", amount)
}
/// Traverses a virtual tree of strings to the given depth.
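/// Keys are yielded in lexicographic order; `start` and `end` act as lower and
/// upper bounds, and an empty string disables the corresponding bound.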
fn traverse<'a>(
&'a self,
curr_key: String,
start: &'a str,
end: &'a str,
depth: usize,
) -> Box<dyn Iterator<Item = String> + 'a> {
if depth == 0 {
// If we've reached depth 0, we don't go further.
Box::new(std::iter::empty())
} else {
// Generate all possible mutations on the current depth
Box::new(
self.0
.iter()
.filter_map(move |c| -> Option<Box<dyn Iterator<Item = String>>> {
// TODO: Performance - this probably still isn't the best option.
let key = {
let the_char = *c as char;
let mut string =
String::with_capacity(curr_key.len() + the_char.len_utf8());
string.push_str(&curr_key);
string.push(the_char);
string
};
// After the end key, we definitely do not continue.
if key.as_str() > end && !end.is_empty() {
None
} else if key.as_str() < start {
// If we're prior to the start key...
// ...and the start key is a subkey of the current key...
if start.starts_with(&key) {
// ...only traverse the subtree, ignoring the key itself.
Some(Box::new(self.traverse(key, start, end, depth - 1)))
} else {
None
}
} else {
// Traverse normally, returning both the parent and sub key,
// in all other cases.
if key.len() < 2 {
let iter = std::iter::once(key.clone());
Some(if key == end {
Box::new(iter)
} else {
Box::new(iter.chain(self.traverse(key, start, end, depth - 1)))
})
} else {
let first = key.chars().next().unwrap();
Some(if key.chars().all(|c| c == first) {
// If our characters are all the same,
// don't add key to the list, only the subtree.
Box::new(self.traverse(key, start, end, depth - 1))
} else {
Box::new(std::iter::once(key.clone()).chain(self.traverse(
key,
start,
end,
depth - 1,
)))
})
}
}
})
.flatten(),
)
}
}
fn distance_between_first_chars(
&self,
start: &str,
end: &str,
) -> Result<usize, GenerationError> {
use InternalError::WrongCharOrder;
// check the first character of both strings...
Ok(match (start.chars().next(), end.chars().next()) {
// if both have a first char, compare them.
(Some(start_char), Some(end_char)) => {
ensure! { start_char < end_char, WrongCharOrder(start_char, end_char) }
let distance =
try_ascii_u8_from_char(end_char)? - try_ascii_u8_from_char(start_char)?;
distance as usize + 1
}
// if only the start has a first char, compare it to our last possible symbol.
(Some(start_char), None) => {
let end_u8 = self.0.last().unwrap();
// In this case, we allow the start and end char to be equal.
// This is because you can generate something after the last char,
// but not before the first char.
// vv
ensure! { start_char <= *end_u8 as char, WrongCharOrder(start_char, *end_u8 as char) }
let distance = end_u8 - try_ascii_u8_from_char(start_char)?;
if distance == 0 {
2
} else {
distance as usize + 1
}
}
// if only the end has a first char, compare it to our first possible symbol.
(None, Some(end_char)) => {
let start_u8 = self.0.first().unwrap();
ensure! { *start_u8 <= end_char as u8, WrongCharOrder(*start_u8 as char, end_char) }
let distance = try_ascii_u8_from_char(end_char)? - start_u8;
if distance == 0 {
2
} else {
distance as usize + 1
}
}
// if no characters are given, the whole symbol table is our range.
_ => self.0.len(),
})
}
fn contains_all_chars(&self, chars: impl AsRef<[u8]>) -> bool {
chars.as_ref().iter().all(|c| self.0.contains(c))
}
}
/// Calculate the required depth for the given values.
///
/// `branching_factor` is used as the logarithm base, `n_elements` as the
/// value, and the result is rounded up and cast to usize.
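/// For example, `depth_for(26, 10)` is 1 and `depth_for(2, 10)` is 4.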
fn depth_for(branching_factor: usize, n_elements: usize) -> usize {
f64::log(n_elements as f64, branching_factor as f64).ceil() as usize
}
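/// Tries to convert a char into a single byte, erroring if the code point does not fit in a `u8`.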
fn try_ascii_u8_from_char(c: char) -> Result<u8, NonAsciiError> {
u8::try_from(c as u32).map_err(NonAsciiError::from)
}
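/// Returns true if every byte of the input is within the ASCII range.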
fn all_chars_ascii(chars: impl AsRef<[u8]>) -> bool {
chars.as_ref().iter().all(|i| i.is_ascii())
}
impl FromStr for SymbolTable {
type Err = CreationError;
fn from_str(s: &str) -> Result<Self, CreationError> {
Self::from_chars(&s.chars().collect::<Box<[_]>>())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::num::NonZeroUsize;
/// Create and unwrap a NonZeroUsize from the given usize.
fn n(n: usize) -> NonZeroUsize {
NonZeroUsize::new(n).unwrap()
}
// Public API tests:
#[test]
#[allow(clippy::char_lit_as_u8)]
fn valid_tables_work() {
assert!(SymbolTable::new(&[1, 2, 3, 4, 5]).is_ok());
assert!(SymbolTable::new(&[125, 126, 127]).is_ok());
// Possible, but to be discouraged
assert!(SymbolTable::new(&['a' as u8, 'f' as u8]).is_ok());
assert!(SymbolTable::from_chars(&['a', 'b', 'c']).is_ok());
assert!(SymbolTable::from_str("0123").is_ok());
}
#[test]
fn invalid_tables_error() {
assert!(SymbolTable::from_str("🍅😂👶🏻").is_err());
assert!(SymbolTable::from_chars(&['🍌', '🍣', '⛈']).is_err());
assert!(SymbolTable::new(&[128, 129, 130]).is_err());
assert!(SymbolTable::new(&[]).is_err());
assert!(SymbolTable::from_chars(&[]).is_err());
assert!(SymbolTable::from_str("").is_err());
}
#[test]
fn unknown_chars_error() {
use error::GenerationError::UnknownCharacters;
// You cannot pass in strings with characters not in the SymbolTable:
let table = SymbolTable::alphabet();
assert_eq!(
table.mudder_one("123", "()/"),
Err(UnknownCharacters("123".into()))
);
assert_eq!(
table.mudder_one("a", "123"),
Err(UnknownCharacters("123".into()))
);
assert_eq!(
table.mudder_one("0)(", "b"),
Err(UnknownCharacters("0)(".into()))
);
let table = SymbolTable::from_str("123").unwrap();
assert_eq!(
table.mudder_one("a", "b"),
Err(UnknownCharacters("a".into()))
);
assert_eq!(
table.mudder_one("456", "1"),
Err(UnknownCharacters("456".into()))
);
assert_eq!(
table.mudder_one("2", "abc"),
Err(UnknownCharacters("abc".into()))
);
}
#[test]
fn equal_strings_error() {
use error::GenerationError::MatchingStrings;
let table = SymbolTable::alphabet();
assert_eq!(
table.mudder_one("abc", "abc"),
Err(MatchingStrings("abc".into()))
);
assert_eq!(
table.mudder_one("xyz", "xyz"),
Err(MatchingStrings("xyz".into()))
);
}
// TODO: Make this test work.
// I need to find out how to tell if two strings are lexicographically inseparable.
// #[test]
// fn lexicographically_adjacent_strings_error() {
// assert!(SymbolTable::alphabet().mudder("ba", "baa", n(1)).is_err());
// }
#[test]
fn reasonable_values() {
let table = SymbolTable::from_str("ab").unwrap();
let result = table.mudder_one("a", "b").unwrap();
assert_eq!(result, "ab");
let table = SymbolTable::from_str("0123456789").unwrap();
let result = table.mudder_one("1", "2").unwrap();
assert_eq!(result, "15");
}
#[test]
fn outputs_more_or_less_match_mudderjs() {
let table = SymbolTable::from_str("abc").unwrap();
let result = table.mudder_one("a", "b").unwrap();
assert_eq!(result, "ac");
let table = SymbolTable::alphabet();
let result = table.mudder("anhui", "azazel", n(3)).unwrap();
assert_eq!(result.len(), 3);
assert_eq!(vec!["aq", "as", "av"], result);
}
#[test]
fn empty_start() {
let table = SymbolTable::from_str("abc").unwrap();
let result = table.mudder("", "c", n(2)).unwrap();
assert_eq!(result.len(), 2);
}
#[test]
fn empty_end() {
let table = SymbolTable::from_str("abc").unwrap();
let result = table.mudder("b", "", n(2)).unwrap();
assert_eq!(result.len(), 2);
}
#[test]
fn generate_before_ax() {
// While you can't generate anything before 'a' with alphabet(), you
// should be able to generate something before "a" + something else.
let table = SymbolTable::alphabet();
let result = table.mudder("", "axxx", n(10)).unwrap();
assert_eq!(result.len(), 10);
assert!(result.iter().all(|k| k.as_str() > "a"));
// Some more to be sure
assert!(table.mudder_one("", "ab").is_ok());
assert!(table.mudder_one("", "abc").is_ok());
}
#[test]
fn generate_after_z() {
let table = SymbolTable::alphabet();
let result = table.mudder("z", "", n(10)).unwrap();
assert_eq!(result.len(), 10);
assert!(result.iter().all(|k| k.as_str() > "z"));
}
#[test]
fn only_amount() {
let table = SymbolTable::alphabet();
let result = table.generate(n(10)).unwrap();
assert_eq!(result.len(), 10);
}
#[test]
fn values_sorting_correct() {
let mut iter = SymbolTable::alphabet().generate(n(12)).into_iter();
while let (Some(one), Some(two)) = (iter.next(), iter.next()) {
assert!(one < two);
}
}
#[test]
fn differing_input_lengths() {
let table = SymbolTable::alphabet();
let result = table.mudder_one("a", "ab").unwrap();
assert!(result.starts_with('a'));
}
#[test]
fn values_consistently_between_start_and_end() {
let table = SymbolTable::alphabet();
{
// From z to a
let mut right = String::from("z");
for _ in 0..500 {
let new_val = dbg!(table.mudder_one("a", &right).unwrap());
assert!(new_val < right);
assert!(new_val.as_str() > "a");
right = new_val;
}
}
{
// And from a to z
let mut left = String::from("a");
// TODO: vv this test fails for higher numbers. FIXME!
for _ in 0..17 {
let new_val = dbg!(table.mudder_one(&left, "z").unwrap());
assert!(new_val > left);
assert!(new_val.as_str() < "z");
left = new_val;
}
}
}
// Internal/private method tests:
#[test]
fn traverse_alphabet() {
fn traverse_alphabet(a: &str, b: &str, depth: usize) -> Vec<String> {
SymbolTable::alphabet()
.traverse("".into(), a, b, depth)
.collect()
}
assert_eq!(traverse_alphabet("a", "d", 1), vec!["a", "b", "c", "d"]);
assert_eq!(
traverse_alphabet("a", "z", 1),
('a' as u32 as u8..='z' as u32 as u8)
.map(|c| (c as char).to_string())
.collect::<Vec<_>>()
);
assert_eq!(
traverse_alphabet("a", "b", 2),
vec![
"a", "ab", "ac", "ad", "ae", "af", "ag", "ah", "ai", "aj", "ak", "al", "am", "an",
"ao", "ap", "aq", "ar", "as", "at", "au", "av", "aw", "ax", "ay", "az", "b"
]
)
}
#[test]
fn traverse_custom() {
fn traverse(table: &str, a: &str, b: &str, depth: usize) -> Vec<String> {
let table = SymbolTable::from_str(table).unwrap();
table.traverse("".into(), a, b, depth).collect()
}
assert_eq!(traverse("abc", "a", "c", 1), vec!["a", "b", "c"]);
assert_eq!(
traverse("abc", "a", "c", 2),
vec!["a", "ab", "ac", "b", "ba", "bc", "c"]
);
assert_eq!(
traverse("0123456789", "1", "2", 2),
vec!["1", "10", "12", "13", "14", "15", "16", "17", "18", "19", "2"]
);
}
#[test]
fn distance_between_first_chars_correct() {
let table = SymbolTable::alphabet();
assert_eq!(table.distance_between_first_chars("a", "b").unwrap(), 2);
assert_eq!(table.distance_between_first_chars("a", "z").unwrap(), 26);
assert_eq!(table.distance_between_first_chars("", "").unwrap(), 26);
assert_eq!(table.distance_between_first_chars("n", "").unwrap(), 13);
assert_eq!(table.distance_between_first_chars("", "n").unwrap(), 14);
assert_eq!(table.distance_between_first_chars("y", "z").unwrap(), 2);
assert_eq!(table.distance_between_first_chars("a", "y").unwrap(), 25);
assert_eq!(
table.distance_between_first_chars("aaaa", "zzzz").unwrap(),
table.distance_between_first_chars("aa", "zz").unwrap()
);
let table = SymbolTable::from_str("12345").unwrap();
assert_eq!(table.distance_between_first_chars("1", "2").unwrap(), 2);
assert_eq!(table.distance_between_first_chars("1", "3").unwrap(), 3);
assert_eq!(table.distance_between_first_chars("2", "3").unwrap(), 2);
}
}
| phabet() | identifier_name |
Search.py | # coding: utf-8
# Jeremy Aguillon
# CMSC 471
# Project 1
# Due 2/15/2016
# imports queues for BFS and UCS
from queue import Queue
from queue import PriorityQueue
# imports sys for command line arguments (argv)
import sys
## Constants ##
# Command line arguments
INPUT_FILE = 1
OUTPUT_FILE = 2
START_NODE = 3
END_NODE = 4
SEARCH_TYPE = 5
# Input file arguments
NODE_1 = 0
NEIGHBOR_1 = 1
WEIGHT = 2
# Error flags
FILE_IO = -1
MISSING_NODE = -2
MISSING_START = -3
MISSING_END = -4
NO_NEIGHBORS = -5
NO_PATH = ""
# getNodes() takes in a filename and parses the file to create nodes for each of the inputs in the file
# This also validates that the file exists and can be opened, and the start and end nodes are
# in the given graph.
# Input: Filename - string of the filename of the input
# start - the node to begin searching at
# end - the node to stop searching at
# Output: The nodes that are created and stored in a dictionary or an error flag
def getNodes(Filename, start, end):
# flags to validate that nodes exist in the given graph
foundStart = 0
foundEnd = 0
# validation for opening the file
try:
inFile = open(Filename, 'r')
except IOError as e:
print ("I/O error({0}): {1} \"{2}\"".format(e.errno, e.strerror, Filename),)
# error flag of -1 for main
return FILE_IO
# initialized dictionary
nodeDict = {}
# loops through each line of the file
for line in inFile:
line = line.split()
# checks for start and end nodes and sets flag when found
if line[NODE_1] == start or line[NEIGHBOR_1] == start:
foundStart = 1
if line[NODE_1] == end or line[NEIGHBOR_1] == end:
foundEnd = 1
# adds an entry for each unadded node as the key with a tuple of neighbors and weight as the value
if line[NODE_1] in nodeDict.keys():
nodeDict[ line[NODE_1] ].append( ( line[NEIGHBOR_1], int(line[WEIGHT]) ) )
# if the node already exists, adds another node to the neighbors
else:
nodeDict[ line[NODE_1] ] = [(line[NEIGHBOR_1], int(line[WEIGHT]) )]
inFile.close()
# returns the dictionary if the nodes exist
if foundStart and foundEnd:
if start in nodeDict.keys():
return nodeDict
else:
return NO_NEIGHBORS
# returns an error message otherwise
elif foundStart:
return MISSING_END
elif foundEnd:
return MISSING_START
else:
return MISSING_NODE
# DFS() uses a graph to search depth first to find a path to a given end node
# from a start node and returns the path as a list
# Input: nodeDict - a dictionary of nodes representing a graph
# start - a start node that is in the graph
# end - the goal node that is in the graph
# Output: a list of the path from start to end
def DFS(nodeDict, start, end):
# creates lists for nodes to visit and visited nodes
Open = []
closed = []
# begins with the start node
Open.append(start)
# loops through the unvisited nodes until there are no more
while Open:
# examines the node at the top of the stack
curNode = Open.pop()
# checks if the node is found
if curNode == end:
# adds the final node and returns the path
closed.append(curNode)
return closed
# checks if you have visited the node before
elif curNode not in closed:
# adds the current node to visited nodes
closed.append(curNode)
# checks if the current node has neighbors for directed graphs
if curNode in nodeDict.keys():
# adds all neighbors of the current node to unvisited
for pair in sorted(nodeDict[curNode], reverse=True):
Open.append(pair[0])
# return blank string if none found
return ""
# BFS() uses a graph to search breadth first to find a path to a given end node
# from a start node and returns the path as a list
# Input: nodeDict - a dictionary of nodes representing a graph
# start - a start node that is in the graph
# end - the goal node that is in the graph
# Output: a list of the path from start to end
def BFS(nodeDict, start, end):
# creates the unvisited nodes as a queue
Open = Queue()
# closed 1 is the path taken and closed 2 is the node that led to the next node
seen = []
closed1 = []
closed2 = []
# begins searching at the start node which is the node and what led to it
Open.put((start,start))
seen.append(start)
# loops until there are no more unvisited nodes
while not Open.empty():
# dequeues the first node
curNode = Open.get()
# checks if the node is at the end and stops if it is
if curNode[0] == end:
# adds the final node and what sent it to the lists
closed1.append(curNode[0])
closed2.append(curNode[1])
# begins tracing list one back for the path at the goal node
cur = closed1[len(closed1)-1]
final = [cur]
# searches each pair until it goes back to the start node
while cur != start:
# finds the location of the current node
loc = closed1.index(cur)
# finds the node that sent the current node
cur = closed2[loc]
# adds the node that sent it to the list
final.append(cur)
# returns the final path reversed for consistency with DFS
return reversed(final)
# checks if the current node has neighbors for directed graphs
elif curNode[0] in nodeDict.keys():
# Adds each of the neighbors of the node if it is not the goal
for pair in sorted(nodeDict[curNode[0]]):
# must check if it is not in seen in case a previous node added it before closing
if pair[0] not in seen:
seen.append(pair[0])
# each node is classified by the node it is at and the node that led to it
Open.put((pair[0], curNode[0]))
# updates the visited lists and how they got there
closed1.append(curNode[0])
closed2.append(curNode[1])
# return blank string if none found
return ""
# UCS() uses a graph to search using Dijkstra's algorithm to find
# a path to a given end node from a start node and returns
# the path as a list
# Input: nodeDict - a dictionary of nodes representing a graph
# start - a start node that is in the graph
# end - the goal node that is in the graph
# Output: a list of the path from start to end
def UCS(nodeDict, start, end):
# creates the priority queue with a maximum size of 10,000
Open = PriorityQueue(10000)
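# each queue entry is a (distance, node) tuple, so get() always returns the node with the smallest tentative distance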
# creates dictionaries to keep track of distance and previous node of each element
distance = {}
previous = {}
# Initializes each node to have infinity length and no previous
for node in nodeDict.keys():
# gives the initial node 0 distance to be chosen first
if node == start:
distance[node] = 0
else:
distance[node] = float('inf')
previous[node] = None
# adds each node to the queue
Open.put((distance[node], node))
# iterates through each node of the graph
while not Open.empty():
# gets the least valued piece from the queue
cur = Open.get()
# checks if reached the end
if cur[1] == end:
temp = end
finalPath = [temp]
# loops backwards through the found path until reaches start
while temp != start:
temp = previous[temp]
finalPath.append(temp)
# returns start reverse for consistency
return reversed(finalPath)
| alternate = distance[cur[1]] + pair[1]
# if the distance is shorter it replaces in the algorithm
if alternate < distance[pair[0]]:
distance[pair[0]] = alternate
previous[pair[0]] = cur[1]
# finds if the nodes are in the open queue and adds the new value to temp list
if pair[0] in [x[1] for x in Open.queue]:
openNodes.append( (alternate, pair[0]) )
# list of all the nodes in open including updated ones
newOpen = []
# dequeues each of the nodes in Open to update the ones that need it
for i in range(len(Open.queue)):
node = Open.get()
if node[1] in [x[1] for x in openNodes]:
newOpen.append([x for x in openNodes if x[1] == node[1]][0])
else:
newOpen.append(node)
# repopulates Open with updated values
for node in newOpen:
Open.put(node)
# end while loop
# returns blank string if no output found
return ""
# writePath() writes the final path it takes to search from start to end to a new file
# Input: outFile - the filename of the file to be written to
# finalPath - a list of the nodes of the path from start to end nodes
# Output: None
def writePath(outFile, finalPath):
outFile = open(outFile, 'w')
if NO_PATH != finalPath:
for node in finalPath:
outFile.write("{0}\n".format(node))
else:
outFile.write("No path found")
outFile.close()
# main
def main(argv):
# validates amount of arguments given
if len(argv) != 6:
print("Invalid Input\nUsage: python Search.py <input file> <output file> <start node> <end node> <search_type>")
# validates correct search types entered
elif "DFS" != argv[SEARCH_TYPE] and "BFS" != argv[SEARCH_TYPE] and "UCS" != argv[SEARCH_TYPE]:
print("Invalid Search Type\nUsage: python Search.py <input file> <output file> <start node> <end node> <search_type>\n<search_type> = DFS or BFS or UCS")
else:
# Gets the dictionary of nodes and weights
nodeDict = getNodes(argv[INPUT_FILE], argv[START_NODE], argv[END_NODE])
# validates start and end nodes exist in graph
if type(nodeDict) == int and nodeDict <= MISSING_NODE:
if nodeDict == MISSING_START:
print("Start node ({0}) is not in the given graph.".format(argv[START_NODE]))
elif nodeDict == MISSING_END:
print("End node ({0}) is not in the given graph.".format(argv[END_NODE]))
elif nodeDict == NO_NEIGHBORS:
print("Start node ({0}) has no neighbors.".format(argv[START_NODE]))
else:
print("Start node ({0}) and/or End node ({1}) are not in the given graph.".format(argv[START_NODE], argv[END_NODE]))
# checks if file was successfully opened
elif nodeDict != FILE_IO:
# performs the search on the graph that the user requests
if "DFS" == argv[SEARCH_TYPE]:
finalPath = (DFS(nodeDict, argv[START_NODE], argv[END_NODE]))
elif "BFS" == argv[SEARCH_TYPE]:
finalPath = BFS(nodeDict, argv[START_NODE], argv[END_NODE])
elif "UCS" == argv[SEARCH_TYPE]:
finalPath = UCS(nodeDict, argv[START_NODE], argv[END_NODE])
# writes the final result to the provided file
writePath(argv[OUTPUT_FILE], finalPath)
# call to main
main(sys.argv)
# old test cases
#main(['Search.py','test.txt','sup1.txt','A','F', 'DFS'])
#main(['Search.py','utube.txt','sup2.txt','A','H', 'BFS'])
#main(['Search.py','other.txt','sup3.txt','S','G', 'DFS'])
#main(['Search.py','sample.txt','final.txt','1','50', 'BFS'])
#main(['Search.py','another.txt','one.txt','a','g', 'UCS']) | # list of nodes that are in open that need to be updated
openNodes = []
# Adds each of the neighbors of the node and compares their length
for pair in sorted(nodeDict[cur[1]]):
# distance of current path is saved and compared with distance | random_line_split |
Search.py | # coding: utf-8
# Jeremy Aguillon
# CMSC 471
# Project 1
# Due 2/15/2016
# imports queues for BFS and UCS
from queue import Queue
from queue import PriorityQueue
# imports sys for command line arguments (argv)
import sys
## Constants ##
# Command line arguments
INPUT_FILE = 1
OUTPUT_FILE = 2
START_NODE = 3
END_NODE = 4
SEARCH_TYPE = 5
# Input file arguments
NODE_1 = 0
NEIGHBOR_1 = 1
WEIGHT = 2
# Error flags
FILE_IO = -1
MISSING_NODE = -2
MISSING_START = -3
MISSING_END = -4
NO_NEIGHBORS = -5
NO_PATH = ""
# getNodes() takes in a filename and parses the file to create nodes for each of the inputs in the file
# This also validates that the file exists and can be opened, and the start and end nodes are
# in the given graph.
# Input: Filename - string of the filename of the input
# start - the node to begin searching at
# end - the node to stop searching at
# Output: The nodes that are created and stored in a dictionary or an error flag
def | (Filename, start, end):
# flags to validate that nodes exist in the given graph
foundStart = 0
foundEnd = 0
# validation for opening the file
try:
inFile = open(Filename, 'r')
except IOError as e:
print ("I/O error({0}): {1} \"{2}\"".format(e.errno, e.strerror, Filename),)
# error flag of -1 for main
return FILE_IO
# initialized dictionary
nodeDict = {}
# loops through each line of the file
for line in inFile:
line = line.split()
# checks for start and end nodes and sets flag when found
if line[NODE_1] == start or line[NEIGHBOR_1] == start:
foundStart = 1
if line[NODE_1] == end or line[NEIGHBOR_1] == end:
foundEnd = 1
# adds an entry for each unadded node as the key with a tuple of neighbors and weight as the value
if line[NODE_1] in nodeDict.keys():
nodeDict[ line[NODE_1] ].append( ( line[NEIGHBOR_1], int(line[WEIGHT]) ) )
# if the node already exists, adds another node to the neighbors
else:
nodeDict[ line[NODE_1] ] = [(line[NEIGHBOR_1], int(line[WEIGHT]) )]
inFile.close()
# returns the dictionary if the nodes exist
if foundStart and foundEnd:
if start in nodeDict.keys():
return nodeDict
else:
return NO_NEIGHBORS
# returns an error message otherwise
elif foundStart:
return MISSING_END
elif foundEnd:
return MISSING_START
else:
return MISSING_NODE
# DFS() uses a graph to search depth first to find a path to a given end node
# from a start node and returns the path as a list
# Input: nodeDict - a dictionary of nodes representing a graph
# start - a start node that is in the graph
# end - the goal node that is in the graph
# Output: a list of the path from start to end
def DFS(nodeDict, start, end):
# creates lists for nodes to visit and visited nodes
Open = []
closed = []
# begins with the start node
Open.append(start)
# loops through the unvisited nodes until there are no more
while Open:
# examines the node at the top of the stack
curNode = Open.pop()
# checks if the node is found
if curNode == end:
# adds the final node and returns the path
closed.append(curNode)
return closed
# checks if you have visited the node before
elif curNode not in closed:
# adds the current node to visited nodes
closed.append(curNode)
# checks if the current node has neighbors for directed graphs
if curNode in nodeDict.keys():
# adds all neighbors of the current node to unvisited
for pair in sorted(nodeDict[curNode], reverse=True):
Open.append(pair[0])
# return blank string if none found
return ""
# BFS() uses a graph to search breadth first to find a path to a given end node
# from a start node and returns the path as a list
# Input: nodeDict - a dictionary of nodes representing a graph
# start - a start node that is in the graph
# end - the goal node that is in the graph
# Output: a list of the path from start to end
def BFS(nodeDict, start, end):
# creates the unvisited nodes as a queue
Open = Queue()
# closed 1 is the path taken and closed 2 is the node that led to the next node
seen = []
closed1 = []
closed2 = []
# begins searching at the start node which is the node and what led to it
Open.put((start,start))
seen.append(start)
# loops until there are no more unvisited nodes
while Open:
# dequeues the first node
curNode = Open.get()
# checks if the node is at the end and stops if it is
if curNode[0] == end:
# adds the final node and what sent it to the lists
closed1.append(curNode[0])
closed2.append(curNode[1])
# begins tracing list one back for the path at the goal node
cur = closed1[len(closed1)-1]
final = [cur]
# searches each pair until it goes back to the start node
while cur != start:
# finds the location of the current node
loc = closed1.index(cur)
# finds the node that sent the current node
cur = closed2[loc]
# adds the node that sent it to the list
final.append(cur)
# returns the final path reversed for consistency with DFS
return reversed(final)
# checks if the current node has neighbors for directed graphs
elif curNode[0] in nodeDict.keys():
# Adds each of the neighbors of the node if it is not the goal
for pair in sorted(nodeDict[curNode[0]]):
# must check if it is not in seen in case a previous node added it before closing
if pair[0] not in seen:
seen.append(pair[0])
# each node is classified by the node it is at and the node that led to it
Open.put((pair[0], curNode[0]))
# updates the visited lists and how they got there
closed1.append(curNode[0])
closed2.append(curNode[1])
# return blank string if none found
return ""
# UCS() uses a graph to search using Dijkstra's algorithm to find
# a path to a given end node from a start node and returns
# the path as a list
# Input: nodeDict - a dictionary of nodes representing a graph
# start - a start node that is in the graph
# end - the goal node that is in the graph
# Output: a list of the path from start to end
def UCS(nodeDict, start, end):
# crates the priority queue with a max value of 10,000
Open = PriorityQueue(10000)
# creates dictionaries to keep track of distance and previous node of each element
distance = {}
previous = {}
# Initializes each node to have infinity length and no previous
for node in nodeDict.keys():
# gives the initial node 0 distance to be chosen first
if node == start:
distance[node] = 0
else:
distance[node] = float('inf')
previous[node] = None
# adds each node to the queue
Open.put((distance[node], node))
# iterates through each node of the graph
while Open:
# gets the least valued piece from the queue
cur = Open.get()
# checks if reached the end
if cur[1] == end:
temp = end
finalPath = [temp]
# loops backwards through the found path until reaches start
while temp != start:
temp = previous[temp]
finalPath.append(temp)
# returns start reverse for consistency
return reversed(finalPath)
# list of nodes that are in open that need to be updated
openNodes = []
# Adds each of the neighbors of the node and compares their length
for pair in sorted(nodeDict[cur[1]]):
# distance of current path is saved and compared with distance
alternate = distance[cur[1]] + pair[1]
# if the distance is shorter it replaces in the algorithm
if alternate < distance[pair[0]]:
distance[pair[0]] = alternate
previous[pair[0]] = cur[1]
# finds if the nodes are in the open queue and adds the new value to temp list
if pair[0] in [x[1] for x in Open.queue]:
openNodes.append( (alternate, pair[0]) )
# list of all the nodes in open including updated ones
newOpen = []
# dequeues each of the nodes in Open to update the ones that need it
for i in range(len(Open.queue)):
node = Open.get()
if node[1] in [x[1] for x in openNodes]:
newOpen.append([x for x in openNodes if x[1] == node[1]][0])
else:
newOpen.append(node)
# repopulates Open with updated values
for node in newOpen:
Open.put(node)
# end while loop
# returns blank string if no output found
return ""
# writePath() writes the final path it takes to search from start to end to a new file
# Input: outFile - the filename of the file to be written to
# finalPath - a list of the nodes of the path from start to end nodes
# Output: None
def writePath(outFile, finalPath):
outFile = open(outFile, 'w')
if NO_PATH != finalPath:
for node in finalPath:
outFile.write("{0}\n".format(node))
else:
outFile.write("No path found")
outFile.close()
# main
def main(argv):
# validates amount of arguments given
if len(argv) != 6:
print("Invalid Input\nUsage: python Search.py <input file> <output file> <start node> <end node> <search_type>")
# validates correct search types entered
elif "DFS" != argv[SEARCH_TYPE] and "BFS" != argv[SEARCH_TYPE] and "UCS" != argv[SEARCH_TYPE]:
print("Invalid Search Type\nUsage: python Search.py <input file> <output file> <start node> <end node> <search_type>\n<search_type> = DFS or BFS or UCS")
else:
# Gets the dictionary of nodes and weights
nodeDict = getNodes(argv[INPUT_FILE], argv[START_NODE], argv[END_NODE])
# validates start and end nodes exist in graph
if type(nodeDict) == int and nodeDict <= MISSING_NODE:
if nodeDict == MISSING_START:
print("Start node ({0}) is not in the given graph.".format(argv[START_NODE]))
elif nodeDict == MISSING_END:
print("End node ({0}) is not in the given graph.".format(argv[END_NODE]))
elif nodeDict == NO_NEIGHBORS:
print("Start node ({0}) has no neighbors.".format(argv[START_NODE]))
else:
print("Start node ({0}) and/or End node ({1}) are not in the given graph.".format(argv[START_NODE], argv[END_NODE]))
# checks if file was sucessfully opened
elif nodeDict != FILE_IO:
# performs the search on the graph that the user requests
if "DFS" == argv[SEARCH_TYPE]:
finalPath = (DFS(nodeDict, argv[START_NODE], argv[END_NODE]))
elif "BFS" == argv[SEARCH_TYPE]:
finalPath = BFS(nodeDict, argv[START_NODE], argv[END_NODE])
elif "UCS" == argv[SEARCH_TYPE]:
finalPath = UCS(nodeDict, argv[START_NODE], argv[END_NODE])
# writes the final result to the provided file
writePath(argv[OUTPUT_FILE], finalPath)
# call to main
main(sys.argv)
# old test cases
#main(['Search.py','test.txt','sup1.txt','A','F', 'DFS'])
#main(['Search.py','utube.txt','sup2.txt','A','H', 'BFS'])
#main(['Search.py','other.txt','sup3.txt','S','G', 'DFS'])
#main(['Search.py','sample.txt','final.txt','1','50', 'BFS'])
#main(['Search.py','another.txt','one.txt','a','g', 'UCS'])
| getNodes | identifier_name |
Search.py | # coding: utf-8
# Jeremy Aguillon
# CMSC 471
# Project 1
# Due 2/15/2016
# imports queues for BFS and UCS
from queue import Queue
from queue import PriorityQueue
# imports sys for command line arguments (argv)
import sys
## Constants ##
# Command line arguments
INPUT_FILE = 1
OUTPUT_FILE = 2
START_NODE = 3
END_NODE = 4
SEARCH_TYPE = 5
# Input file arguments
NODE_1 = 0
NEIGHBOR_1 = 1
WEIGHT = 2
# Error flags
FILE_IO = -1
MISSING_NODE = -2
MISSING_START = -3
MISSING_END = -4
NO_NEIGHBORS = -5
NO_PATH = ""
# getNodes() takes in a filename and parses the file to create nodes for each of the inputs in the file
# This also validates that the file exists and can be opened, and the start and end nodes are
# in the given graph.
# Input: Filename - string of the filename of the input
# start - the node to begin searching at
# end - the node to stop searching at
# Output: The nodes that are created and stored in a dictionary or an error flag
def getNodes(Filename, start, end):
# flags to validate that nodes exist in the given graph
foundStart = 0
foundEnd = 0
# validation for opening the file
try:
inFile = open(Filename, 'r')
except IOError as e:
print ("I/O error({0}): {1} \"{2}\"".format(e.errno, e.strerror, Filename),)
# error flag of -1 for main
return FILE_IO
# initialized dictionary
nodeDict = {}
# loops through each line of the file
for line in inFile:
line = line.split()
# checks for start and end nodes and sets flag when found
if line[NODE_1] == start or line[NEIGHBOR_1] == start:
foundStart = 1
if line[NODE_1] == end or line[NEIGHBOR_1] == end:
foundEnd = 1
# adds an entry for each unadded node as the key with a tuple of neighbors and weight as the value
if line[NODE_1] in nodeDict.keys():
nodeDict[ line[NODE_1] ].append( ( line[NEIGHBOR_1], int(line[WEIGHT]) ) )
# if the node already exists, adds another node to the neighbors
else:
nodeDict[ line[NODE_1] ] = [(line[NEIGHBOR_1], int(line[WEIGHT]) )]
inFile.close()
# returns the dictionary if the nodes exist
if foundStart and foundEnd:
if start in nodeDict.keys():
return nodeDict
else:
return NO_NEIGHBORS
# returns an error message otherwise
elif foundStart:
return MISSING_END
elif foundEnd:
return MISSING_START
else:
return MISSING_NODE
# DFS() uses a graph to search depth first to find a path to a given end node
# from a start node and returns the path as a list
# Input: nodeDict - a dictionary of nodes representing a graph
# start - a start node that is in the graph
# end - the goal node that is in the graph
# Output: a list of the path from start to end
def DFS(nodeDict, start, end):
# creates lists for nodes to visit and visited nodes
Open = []
closed = []
# begins with the start node
Open.append(start)
# loops through the unvisited nodes until there are no more
while Open:
# examines the node at the top of the stack
curNode = Open.pop()
# checks if the node is found
if curNode == end:
# adds the final node and returns the path
closed.append(curNode)
return closed
# checks if you have visited the node before
elif curNode not in closed:
# adds the current node to visited nodes
closed.append(curNode)
# checks if the current node has neighbors for directed graphs
if curNode in nodeDict.keys():
# adds all neighbors of the current node to unvisited
for pair in sorted(nodeDict[curNode], reverse=True):
Open.append(pair[0])
# return blank string if none found
return ""
# BFS() uses a graph to search breadth first to find a path to a given end node
# from a start node and returns the path as a list
# Input: nodeDict - a dictionary of nodes representing a graph
# start - a start node that is in the graph
# end - the goal node that is in the graph
# Output: a list of the path from start to end
def BFS(nodeDict, start, end):
# creates the unvisited nodes as a queue
Open = Queue()
# closed 1 is the path taken and closed 2 is the node that led to the next node
seen = []
closed1 = []
closed2 = []
# begins searching at the start node which is the node and what led to it
Open.put((start,start))
seen.append(start)
# loops until there are no more unvisited nodes
while Open:
# dequeues the first node
curNode = Open.get()
# checks if the node is at the end and stops if it is
if curNode[0] == end:
# adds the final node and what sent it to the lists
closed1.append(curNode[0])
closed2.append(curNode[1])
# begins tracing list one back for the path at the goal node
cur = closed1[len(closed1)-1]
final = [cur]
# searches each pair until it goes back to the start node
while cur != start:
# finds the location of the current node
loc = closed1.index(cur)
# finds the node that sent the current node
cur = closed2[loc]
# adds the node that sent it to the list
final.append(cur)
# returns the final path reversed for consistency with DFS
return reversed(final)
# checks if the current node has neighbors for directed graphs
elif curNode[0] in nodeDict.keys():
# Adds each of the neighbors of the node if it is not the goal
for pair in sorted(nodeDict[curNode[0]]):
# must check if it is not in seen in case a previous node added it before closing
if pair[0] not in seen:
seen.append(pair[0])
# each node is classified by the node it is at and the node that led to it
Open.put((pair[0], curNode[0]))
# updates the visited lists and how they got there
closed1.append(curNode[0])
closed2.append(curNode[1])
# return blank string if none found
return ""
# UCS() uses a graph to search using Dijkstra's algorithm to find
# a path to a given end node from a start node and returns
# the path as a list
# Input: nodeDict - a dictionary of nodes representing a graph
# start - a start node that is in the graph
# end - the goal node that is in the graph
# Output: a list of the path from start to end
def UCS(nodeDict, start, end):
# crates the priority queue with a max value of 10,000
|
# writePath() writes the final path it takes to search from start to end to a new file
# Input: outFile - the filename of the file to be written to
# finalPath - a list of the nodes of the path from start to end nodes
# Output: None
def writePath(outFile, finalPath):
outFile = open(outFile, 'w')
if NO_PATH != finalPath:
for node in finalPath:
outFile.write("{0}\n".format(node))
else:
outFile.write("No path found")
outFile.close()
# main
def main(argv):
# validates amount of arguments given
if len(argv) != 6:
print("Invalid Input\nUsage: python Search.py <input file> <output file> <start node> <end node> <search_type>")
# validates correct search types entered
elif "DFS" != argv[SEARCH_TYPE] and "BFS" != argv[SEARCH_TYPE] and "UCS" != argv[SEARCH_TYPE]:
print("Invalid Search Type\nUsage: python Search.py <input file> <output file> <start node> <end node> <search_type>\n<search_type> = DFS or BFS or UCS")
else:
# Gets the dictionary of nodes and weights
nodeDict = getNodes(argv[INPUT_FILE], argv[START_NODE], argv[END_NODE])
# validates start and end nodes exist in graph
if type(nodeDict) == int and nodeDict <= MISSING_NODE:
if nodeDict == MISSING_START:
print("Start node ({0}) is not in the given graph.".format(argv[START_NODE]))
elif nodeDict == MISSING_END:
print("End node ({0}) is not in the given graph.".format(argv[END_NODE]))
elif nodeDict == NO_NEIGHBORS:
print("Start node ({0}) has no neighbors.".format(argv[START_NODE]))
else:
print("Start node ({0}) and/or End node ({1}) are not in the given graph.".format(argv[START_NODE], argv[END_NODE]))
# checks if file was sucessfully opened
elif nodeDict != FILE_IO:
# performs the search on the graph that the user requests
if "DFS" == argv[SEARCH_TYPE]:
finalPath = (DFS(nodeDict, argv[START_NODE], argv[END_NODE]))
elif "BFS" == argv[SEARCH_TYPE]:
finalPath = BFS(nodeDict, argv[START_NODE], argv[END_NODE])
elif "UCS" == argv[SEARCH_TYPE]:
finalPath = UCS(nodeDict, argv[START_NODE], argv[END_NODE])
# writes the final result to the provided file
writePath(argv[OUTPUT_FILE], finalPath)
# call to main
main(sys.argv)
# old test cases
#main(['Search.py','test.txt','sup1.txt','A','F', 'DFS'])
#main(['Search.py','utube.txt','sup2.txt','A','H', 'BFS'])
#main(['Search.py','other.txt','sup3.txt','S','G', 'DFS'])
#main(['Search.py','sample.txt','final.txt','1','50', 'BFS'])
#main(['Search.py','another.txt','one.txt','a','g', 'UCS'])
| Open = PriorityQueue(10000)
# creates dictionaries to keep track of distance and previous node of each element
distance = {}
previous = {}
# Initializes each node to have infinity length and no previous
for node in nodeDict.keys():
# gives the initial node 0 distance to be chosen first
if node == start:
distance[node] = 0
else:
distance[node] = float('inf')
previous[node] = None
# adds each node to the queue
Open.put((distance[node], node))
# iterates through each node of the graph
while Open:
# gets the least valued piece from the queue
cur = Open.get()
# checks if reached the end
if cur[1] == end:
temp = end
finalPath = [temp]
# loops backwards through the found path until reaches start
while temp != start:
temp = previous[temp]
finalPath.append(temp)
# returns start reverse for consistency
return reversed(finalPath)
# list of nodes that are in open that need to be updated
openNodes = []
# Adds each of the neighbors of the node and compares their length
for pair in sorted(nodeDict[cur[1]]):
# distance of current path is saved and compared with distance
alternate = distance[cur[1]] + pair[1]
# if the distance is shorter it replaces in the algorithm
if alternate < distance[pair[0]]:
distance[pair[0]] = alternate
previous[pair[0]] = cur[1]
# finds if the nodes are in the open queue and adds the new value to temp list
if pair[0] in [x[1] for x in Open.queue]:
openNodes.append( (alternate, pair[0]) )
# list of all the nodes in open including updated ones
newOpen = []
# dequeues each of the nodes in Open to update the ones that need it
for i in range(len(Open.queue)):
node = Open.get()
if node[1] in [x[1] for x in openNodes]:
newOpen.append([x for x in openNodes if x[1] == node[1]][0])
else:
newOpen.append(node)
# repopulates Open with updated values
for node in newOpen:
Open.put(node)
# end while loop
# returns blank string if no output found
return "" | identifier_body |
Search.py | # coding: utf-8
# Jeremy Aguillon
# CMSC 471
# Project 1
# Due 2/15/2016
# imports queues for BFS and UCS
from queue import Queue
from queue import PriorityQueue
# imports sys for command line arguments (argv)
import sys
## Constants ##
# Command line arguments
INPUT_FILE = 1
OUTPUT_FILE = 2
START_NODE = 3
END_NODE = 4
SEARCH_TYPE = 5
# Input file arguments
NODE_1 = 0
NEIGHBOR_1 = 1
WEIGHT = 2
# Error flags
FILE_IO = -1
MISSING_NODE = -2
MISSING_START = -3
MISSING_END = -4
NO_NEIGHBORS = -5
NO_PATH = ""
# getNodes() takes in a filename and parses the file to create nodes for each of the inputs in the file
# This also validates that the file exists and can be opened, and the start and end nodes are
# in the given graph.
# Input: Filename - string of the filename of the input
# start - the node to begin searching at
# end - the node to stop searching at
# Output: The nodes that are created and stored in a dictionary or an error flag
def getNodes(Filename, start, end):
# flags to validate that nodes exist in the given graph
foundStart = 0
foundEnd = 0
# validation for opening the file
try:
inFile = open(Filename, 'r')
except IOError as e:
print ("I/O error({0}): {1} \"{2}\"".format(e.errno, e.strerror, Filename),)
# error flag of -1 for main
return FILE_IO
# initialized dictionary
nodeDict = {}
# loops through each line of the file
for line in inFile:
line = line.split()
# checks for start and end nodes and sets flag when found
if line[NODE_1] == start or line[NEIGHBOR_1] == start:
foundStart = 1
if line[NODE_1] == end or line[NEIGHBOR_1] == end:
foundEnd = 1
# adds an entry for each unadded node as the key with a tuple of neighbors and weight as the value
if line[NODE_1] in nodeDict.keys():
nodeDict[ line[NODE_1] ].append( ( line[NEIGHBOR_1], int(line[WEIGHT]) ) )
# if the node already exists, adds another node to the neighbors
else:
nodeDict[ line[NODE_1] ] = [(line[NEIGHBOR_1], int(line[WEIGHT]) )]
inFile.close()
# returns the dictionary if the nodes exist
if foundStart and foundEnd:
if start in nodeDict.keys():
return nodeDict
else:
return NO_NEIGHBORS
# returns an error message otherwise
elif foundStart:
return MISSING_END
elif foundEnd:
return MISSING_START
else:
return MISSING_NODE
# DFS() uses a graph to search depth first to find a path to a given end node
# from a start node and returns the path as a list
# Input: nodeDict - a dictionary of nodes representing a graph
# start - a start node that is in the graph
# end - the goal node that is in the graph
# Output: a list of the path from start to end
def DFS(nodeDict, start, end):
# creates lists for nodes to visit and visited nodes
Open = []
closed = []
# begins with the start node
Open.append(start)
# loops through the unvisited nodes until there are no more
while Open:
# examines the node at the top of the stack
curNode = Open.pop()
# checks if the node is found
if curNode == end:
# adds the final node and returns the path
closed.append(curNode)
return closed
# checks if you have visited the node before
elif curNode not in closed:
# adds the current node to visited nodes
closed.append(curNode)
# checks if the current node has neighbors for directed graphs
if curNode in nodeDict.keys():
# adds all neighbors of the current node to unvisited
for pair in sorted(nodeDict[curNode], reverse=True):
Open.append(pair[0])
# return blank string if none found
return ""
# BFS() uses a graph to search breadth first to find a path to a given end node
# from a start node and returns the path as a list
# Input: nodeDict - a dictionary of nodes representing a graph
# start - a start node that is in the graph
# end - the goal node that is in the graph
# Output: a list of the path from start to end
def BFS(nodeDict, start, end):
# creates the unvisited nodes as a queue
Open = Queue()
# closed 1 is the path taken and closed 2 is the node that led to the next node
seen = []
closed1 = []
closed2 = []
# begins searching at the start node which is the node and what led to it
Open.put((start,start))
seen.append(start)
# loops until there are no more unvisited nodes
while Open:
# dequeues the first node
curNode = Open.get()
# checks if the node is at the end and stops if it is
if curNode[0] == end:
# adds the final node and what sent it to the lists
closed1.append(curNode[0])
closed2.append(curNode[1])
# begins tracing list one back for the path at the goal node
cur = closed1[len(closed1)-1]
final = [cur]
# searches each pair until it goes back to the start node
while cur != start:
# finds the location of the current node
loc = closed1.index(cur)
# finds the node that sent the current node
cur = closed2[loc]
# adds the node that sent it to the list
final.append(cur)
# returns the final path reversed for consistency with DFS
return reversed(final)
# checks if the current node has neighbors for directed graphs
elif curNode[0] in nodeDict.keys():
# Adds each of the neighbors of the node if it is not the goal
for pair in sorted(nodeDict[curNode[0]]):
# must check if it is not in seen in case a previous node added it before closing
if pair[0] not in seen:
seen.append(pair[0])
# each node is classified by the node it is at and the node that led to it
Open.put((pair[0], curNode[0]))
# updates the visited lists and how they got there
closed1.append(curNode[0])
closed2.append(curNode[1])
# return blank string if none found
return ""
# UCS() uses a graph to search using Dijkstra's algorithm to find
# a path to a given end node from a start node and returns
# the path as a list
# Input: nodeDict - a dictionary of nodes representing a graph
# start - a start node that is in the graph
# end - the goal node that is in the graph
# Output: a list of the path from start to end
def UCS(nodeDict, start, end):
# crates the priority queue with a max value of 10,000
Open = PriorityQueue(10000)
# creates dictionaries to keep track of distance and previous node of each element
distance = {}
previous = {}
# Initializes each node to have infinity length and no previous
for node in nodeDict.keys():
# gives the initial node 0 distance to be chosen first
if node == start:
distance[node] = 0
else:
distance[node] = float('inf')
previous[node] = None
# adds each node to the queue
Open.put((distance[node], node))
# iterates through each node of the graph
while Open:
# gets the least valued piece from the queue
cur = Open.get()
# checks if reached the end
if cur[1] == end:
temp = end
finalPath = [temp]
# loops backwards through the found path until reaches start
while temp != start:
temp = previous[temp]
finalPath.append(temp)
# returns start reverse for consistency
return reversed(finalPath)
# list of nodes that are in open that need to be updated
openNodes = []
# Adds each of the neighbors of the node and compares their length
for pair in sorted(nodeDict[cur[1]]):
# distance of current path is saved and compared with distance
alternate = distance[cur[1]] + pair[1]
# if the distance is shorter it replaces in the algorithm
if alternate < distance[pair[0]]:
distance[pair[0]] = alternate
previous[pair[0]] = cur[1]
# finds if the nodes are in the open queue and adds the new value to temp list
if pair[0] in [x[1] for x in Open.queue]:
openNodes.append( (alternate, pair[0]) )
# list of all the nodes in open including updated ones
newOpen = []
# dequeues each of the nodes in Open to update the ones that need it
for i in range(len(Open.queue)):
node = Open.get()
if node[1] in [x[1] for x in openNodes]:
newOpen.append([x for x in openNodes if x[1] == node[1]][0])
else:
|
# repopulates Open with updated values
for node in newOpen:
Open.put(node)
# end while loop
# returns blank string if no output found
return ""
# writePath() writes the final path it takes to search from start to end to a new file
# Input: outFile - the filename of the file to be written to
# finalPath - a list of the nodes of the path from start to end nodes
# Output: None
def writePath(outFile, finalPath):
outFile = open(outFile, 'w')
if NO_PATH != finalPath:
for node in finalPath:
outFile.write("{0}\n".format(node))
else:
outFile.write("No path found")
outFile.close()
# main
def main(argv):
# validates amount of arguments given
if len(argv) != 6:
print("Invalid Input\nUsage: python Search.py <input file> <output file> <start node> <end node> <search_type>")
# validates correct search types entered
elif "DFS" != argv[SEARCH_TYPE] and "BFS" != argv[SEARCH_TYPE] and "UCS" != argv[SEARCH_TYPE]:
print("Invalid Search Type\nUsage: python Search.py <input file> <output file> <start node> <end node> <search_type>\n<search_type> = DFS or BFS or UCS")
else:
# Gets the dictionary of nodes and weights
nodeDict = getNodes(argv[INPUT_FILE], argv[START_NODE], argv[END_NODE])
# validates start and end nodes exist in graph
if type(nodeDict) == int and nodeDict <= MISSING_NODE:
if nodeDict == MISSING_START:
print("Start node ({0}) is not in the given graph.".format(argv[START_NODE]))
elif nodeDict == MISSING_END:
print("End node ({0}) is not in the given graph.".format(argv[END_NODE]))
elif nodeDict == NO_NEIGHBORS:
print("Start node ({0}) has no neighbors.".format(argv[START_NODE]))
else:
print("Start node ({0}) and/or End node ({1}) are not in the given graph.".format(argv[START_NODE], argv[END_NODE]))
# checks if file was sucessfully opened
elif nodeDict != FILE_IO:
# performs the search on the graph that the user requests
if "DFS" == argv[SEARCH_TYPE]:
finalPath = (DFS(nodeDict, argv[START_NODE], argv[END_NODE]))
elif "BFS" == argv[SEARCH_TYPE]:
finalPath = BFS(nodeDict, argv[START_NODE], argv[END_NODE])
elif "UCS" == argv[SEARCH_TYPE]:
finalPath = UCS(nodeDict, argv[START_NODE], argv[END_NODE])
# writes the final result to the provided file
writePath(argv[OUTPUT_FILE], finalPath)
# call to main
main(sys.argv)
# old test cases
#main(['Search.py','test.txt','sup1.txt','A','F', 'DFS'])
#main(['Search.py','utube.txt','sup2.txt','A','H', 'BFS'])
#main(['Search.py','other.txt','sup3.txt','S','G', 'DFS'])
#main(['Search.py','sample.txt','final.txt','1','50', 'BFS'])
#main(['Search.py','another.txt','one.txt','a','g', 'UCS'])
| newOpen.append(node) | conditional_block |
session.rs | use crypto::digest::Digest;
use crypto::sha1::Sha1;
use crypto::hmac::Hmac;
use crypto::mac::Mac;
use eventual;
use eventual::Future;
use eventual::Async;
use protobuf::{self, Message};
use rand::thread_rng;
use rand::Rng;
use std::io::{Read, Write, Cursor};
use std::result::Result;
use std::sync::{Mutex, RwLock, Arc, mpsc};
use album_cover::AlbumCover;
use apresolve::apresolve;
use audio_key::{AudioKeyManager, AudioKey, AudioKeyError};
use audio_file::AudioFile;
use authentication::Credentials;
use cache::Cache;
use connection::{self, PlainConnection, CipherConnection};
use diffie_hellman::DHLocalKeys;
use mercury::{MercuryManager, MercuryRequest, MercuryResponse};
use metadata::{MetadataManager, MetadataRef, MetadataTrait};
use protocol;
use stream::StreamManager;
use util::{self, SpotifyId, FileId, ReadSeek};
use version;
use stream;
pub enum Bitrate {
Bitrate96,
Bitrate160,
Bitrate320,
}
pub struct Config {
pub application_key: Vec<u8>,
pub user_agent: String,
pub device_name: String,
pub bitrate: Bitrate,
}
pub struct SessionData {
country: String,
canonical_username: String,
}
pub struct SessionInternal {
config: Config,
device_id: String,
data: RwLock<SessionData>,
cache: Box<Cache + Send + Sync>,
mercury: Mutex<MercuryManager>,
metadata: Mutex<MetadataManager>,
stream: Mutex<StreamManager>,
audio_key: Mutex<AudioKeyManager>,
rx_connection: Mutex<Option<CipherConnection>>,
tx_connection: Mutex<Option<CipherConnection>>, | pub struct Session(pub Arc<SessionInternal>);
impl Session {
pub fn new(config: Config, cache: Box<Cache + Send + Sync>) -> Session {
let device_id = {
let mut h = Sha1::new();
h.input_str(&config.device_name);
h.result_str()
};
Session(Arc::new(SessionInternal {
config: config,
device_id: device_id,
data: RwLock::new(SessionData {
country: String::new(),
canonical_username: String::new(),
}),
rx_connection: Mutex::new(None),
tx_connection: Mutex::new(None),
cache: cache,
mercury: Mutex::new(MercuryManager::new()),
metadata: Mutex::new(MetadataManager::new()),
stream: Mutex::new(StreamManager::new()),
audio_key: Mutex::new(AudioKeyManager::new()),
}))
}
fn connect(&self) -> CipherConnection {
let local_keys = DHLocalKeys::random(&mut thread_rng());
let aps = apresolve().unwrap();
let ap = thread_rng().choose(&aps).expect("No APs found");
info!("Connecting to AP {}", ap);
let mut connection = PlainConnection::connect(ap).unwrap();
let request = protobuf_init!(protocol::keyexchange::ClientHello::new(), {
build_info => {
product: protocol::keyexchange::Product::PRODUCT_LIBSPOTIFY_EMBEDDED,
platform: protocol::keyexchange::Platform::PLATFORM_LINUX_X86,
version: 0x10800000000,
},
/*
fingerprints_supported => [
protocol::keyexchange::Fingerprint::FINGERPRINT_GRAIN
],
*/
cryptosuites_supported => [
protocol::keyexchange::Cryptosuite::CRYPTO_SUITE_SHANNON,
//protocol::keyexchange::Cryptosuite::CRYPTO_SUITE_RC4_SHA1_HMAC
],
/*
powschemes_supported => [
protocol::keyexchange::Powscheme::POW_HASH_CASH
],
*/
login_crypto_hello.diffie_hellman => {
gc: local_keys.public_key(),
server_keys_known: 1,
},
client_nonce: util::rand_vec(&mut thread_rng(), 0x10),
padding: vec![0x1e],
feature_set => {
autoupdate2: true,
}
});
let init_client_packet = connection.send_packet_prefix(&[0, 4],
&request.write_to_bytes().unwrap())
.unwrap();
let init_server_packet = connection.recv_packet().unwrap();
let response: protocol::keyexchange::APResponseMessage =
protobuf::parse_from_bytes(&init_server_packet[4..]).unwrap();
let remote_key = response.get_challenge()
.get_login_crypto_challenge()
.get_diffie_hellman()
.get_gs();
let shared_secret = local_keys.shared_secret(remote_key);
let (challenge, send_key, recv_key) = {
let mut data = Vec::with_capacity(0x64);
let mut mac = Hmac::new(Sha1::new(), &shared_secret);
for i in 1..6 {
mac.input(&init_client_packet);
mac.input(&init_server_packet);
mac.input(&[i]);
data.write(&mac.result().code()).unwrap();
mac.reset();
}
mac = Hmac::new(Sha1::new(), &data[..0x14]);
mac.input(&init_client_packet);
mac.input(&init_server_packet);
(mac.result().code().to_vec(),
data[0x14..0x34].to_vec(),
data[0x34..0x54].to_vec())
};
let packet = protobuf_init!(protocol::keyexchange::ClientResponsePlaintext::new(), {
login_crypto_response.diffie_hellman => {
hmac: challenge
},
pow_response => {},
crypto_response => {},
});
connection.send_packet(&packet.write_to_bytes().unwrap()).unwrap();
CipherConnection::new(connection.into_stream(),
&send_key,
&recv_key)
}
pub fn login(&self, credentials: Credentials) -> Result<Credentials, ()> {
let packet = protobuf_init!(protocol::authentication::ClientResponseEncrypted::new(), {
login_credentials => {
username: credentials.username,
typ: credentials.auth_type,
auth_data: credentials.auth_data,
},
system_info => {
cpu_family: protocol::authentication::CpuFamily::CPU_UNKNOWN,
os: protocol::authentication::Os::OS_UNKNOWN,
system_information_string: "librespot".to_owned(),
device_id: self.device_id().to_owned(),
},
version_string: version::version_string(),
appkey => {
version: self.config().application_key[0] as u32,
devkey: self.config().application_key[0x1..0x81].to_vec(),
signature: self.config().application_key[0x81..0x141].to_vec(),
useragent: self.config().user_agent.clone(),
callback_hash: vec![0; 20],
}
});
let mut connection = self.connect();
connection.send_packet(0xab, &packet.write_to_bytes().unwrap()).unwrap();
let (cmd, data) = connection.recv_packet().unwrap();
match cmd {
0xac => {
let welcome_data: protocol::authentication::APWelcome =
protobuf::parse_from_bytes(&data).unwrap();
let username = welcome_data.get_canonical_username().to_owned();
self.0.data.write().unwrap().canonical_username = username.clone();
*self.0.rx_connection.lock().unwrap() = Some(connection.clone());
*self.0.tx_connection.lock().unwrap() = Some(connection);
info!("Authenticated !");
let reusable_credentials = Credentials {
username: username,
auth_type: welcome_data.get_reusable_auth_credentials_type(),
auth_data: welcome_data.get_reusable_auth_credentials().to_owned(),
};
self.0.cache.put_credentials(&reusable_credentials);
Ok(reusable_credentials)
}
0xad => {
let msg: protocol::keyexchange::APLoginFailed =
protobuf::parse_from_bytes(&data).unwrap();
error!("Authentication failed, {:?}", msg);
Err(())
}
_ => {
error!("Unexpected message {:x}", cmd);
Err(())
}
}
}
pub fn poll(&self) {
let (cmd, data) = self.recv();
match cmd {
0x4 => self.send_packet(0x49, &data).unwrap(),
0x4a => (),
0x9 | 0xa => self.0.stream.lock().unwrap().handle(cmd, data, self),
0xd | 0xe => self.0.audio_key.lock().unwrap().handle(cmd, data, self),
0x1b => {
self.0.data.write().unwrap().country = String::from_utf8(data).unwrap();
}
0xb2...0xb6 => self.0.mercury.lock().unwrap().handle(cmd, data, self),
_ => (),
}
}
pub fn recv(&self) -> (u8, Vec<u8>) {
self.0.rx_connection.lock().unwrap().as_mut().unwrap().recv_packet().unwrap()
}
pub fn send_packet(&self, cmd: u8, data: &[u8]) -> connection::Result<()> {
self.0.tx_connection.lock().unwrap().as_mut().unwrap().send_packet(cmd, data)
}
pub fn audio_key(&self, track: SpotifyId, file_id: FileId) -> Future<AudioKey, AudioKeyError> {
self.0.cache
.get_audio_key(track, file_id)
.map(Future::of)
.unwrap_or_else(|| {
let self_ = self.clone();
self.0.audio_key.lock().unwrap()
.request(self, track, file_id)
.map(move |key| {
self_.0.cache.put_audio_key(track, file_id, key);
key
})
})
}
pub fn audio_file(&self, file_id: FileId) -> Box<ReadSeek> {
self.0.cache
.get_file(file_id)
.unwrap_or_else(|| {
let (audio_file, complete_rx) = AudioFile::new(self, file_id);
let self_ = self.clone();
complete_rx.map(move |mut complete_file| {
self_.0.cache.put_file(file_id, &mut complete_file)
}).fire();
Box::new(audio_file.await().unwrap())
})
}
pub fn album_cover(&self, file_id: FileId) -> eventual::Future<Vec<u8>, ()> {
self.0.cache
.get_file(file_id)
.map(|mut f| {
let mut data = Vec::new();
f.read_to_end(&mut data).unwrap();
Future::of(data)
})
.unwrap_or_else(|| {
let self_ = self.clone();
AlbumCover::get(file_id, self)
.map(move |data| {
self_.0.cache.put_file(file_id, &mut Cursor::new(&data));
data
})
})
}
pub fn stream(&self, handler: Box<stream::Handler>) {
self.0.stream.lock().unwrap().create(handler, self)
}
pub fn metadata<T: MetadataTrait>(&self, id: SpotifyId) -> MetadataRef<T> {
self.0.metadata.lock().unwrap().get(self, id)
}
pub fn mercury(&self, req: MercuryRequest) -> Future<MercuryResponse, ()> {
self.0.mercury.lock().unwrap().request(self, req)
}
pub fn mercury_sub(&self, uri: String) -> mpsc::Receiver<MercuryResponse> {
self.0.mercury.lock().unwrap().subscribe(self, uri)
}
pub fn cache(&self) -> &Cache {
self.0.cache.as_ref()
}
pub fn config(&self) -> &Config {
&self.0.config
}
pub fn username(&self) -> String {
self.0.data.read().unwrap().canonical_username.clone()
}
pub fn country(&self) -> String {
self.0.data.read().unwrap().country.clone()
}
pub fn device_id(&self) -> &str {
&self.0.device_id
}
}
pub trait PacketHandler {
fn handle(&mut self, cmd: u8, data: Vec<u8>, session: &Session);
} | }
#[derive(Clone)] | random_line_split |
session.rs | use crypto::digest::Digest;
use crypto::sha1::Sha1;
use crypto::hmac::Hmac;
use crypto::mac::Mac;
use eventual;
use eventual::Future;
use eventual::Async;
use protobuf::{self, Message};
use rand::thread_rng;
use rand::Rng;
use std::io::{Read, Write, Cursor};
use std::result::Result;
use std::sync::{Mutex, RwLock, Arc, mpsc};
use album_cover::AlbumCover;
use apresolve::apresolve;
use audio_key::{AudioKeyManager, AudioKey, AudioKeyError};
use audio_file::AudioFile;
use authentication::Credentials;
use cache::Cache;
use connection::{self, PlainConnection, CipherConnection};
use diffie_hellman::DHLocalKeys;
use mercury::{MercuryManager, MercuryRequest, MercuryResponse};
use metadata::{MetadataManager, MetadataRef, MetadataTrait};
use protocol;
use stream::StreamManager;
use util::{self, SpotifyId, FileId, ReadSeek};
use version;
use stream;
pub enum Bitrate {
Bitrate96,
Bitrate160,
Bitrate320,
}
pub struct Config {
pub application_key: Vec<u8>,
pub user_agent: String,
pub device_name: String,
pub bitrate: Bitrate,
}
pub struct SessionData {
country: String,
canonical_username: String,
}
pub struct | {
config: Config,
device_id: String,
data: RwLock<SessionData>,
cache: Box<Cache + Send + Sync>,
mercury: Mutex<MercuryManager>,
metadata: Mutex<MetadataManager>,
stream: Mutex<StreamManager>,
audio_key: Mutex<AudioKeyManager>,
rx_connection: Mutex<Option<CipherConnection>>,
tx_connection: Mutex<Option<CipherConnection>>,
}
#[derive(Clone)]
pub struct Session(pub Arc<SessionInternal>);
impl Session {
pub fn new(config: Config, cache: Box<Cache + Send + Sync>) -> Session {
let device_id = {
let mut h = Sha1::new();
h.input_str(&config.device_name);
h.result_str()
};
Session(Arc::new(SessionInternal {
config: config,
device_id: device_id,
data: RwLock::new(SessionData {
country: String::new(),
canonical_username: String::new(),
}),
rx_connection: Mutex::new(None),
tx_connection: Mutex::new(None),
cache: cache,
mercury: Mutex::new(MercuryManager::new()),
metadata: Mutex::new(MetadataManager::new()),
stream: Mutex::new(StreamManager::new()),
audio_key: Mutex::new(AudioKeyManager::new()),
}))
}
fn connect(&self) -> CipherConnection {
let local_keys = DHLocalKeys::random(&mut thread_rng());
let aps = apresolve().unwrap();
let ap = thread_rng().choose(&aps).expect("No APs found");
info!("Connecting to AP {}", ap);
let mut connection = PlainConnection::connect(ap).unwrap();
let request = protobuf_init!(protocol::keyexchange::ClientHello::new(), {
build_info => {
product: protocol::keyexchange::Product::PRODUCT_LIBSPOTIFY_EMBEDDED,
platform: protocol::keyexchange::Platform::PLATFORM_LINUX_X86,
version: 0x10800000000,
},
/*
fingerprints_supported => [
protocol::keyexchange::Fingerprint::FINGERPRINT_GRAIN
],
*/
cryptosuites_supported => [
protocol::keyexchange::Cryptosuite::CRYPTO_SUITE_SHANNON,
//protocol::keyexchange::Cryptosuite::CRYPTO_SUITE_RC4_SHA1_HMAC
],
/*
powschemes_supported => [
protocol::keyexchange::Powscheme::POW_HASH_CASH
],
*/
login_crypto_hello.diffie_hellman => {
gc: local_keys.public_key(),
server_keys_known: 1,
},
client_nonce: util::rand_vec(&mut thread_rng(), 0x10),
padding: vec![0x1e],
feature_set => {
autoupdate2: true,
}
});
let init_client_packet = connection.send_packet_prefix(&[0, 4],
&request.write_to_bytes().unwrap())
.unwrap();
let init_server_packet = connection.recv_packet().unwrap();
let response: protocol::keyexchange::APResponseMessage =
protobuf::parse_from_bytes(&init_server_packet[4..]).unwrap();
let remote_key = response.get_challenge()
.get_login_crypto_challenge()
.get_diffie_hellman()
.get_gs();
let shared_secret = local_keys.shared_secret(remote_key);
let (challenge, send_key, recv_key) = {
let mut data = Vec::with_capacity(0x64);
let mut mac = Hmac::new(Sha1::new(), &shared_secret);
for i in 1..6 {
mac.input(&init_client_packet);
mac.input(&init_server_packet);
mac.input(&[i]);
data.write(&mac.result().code()).unwrap();
mac.reset();
}
mac = Hmac::new(Sha1::new(), &data[..0x14]);
mac.input(&init_client_packet);
mac.input(&init_server_packet);
(mac.result().code().to_vec(),
data[0x14..0x34].to_vec(),
data[0x34..0x54].to_vec())
};
let packet = protobuf_init!(protocol::keyexchange::ClientResponsePlaintext::new(), {
login_crypto_response.diffie_hellman => {
hmac: challenge
},
pow_response => {},
crypto_response => {},
});
connection.send_packet(&packet.write_to_bytes().unwrap()).unwrap();
CipherConnection::new(connection.into_stream(),
&send_key,
&recv_key)
}
pub fn login(&self, credentials: Credentials) -> Result<Credentials, ()> {
let packet = protobuf_init!(protocol::authentication::ClientResponseEncrypted::new(), {
login_credentials => {
username: credentials.username,
typ: credentials.auth_type,
auth_data: credentials.auth_data,
},
system_info => {
cpu_family: protocol::authentication::CpuFamily::CPU_UNKNOWN,
os: protocol::authentication::Os::OS_UNKNOWN,
system_information_string: "librespot".to_owned(),
device_id: self.device_id().to_owned(),
},
version_string: version::version_string(),
appkey => {
version: self.config().application_key[0] as u32,
devkey: self.config().application_key[0x1..0x81].to_vec(),
signature: self.config().application_key[0x81..0x141].to_vec(),
useragent: self.config().user_agent.clone(),
callback_hash: vec![0; 20],
}
});
let mut connection = self.connect();
connection.send_packet(0xab, &packet.write_to_bytes().unwrap()).unwrap();
let (cmd, data) = connection.recv_packet().unwrap();
match cmd {
0xac => {
let welcome_data: protocol::authentication::APWelcome =
protobuf::parse_from_bytes(&data).unwrap();
let username = welcome_data.get_canonical_username().to_owned();
self.0.data.write().unwrap().canonical_username = username.clone();
*self.0.rx_connection.lock().unwrap() = Some(connection.clone());
*self.0.tx_connection.lock().unwrap() = Some(connection);
info!("Authenticated !");
let reusable_credentials = Credentials {
username: username,
auth_type: welcome_data.get_reusable_auth_credentials_type(),
auth_data: welcome_data.get_reusable_auth_credentials().to_owned(),
};
self.0.cache.put_credentials(&reusable_credentials);
Ok(reusable_credentials)
}
0xad => {
let msg: protocol::keyexchange::APLoginFailed =
protobuf::parse_from_bytes(&data).unwrap();
error!("Authentication failed, {:?}", msg);
Err(())
}
_ => {
error!("Unexpected message {:x}", cmd);
Err(())
}
}
}
pub fn poll(&self) {
let (cmd, data) = self.recv();
match cmd {
0x4 => self.send_packet(0x49, &data).unwrap(),
0x4a => (),
0x9 | 0xa => self.0.stream.lock().unwrap().handle(cmd, data, self),
0xd | 0xe => self.0.audio_key.lock().unwrap().handle(cmd, data, self),
0x1b => {
self.0.data.write().unwrap().country = String::from_utf8(data).unwrap();
}
0xb2...0xb6 => self.0.mercury.lock().unwrap().handle(cmd, data, self),
_ => (),
}
}
pub fn recv(&self) -> (u8, Vec<u8>) {
self.0.rx_connection.lock().unwrap().as_mut().unwrap().recv_packet().unwrap()
}
pub fn send_packet(&self, cmd: u8, data: &[u8]) -> connection::Result<()> {
self.0.tx_connection.lock().unwrap().as_mut().unwrap().send_packet(cmd, data)
}
pub fn audio_key(&self, track: SpotifyId, file_id: FileId) -> Future<AudioKey, AudioKeyError> {
self.0.cache
.get_audio_key(track, file_id)
.map(Future::of)
.unwrap_or_else(|| {
let self_ = self.clone();
self.0.audio_key.lock().unwrap()
.request(self, track, file_id)
.map(move |key| {
self_.0.cache.put_audio_key(track, file_id, key);
key
})
})
}
pub fn audio_file(&self, file_id: FileId) -> Box<ReadSeek> {
self.0.cache
.get_file(file_id)
.unwrap_or_else(|| {
let (audio_file, complete_rx) = AudioFile::new(self, file_id);
let self_ = self.clone();
complete_rx.map(move |mut complete_file| {
self_.0.cache.put_file(file_id, &mut complete_file)
}).fire();
Box::new(audio_file.await().unwrap())
})
}
pub fn album_cover(&self, file_id: FileId) -> eventual::Future<Vec<u8>, ()> {
self.0.cache
.get_file(file_id)
.map(|mut f| {
let mut data = Vec::new();
f.read_to_end(&mut data).unwrap();
Future::of(data)
})
.unwrap_or_else(|| {
let self_ = self.clone();
AlbumCover::get(file_id, self)
.map(move |data| {
self_.0.cache.put_file(file_id, &mut Cursor::new(&data));
data
})
})
}
pub fn stream(&self, handler: Box<stream::Handler>) {
self.0.stream.lock().unwrap().create(handler, self)
}
pub fn metadata<T: MetadataTrait>(&self, id: SpotifyId) -> MetadataRef<T> {
self.0.metadata.lock().unwrap().get(self, id)
}
pub fn mercury(&self, req: MercuryRequest) -> Future<MercuryResponse, ()> {
self.0.mercury.lock().unwrap().request(self, req)
}
pub fn mercury_sub(&self, uri: String) -> mpsc::Receiver<MercuryResponse> {
self.0.mercury.lock().unwrap().subscribe(self, uri)
}
pub fn cache(&self) -> &Cache {
self.0.cache.as_ref()
}
pub fn config(&self) -> &Config {
&self.0.config
}
pub fn username(&self) -> String {
self.0.data.read().unwrap().canonical_username.clone()
}
pub fn country(&self) -> String {
self.0.data.read().unwrap().country.clone()
}
pub fn device_id(&self) -> &str {
&self.0.device_id
}
}
pub trait PacketHandler {
fn handle(&mut self, cmd: u8, data: Vec<u8>, session: &Session);
}
| SessionInternal | identifier_name |
session.rs | use crypto::digest::Digest;
use crypto::sha1::Sha1;
use crypto::hmac::Hmac;
use crypto::mac::Mac;
use eventual;
use eventual::Future;
use eventual::Async;
use protobuf::{self, Message};
use rand::thread_rng;
use rand::Rng;
use std::io::{Read, Write, Cursor};
use std::result::Result;
use std::sync::{Mutex, RwLock, Arc, mpsc};
use album_cover::AlbumCover;
use apresolve::apresolve;
use audio_key::{AudioKeyManager, AudioKey, AudioKeyError};
use audio_file::AudioFile;
use authentication::Credentials;
use cache::Cache;
use connection::{self, PlainConnection, CipherConnection};
use diffie_hellman::DHLocalKeys;
use mercury::{MercuryManager, MercuryRequest, MercuryResponse};
use metadata::{MetadataManager, MetadataRef, MetadataTrait};
use protocol;
use stream::StreamManager;
use util::{self, SpotifyId, FileId, ReadSeek};
use version;
use stream;
pub enum Bitrate {
Bitrate96,
Bitrate160,
Bitrate320,
}
pub struct Config {
pub application_key: Vec<u8>,
pub user_agent: String,
pub device_name: String,
pub bitrate: Bitrate,
}
pub struct SessionData {
country: String,
canonical_username: String,
}
pub struct SessionInternal {
config: Config,
device_id: String,
data: RwLock<SessionData>,
cache: Box<Cache + Send + Sync>,
mercury: Mutex<MercuryManager>,
metadata: Mutex<MetadataManager>,
stream: Mutex<StreamManager>,
audio_key: Mutex<AudioKeyManager>,
rx_connection: Mutex<Option<CipherConnection>>,
tx_connection: Mutex<Option<CipherConnection>>,
}
#[derive(Clone)]
pub struct Session(pub Arc<SessionInternal>);
impl Session {
pub fn new(config: Config, cache: Box<Cache + Send + Sync>) -> Session {
let device_id = {
let mut h = Sha1::new();
h.input_str(&config.device_name);
h.result_str()
};
Session(Arc::new(SessionInternal {
config: config,
device_id: device_id,
data: RwLock::new(SessionData {
country: String::new(),
canonical_username: String::new(),
}),
rx_connection: Mutex::new(None),
tx_connection: Mutex::new(None),
cache: cache,
mercury: Mutex::new(MercuryManager::new()),
metadata: Mutex::new(MetadataManager::new()),
stream: Mutex::new(StreamManager::new()),
audio_key: Mutex::new(AudioKeyManager::new()),
}))
}
fn connect(&self) -> CipherConnection {
let local_keys = DHLocalKeys::random(&mut thread_rng());
let aps = apresolve().unwrap();
let ap = thread_rng().choose(&aps).expect("No APs found");
info!("Connecting to AP {}", ap);
let mut connection = PlainConnection::connect(ap).unwrap();
let request = protobuf_init!(protocol::keyexchange::ClientHello::new(), {
build_info => {
product: protocol::keyexchange::Product::PRODUCT_LIBSPOTIFY_EMBEDDED,
platform: protocol::keyexchange::Platform::PLATFORM_LINUX_X86,
version: 0x10800000000,
},
/*
fingerprints_supported => [
protocol::keyexchange::Fingerprint::FINGERPRINT_GRAIN
],
*/
cryptosuites_supported => [
protocol::keyexchange::Cryptosuite::CRYPTO_SUITE_SHANNON,
//protocol::keyexchange::Cryptosuite::CRYPTO_SUITE_RC4_SHA1_HMAC
],
/*
powschemes_supported => [
protocol::keyexchange::Powscheme::POW_HASH_CASH
],
*/
login_crypto_hello.diffie_hellman => {
gc: local_keys.public_key(),
server_keys_known: 1,
},
client_nonce: util::rand_vec(&mut thread_rng(), 0x10),
padding: vec![0x1e],
feature_set => {
autoupdate2: true,
}
});
let init_client_packet = connection.send_packet_prefix(&[0, 4],
&request.write_to_bytes().unwrap())
.unwrap();
let init_server_packet = connection.recv_packet().unwrap();
let response: protocol::keyexchange::APResponseMessage =
protobuf::parse_from_bytes(&init_server_packet[4..]).unwrap();
let remote_key = response.get_challenge()
.get_login_crypto_challenge()
.get_diffie_hellman()
.get_gs();
let shared_secret = local_keys.shared_secret(remote_key);
let (challenge, send_key, recv_key) = {
let mut data = Vec::with_capacity(0x64);
let mut mac = Hmac::new(Sha1::new(), &shared_secret);
for i in 1..6 {
mac.input(&init_client_packet);
mac.input(&init_server_packet);
mac.input(&[i]);
data.write(&mac.result().code()).unwrap();
mac.reset();
}
mac = Hmac::new(Sha1::new(), &data[..0x14]);
mac.input(&init_client_packet);
mac.input(&init_server_packet);
(mac.result().code().to_vec(),
data[0x14..0x34].to_vec(),
data[0x34..0x54].to_vec())
};
let packet = protobuf_init!(protocol::keyexchange::ClientResponsePlaintext::new(), {
login_crypto_response.diffie_hellman => {
hmac: challenge
},
pow_response => {},
crypto_response => {},
});
connection.send_packet(&packet.write_to_bytes().unwrap()).unwrap();
CipherConnection::new(connection.into_stream(),
&send_key,
&recv_key)
}
pub fn login(&self, credentials: Credentials) -> Result<Credentials, ()> {
let packet = protobuf_init!(protocol::authentication::ClientResponseEncrypted::new(), {
login_credentials => {
username: credentials.username,
typ: credentials.auth_type,
auth_data: credentials.auth_data,
},
system_info => {
cpu_family: protocol::authentication::CpuFamily::CPU_UNKNOWN,
os: protocol::authentication::Os::OS_UNKNOWN,
system_information_string: "librespot".to_owned(),
device_id: self.device_id().to_owned(),
},
version_string: version::version_string(),
appkey => {
version: self.config().application_key[0] as u32,
devkey: self.config().application_key[0x1..0x81].to_vec(),
signature: self.config().application_key[0x81..0x141].to_vec(),
useragent: self.config().user_agent.clone(),
callback_hash: vec![0; 20],
}
});
let mut connection = self.connect();
connection.send_packet(0xab, &packet.write_to_bytes().unwrap()).unwrap();
let (cmd, data) = connection.recv_packet().unwrap();
match cmd {
0xac => {
let welcome_data: protocol::authentication::APWelcome =
protobuf::parse_from_bytes(&data).unwrap();
let username = welcome_data.get_canonical_username().to_owned();
self.0.data.write().unwrap().canonical_username = username.clone();
*self.0.rx_connection.lock().unwrap() = Some(connection.clone());
*self.0.tx_connection.lock().unwrap() = Some(connection);
info!("Authenticated !");
let reusable_credentials = Credentials {
username: username,
auth_type: welcome_data.get_reusable_auth_credentials_type(),
auth_data: welcome_data.get_reusable_auth_credentials().to_owned(),
};
self.0.cache.put_credentials(&reusable_credentials);
Ok(reusable_credentials)
}
0xad => {
let msg: protocol::keyexchange::APLoginFailed =
protobuf::parse_from_bytes(&data).unwrap();
error!("Authentication failed, {:?}", msg);
Err(())
}
_ => {
error!("Unexpected message {:x}", cmd);
Err(())
}
}
}
pub fn poll(&self) {
let (cmd, data) = self.recv();
match cmd {
0x4 => self.send_packet(0x49, &data).unwrap(),
0x4a => (),
0x9 | 0xa => self.0.stream.lock().unwrap().handle(cmd, data, self),
0xd | 0xe => self.0.audio_key.lock().unwrap().handle(cmd, data, self),
0x1b => {
self.0.data.write().unwrap().country = String::from_utf8(data).unwrap();
}
0xb2...0xb6 => self.0.mercury.lock().unwrap().handle(cmd, data, self),
_ => (),
}
}
pub fn recv(&self) -> (u8, Vec<u8>) {
self.0.rx_connection.lock().unwrap().as_mut().unwrap().recv_packet().unwrap()
}
pub fn send_packet(&self, cmd: u8, data: &[u8]) -> connection::Result<()> {
self.0.tx_connection.lock().unwrap().as_mut().unwrap().send_packet(cmd, data)
}
pub fn audio_key(&self, track: SpotifyId, file_id: FileId) -> Future<AudioKey, AudioKeyError> {
self.0.cache
.get_audio_key(track, file_id)
.map(Future::of)
.unwrap_or_else(|| {
let self_ = self.clone();
self.0.audio_key.lock().unwrap()
.request(self, track, file_id)
.map(move |key| {
self_.0.cache.put_audio_key(track, file_id, key);
key
})
})
}
pub fn audio_file(&self, file_id: FileId) -> Box<ReadSeek> {
self.0.cache
.get_file(file_id)
.unwrap_or_else(|| {
let (audio_file, complete_rx) = AudioFile::new(self, file_id);
let self_ = self.clone();
complete_rx.map(move |mut complete_file| {
self_.0.cache.put_file(file_id, &mut complete_file)
}).fire();
Box::new(audio_file.await().unwrap())
})
}
pub fn album_cover(&self, file_id: FileId) -> eventual::Future<Vec<u8>, ()> {
self.0.cache
.get_file(file_id)
.map(|mut f| {
let mut data = Vec::new();
f.read_to_end(&mut data).unwrap();
Future::of(data)
})
.unwrap_or_else(|| {
let self_ = self.clone();
AlbumCover::get(file_id, self)
.map(move |data| {
self_.0.cache.put_file(file_id, &mut Cursor::new(&data));
data
})
})
}
pub fn stream(&self, handler: Box<stream::Handler>) {
self.0.stream.lock().unwrap().create(handler, self)
}
pub fn metadata<T: MetadataTrait>(&self, id: SpotifyId) -> MetadataRef<T> {
self.0.metadata.lock().unwrap().get(self, id)
}
pub fn mercury(&self, req: MercuryRequest) -> Future<MercuryResponse, ()> {
self.0.mercury.lock().unwrap().request(self, req)
}
pub fn mercury_sub(&self, uri: String) -> mpsc::Receiver<MercuryResponse> |
pub fn cache(&self) -> &Cache {
self.0.cache.as_ref()
}
pub fn config(&self) -> &Config {
&self.0.config
}
pub fn username(&self) -> String {
self.0.data.read().unwrap().canonical_username.clone()
}
pub fn country(&self) -> String {
self.0.data.read().unwrap().country.clone()
}
pub fn device_id(&self) -> &str {
&self.0.device_id
}
}
pub trait PacketHandler {
fn handle(&mut self, cmd: u8, data: Vec<u8>, session: &Session);
}
| {
self.0.mercury.lock().unwrap().subscribe(self, uri)
} | identifier_body |
option.rs | //! Enumeration of RS2 sensor options.
//!
//! Not all options apply to every sensor. In order to retrieve the correct options,
//! one must query the `sensor` object for option compatibility.
//!
//! Notice that these options refer to the `sensor`, not the device. However, the device
//! used also matters; sensors that are alike across devices are not guaranteed to share
//! the same sensor options. Again, it is up to the user to query whether an option
//! is supported by the sensor before attempting to set it. Failure to do so may cause
//! an error in operation.
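//!
//! A minimal sketch of that flow is shown below. The `sensor` handle and the
//! `supports_option`/`set_option` method names are illustrative assumptions;
//! only `get_option_range` is referenced elsewhere in these docs.
//!
//! ```ignore
//! if sensor.supports_option(Rs2Option::LaserPower) {
//!     // Read the advertised range first, then stay inside it.
//!     let range = sensor.get_option_range(Rs2Option::LaserPower).unwrap();
//!     let value = 30.0_f32.clamp(range.min, range.max);
//!     sensor.set_option(Rs2Option::LaserPower, value).unwrap();
//! }
//! ```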
use super::Rs2Exception;
use num_derive::{FromPrimitive, ToPrimitive};
use realsense_sys as sys;
use std::ffi::CStr;
use thiserror::Error;
/// Occurs when an option cannot be set.
#[derive(Error, Debug)]
pub enum OptionSetError {
/// The requested option is not supported by this sensor.
#[error("Option not supported on this sensor.")]
OptionNotSupported,
/// The requested option is read-only and cannot be set.
#[error("Option is read only.")]
OptionIsReadOnly,
/// The requested option could not be set. Reason is reported by the sensor.
#[error("Could not set option. Type: {0}; Reason: {1}")]
CouldNotSetOption(Rs2Exception, String),
}
/// The enumeration of options available in the RealSense SDK.
///
/// The majority of the options presented have a specific range of valid values. Run
/// `sensor.get_option_range(Rs2Option::_)` to retrieve possible values of an Option type for your sensor.
/// Setting a bad value will lead to a no-op at best, and a malfunction at worst.
///
/// # Deprecated Options
///
/// `AmbientLight`
///
/// - Equivalent to `RS2_OPTION_AMBIENT_LIGHT`
/// - Replacement: [Rs2Option::DigitalGain].
/// - Old Description: "Change the depth ambient light see rs2_ambient_light for values".
///
/// `ZeroOrderEnabled`
///
/// - Equivalent to `RS2_OPTION_ZERO_ORDER_ENABLED`
/// - Replacement: N/A.
/// - Old Description: "Toggle Zero-Order mode."
///
/// `ZeroOrderPointX`
///
/// - Equivalent to `RS2_OPTION_ZERO_ORDER_POINT_X`
/// - Replacement: N/A.
/// - Old Description: "Get the Zero order point x."
///
/// `ZeroOrderPointY`
///
/// - Equivalent to `RS2_OPTION_ZERO_ORDER_POINT_Y`
/// - Replacement: N/A.
/// - Old Description: "Get the Zero order point y."
///
/// `Trigger camera accuracy health`
///
/// - Deprecated as of 2.46 (not officially released, so technically 2.47)
/// - Old Description: "Enable Depth & color frame sync with periodic calibration for proper
/// alignment"
///
/// `Reset camera accuracy health`
///
/// - Deprecated as of 2.46 (not officially released, so technically 2.47)
/// - Old Description: "Reset Camera Accuracy metric (if affected by TriggerCameraAccuracyHealth
/// option)."
#[repr(i32)]
#[derive(FromPrimitive, ToPrimitive, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Rs2Option {
/// Enable/disable color backlight compensation.
BacklightCompensation = sys::rs2_option_RS2_OPTION_BACKLIGHT_COMPENSATION as i32,
/// Set color image brightness.
Brightness = sys::rs2_option_RS2_OPTION_BRIGHTNESS as i32,
/// Set color image contrast.
Contrast = sys::rs2_option_RS2_OPTION_CONTRAST as i32,
/// Set exposure time of color camera. Setting any value will disable auto exposure.
Exposure = sys::rs2_option_RS2_OPTION_EXPOSURE as i32,
/// Set color image gain.
Gain = sys::rs2_option_RS2_OPTION_GAIN as i32,
/// Set color image gamma setting.
Gamma = sys::rs2_option_RS2_OPTION_GAMMA as i32,
/// Set color image hue.
Hue = sys::rs2_option_RS2_OPTION_HUE as i32,
/// Set color image saturation.
Saturation = sys::rs2_option_RS2_OPTION_SATURATION as i32,
/// Set color image sharpness.
Sharpness = sys::rs2_option_RS2_OPTION_SHARPNESS as i32,
/// Set white balance of color image. Setting any value will disable auto white balance.
WhiteBalance = sys::rs2_option_RS2_OPTION_WHITE_BALANCE as i32,
/// Enable/disable color image auto-exposure.
EnableAutoExposure = sys::rs2_option_RS2_OPTION_ENABLE_AUTO_EXPOSURE as i32,
/// Enable/disable color image auto-white-balance
EnableAutoWhiteBalance = sys::rs2_option_RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE as i32,
/// Set the visual preset on the sensor. `sensor.get_option_range()` provides
/// access to several recommended sets of option presets for a depth camera. The preset
/// selection varies between devices and sensors.
VisualPreset = sys::rs2_option_RS2_OPTION_VISUAL_PRESET as i32,
/// Set the power of the laser emitter, with 0 meaning projector off.
LaserPower = sys::rs2_option_RS2_OPTION_LASER_POWER as i32,
/// Set the number of patterns projected per frame. The higher the accuracy value,
/// the more patterns projected. Increasing the number of patterns helps to achieve
/// better accuracy. Note that this control affects Depth FPS.
Accuracy = sys::rs2_option_RS2_OPTION_ACCURACY as i32,
/// Set the motion vs. range trade-off. Lower values allow for better motion sensitivity.
/// Higher values allow for better depth range.
MotionRange = sys::rs2_option_RS2_OPTION_MOTION_RANGE as i32,
/// Set the filter to apply to each depth frame. Each one of the filters is optimized for the
/// application requirements.
FilterOption = sys::rs2_option_RS2_OPTION_FILTER_OPTION as i32,
/// Set the confidence level threshold used by the Depth algorithm pipe.
/// This determines whether a pixel will get a valid range or will be marked as invalid.
ConfidenceThreshold = sys::rs2_option_RS2_OPTION_CONFIDENCE_THRESHOLD as i32,
/// Enable/disable emitters. Emitter selection:
///
/// - `0`: disable all emitters
/// - `1`: enable laser
/// - `2`: enable auto laser
/// - `3`: enable LED
EmitterEnabled = sys::rs2_option_RS2_OPTION_EMITTER_ENABLED as i32,
/// Set the number of frames the user is allowed to keep per stream.
/// Trying to hold on to more frames will cause frame drops.
FramesQueueSize = sys::rs2_option_RS2_OPTION_FRAMES_QUEUE_SIZE as i32,
/// Get the total number of detected frame drops from all streams.
TotalFrameDrops = sys::rs2_option_RS2_OPTION_TOTAL_FRAME_DROPS as i32,
/// Set the auto-exposure mode:
///
/// - Static
/// - Anti-Flicker
/// - Hybrid
AutoExposureMode = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_MODE as i32,
/// Set the power line frequency control for anti-flickering:
///
/// - Off
/// - 50Hz
/// - 60Hz
/// - Auto
PowerLineFrequency = sys::rs2_option_RS2_OPTION_POWER_LINE_FREQUENCY as i32,
/// Get the current Temperature of the ASIC.
AsicTemperature = sys::rs2_option_RS2_OPTION_ASIC_TEMPERATURE as i32,
/// Enable/disable error handling.
ErrorPollingEnabled = sys::rs2_option_RS2_OPTION_ERROR_POLLING_ENABLED as i32,
/// Get the Current Temperature of the projector.
ProjectorTemperature = sys::rs2_option_RS2_OPTION_PROJECTOR_TEMPERATURE as i32,
/// Enable/disable a trigger to be output from the camera to any external device on
/// every depth frame.
OutputTriggerEnabled = sys::rs2_option_RS2_OPTION_OUTPUT_TRIGGER_ENABLED as i32,
/// Get the current Motion-Module Temperature.
MotionModuleTemperature = sys::rs2_option_RS2_OPTION_MOTION_MODULE_TEMPERATURE as i32,
/// Set the number of meters represented by a single depth unit.
DepthUnits = sys::rs2_option_RS2_OPTION_DEPTH_UNITS as i32,
/// Enable/Disable automatic correction of the motion data.
EnableMotionCorrection = sys::rs2_option_RS2_OPTION_ENABLE_MOTION_CORRECTION as i32,
/// Allows the sensor to dynamically adjust the frame rate depending on lighting conditions.
AutoExposurePriority = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_PRIORITY as i32,
/// Set the color scheme for data visualization.
ColorScheme = sys::rs2_option_RS2_OPTION_COLOR_SCHEME as i32,
/// Enable/disable histogram equalization post-processing on the depth data.
HistogramEqualizationEnabled = sys::rs2_option_RS2_OPTION_HISTOGRAM_EQUALIZATION_ENABLED as i32,
/// Set the Minimal distance to the target.
MinDistance = sys::rs2_option_RS2_OPTION_MIN_DISTANCE as i32,
/// Set the Maximum distance to the target.
MaxDistance = sys::rs2_option_RS2_OPTION_MAX_DISTANCE as i32,
/// Get the texture mapping stream unique ID.
TextureSource = sys::rs2_option_RS2_OPTION_TEXTURE_SOURCE as i32,
/// Set the 2D-filter effect. The specific interpretation is given within the context of the filter.
FilterMagnitude = sys::rs2_option_RS2_OPTION_FILTER_MAGNITUDE as i32,
/// Set the 2D-filter parameter that controls the weight/radius for smoothing.
FilterSmoothAlpha = sys::rs2_option_RS2_OPTION_FILTER_SMOOTH_ALPHA as i32,
/// Set the 2D-filter range/validity threshold.
FilterSmoothDelta = sys::rs2_option_RS2_OPTION_FILTER_SMOOTH_DELTA as i32,
/// Enhance depth data post-processing with holes filling where appropriate.
HolesFill = sys::rs2_option_RS2_OPTION_HOLES_FILL as i32,
/// Get the distance in mm between the first and the second imagers in stereo-based depth cameras.
StereoBaseline = sys::rs2_option_RS2_OPTION_STEREO_BASELINE as i32,
/// Allows dynamically adjusting the convergence step value of the target exposure in
/// the Auto-Exposure algorithm.
AutoExposureConvergeStep = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_CONVERGE_STEP as i32,
/// Impose Inter-camera HW synchronization mode. Applicable for D400/L500/Rolling Shutter SKUs.
InterCamSyncMode = sys::rs2_option_RS2_OPTION_INTER_CAM_SYNC_MODE as i32,
/// Select a stream to process.
StreamFilter = sys::rs2_option_RS2_OPTION_STREAM_FILTER as i32,
/// Select a stream format to process.
StreamFormatFilter = sys::rs2_option_RS2_OPTION_STREAM_FORMAT_FILTER as i32,
/// Select a stream index to process.
StreamIndexFilter = sys::rs2_option_RS2_OPTION_STREAM_INDEX_FILTER as i32,
/// When supported, this option makes the camera switch the emitter state every frame:
/// 0 for disabled, 1 for enabled.
EmitterOnOff = sys::rs2_option_RS2_OPTION_EMITTER_ON_OFF as i32,
/// Get the LDD temperature.
LldTemperature = sys::rs2_option_RS2_OPTION_LLD_TEMPERATURE as i32,
/// Get the MC temperature.
McTemperature = sys::rs2_option_RS2_OPTION_MC_TEMPERATURE as i32,
/// Get the MA temperature.
MaTemperature = sys::rs2_option_RS2_OPTION_MA_TEMPERATURE as i32,
/// Hardware stream configuration.
HardwarePreset = sys::rs2_option_RS2_OPTION_HARDWARE_PRESET as i32,
/// Enable/disable global time.
GlobalTimeEnabled = sys::rs2_option_RS2_OPTION_GLOBAL_TIME_ENABLED as i32,
/// Get the APD temperature.
ApdTemperature = sys::rs2_option_RS2_OPTION_APD_TEMPERATURE as i32,
/// Enable/disable an internal map.
EnableMapping = sys::rs2_option_RS2_OPTION_ENABLE_MAPPING as i32,
/// Enable/disable appearance-based relocalization.
EnableRelocalization = sys::rs2_option_RS2_OPTION_ENABLE_RELOCALIZATION as i32,
/// Enable/disable position jumping.
EnablePoseJumping = sys::rs2_option_RS2_OPTION_ENABLE_POSE_JUMPING as i32,
/// Enable/disable dynamic calibration.
EnableDynamicCalibration = sys::rs2_option_RS2_OPTION_ENABLE_DYNAMIC_CALIBRATION as i32,
/// Get the offset from sensor to depth origin in millimeters.
DepthOffset = sys::rs2_option_RS2_OPTION_DEPTH_OFFSET as i32,
/// Set the power of the LED (light emitting diode), with 0 meaning off
LedPower = sys::rs2_option_RS2_OPTION_LED_POWER as i32,
/// Preserve the previous map when starting.
EnableMapPreservation = sys::rs2_option_RS2_OPTION_ENABLE_MAP_PRESERVATION as i32,
/// Enable/disable sensor shutdown when a free-fall is detected (on by default).
FreefallDetectionEnabled = sys::rs2_option_RS2_OPTION_FREEFALL_DETECTION_ENABLED as i32,
/// Changes the exposure time of Avalanche Photo Diode in the receiver.
AvalanchePhotoDiode = sys::rs2_option_RS2_OPTION_AVALANCHE_PHOTO_DIODE as i32,
/// Changes the amount of sharpening in the post-processed image.
PostProcessingSharpening = sys::rs2_option_RS2_OPTION_POST_PROCESSING_SHARPENING as i32,
/// Changes the amount of sharpening in the pre-processed image.
PreProcessingSharpening = sys::rs2_option_RS2_OPTION_PRE_PROCESSING_SHARPENING as i32,
/// Control edges and background noise.
NoiseFiltering = sys::rs2_option_RS2_OPTION_NOISE_FILTERING as i32,
/// Enable/disable pixel invalidation.
InvalidationBypass = sys::rs2_option_RS2_OPTION_INVALIDATION_BYPASS as i32,
/// Change the depth digital gain see rs2_digital_gain for values.
DigitalGain = sys::rs2_option_RS2_OPTION_DIGITAL_GAIN as i32,
/// The resolution mode: see rs2_sensor_mode for values.
SensoeMode = sys::rs2_option_RS2_OPTION_SENSOR_MODE as i32,
/// Enable/disable Laser On constantly (GS SKU Only).
EmitterAlwaysOn = sys::rs2_option_RS2_OPTION_EMITTER_ALWAYS_ON as i32,
/// Depth Thermal Compensation for selected D400 SKUs.
ThermalCompensation = sys::rs2_option_RS2_OPTION_THERMAL_COMPENSATION as i32,
/// Set host performance mode to optimize device settings so host can keep up with workload.
/// Take USB transaction granularity as an example. Setting option to low performance host leads
/// to larger USB transaction sizes and a reduced number of transactions. This improves performance
/// and stability if the host machine is relatively weak compared to the workload.
HostPerformance = sys::rs2_option_RS2_OPTION_HOST_PERFORMANCE as i32,
/// Enable/disable HDR.
HdrEnabled = sys::rs2_option_RS2_OPTION_HDR_ENABLED as i32,
/// Get HDR Sequence name.
SequenceName = sys::rs2_option_RS2_OPTION_SEQUENCE_NAME as i32,
/// Get HDR Sequence size.
SequenceSize = sys::rs2_option_RS2_OPTION_SEQUENCE_SIZE as i32,
/// Get HDR Sequence ID - 0 is not HDR; sequence ID for HDR configuration starts from 1.
SequenceId = sys::rs2_option_RS2_OPTION_SEQUENCE_ID as i32,
/// Get Humidity temperature [in Celsius].
HumidityTemperature = sys::rs2_option_RS2_OPTION_HUMIDITY_TEMPERATURE as i32,
/// Enable/disable the maximum usable depth sensor range given the amount of ambient light in the scene.
EnableMaxUsableRange = sys::rs2_option_RS2_OPTION_ENABLE_MAX_USABLE_RANGE as i32,
/// Enable/disable the alternate IR. When enabled, the IR image holds the amplitude of the depth correlation.
AlternateIr = sys::rs2_option_RS2_OPTION_ALTERNATE_IR as i32,
/// Get an estimation of the noise on the IR image.
NoiseEstimation = sys::rs2_option_RS2_OPTION_NOISE_ESTIMATION as i32,
/// Enable/disable data collection for calculating IR pixel reflectivity.
EnableIrReflectivity = sys::rs2_option_RS2_OPTION_ENABLE_IR_REFLECTIVITY as i32,
/// Auto exposure limit in microseconds.
///
/// Default is 0 which means full exposure range. If the requested exposure limit is greater
/// than frame time, it will be set to frame time at runtime. Setting will not take effect
/// until next streaming session.
AutoExposureLimit = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_LIMIT as i32,
/// Auto gain limits ranging from 16 to 248.
///
/// Default is 0 which means full gain. If the requested gain limit is less than 16, it will be
/// set to 16. If the requested gain limit is greater than 248, it will be set to 248. Setting
/// will not take effect until next streaming session.
AutoGainLimit = sys::rs2_option_RS2_OPTION_AUTO_GAIN_LIMIT as i32,
/// Enable receiver sensitivity according to ambient light, bounded by the Receiver Gain
/// control.
AutoReceiverSensitivity = sys::rs2_option_RS2_OPTION_AUTO_RX_SENSITIVITY as i32,
/// Changes the transmitter frequency, increasing effective range over sharpness.
TransmitterFrequency = sys::rs2_option_RS2_OPTION_TRANSMITTER_FREQUENCY as i32,
/* Not included since this just tells us the total number of options.
*
* Count = sys::rs2_option_RS2_OPTION_COUNT, */
}
impl Rs2Option {
/// Get the option as a CStr.
pub fn to_cstr(self) -> &'static CStr {
unsafe {
let ptr = sys::rs2_option_to_string(self as sys::rs2_option);
CStr::from_ptr(ptr)
}
}
/// Get the option as a str.
pub fn to_str(self) -> &'static str {
self.to_cstr().to_str().unwrap()
}
}
impl ToString for Rs2Option {
fn to_string(&self) -> String {
self.to_str().to_owned()
}
}
/// The range of available values of a supported option.
pub struct Rs2OptionRange {
/// The minimum value which will be accepted for this option
pub min: f32,
/// The maximum value which will be accepted for this option
pub max: f32,
/// The granularity of options which accept discrete values, or zero if the option accepts
/// continuous values
pub step: f32,
/// The default value of the option
pub default: f32,
}
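// A small illustrative helper, not part of the SDK bindings: clamp a requested
// value into an option's advertised range and snap it to the nearest step, as
// one might do before handing the value to a (hypothetical) `set_option` call.
#[allow(dead_code)]
fn clamp_to_option_range(requested: f32, range: &Rs2OptionRange) -> f32 {
    // Keep the value inside [min, max].
    let clamped = requested.clamp(range.min, range.max);
    if range.step > 0.0 {
        // Snap to the nearest multiple of `step`, measured from `min`.
        let steps = ((clamped - range.min) / range.step).round();
        (range.min + steps * range.step).min(range.max)
    } else {
        // A step of zero means the option accepts continuous values.
        clamped
    }
}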
#[cfg(test)]
mod tests {
use super::*;
use num_traits::FromPrimitive;
#[test]
fn all_variants_exist() {
let deprecated_options = vec![
sys::rs2_option_RS2_OPTION_ZERO_ORDER_POINT_X as i32,
sys::rs2_option_RS2_OPTION_ZERO_ORDER_POINT_Y as i32,
sys::rs2_option_RS2_OPTION_ZERO_ORDER_ENABLED as i32,
sys::rs2_option_RS2_OPTION_AMBIENT_LIGHT as i32,
sys::rs2_option_RS2_OPTION_TRIGGER_CAMERA_ACCURACY_HEALTH as i32,
sys::rs2_option_RS2_OPTION_RESET_CAMERA_ACCURACY_HEALTH as i32,
];
for i in 0..sys::rs2_option_RS2_OPTION_COUNT as i32 {
if deprecated_options.iter().any(|x| x == &i) |
assert!(
Rs2Option::from_i32(i).is_some(),
"Rs2Option variant for ordinal {} does not exist.",
i,
);
}
}
}
| {
continue;
} | conditional_block |
option.rs | //! Enumeration of RS2 sensor options.
//!
//! Not all options apply to every sensor. In order to retrieve the correct options,
//! one must iterate over the `sensor` object for option compatibility.
//!
//! Notice that this option refers to the `sensor`, not the device. However, the device
//! used also matters; sensors that are alike across devices are not guaranteed to share
//! the same sensor options. Again, it is up to the user to query whether an option
//! is supported by the sensor before attempting to set it. Failure to do so may cause
//! an error in operation.
use super::Rs2Exception;
use num_derive::{FromPrimitive, ToPrimitive};
use realsense_sys as sys;
use std::ffi::CStr;
use thiserror::Error;
/// Occurs when an option cannot be set.
#[derive(Error, Debug)]
pub enum | {
/// The requested option is not supported by this sensor.
#[error("Option not supported on this sensor.")]
OptionNotSupported,
/// The requested option is read-only and cannot be set.
#[error("Option is read only.")]
OptionIsReadOnly,
/// The requested option could not be set. Reason is reported by the sensor.
#[error("Could not set option. Type: {0}; Reason: {1}")]
CouldNotSetOption(Rs2Exception, String),
}
/// The enumeration of options available in the RealSense SDK.
///
/// The majority of the options presented have a specific range of valid values. Run
/// `sensor.get_option_range(Rs2Option::_)` to retrieve possible values of an Option type for your sensor.
/// Setting a bad value will lead to a no-op at best, and a malfunction at worst.
///
/// # Deprecated Options
///
/// `AmbientLight`
///
/// - Equivalent to `RS2_OPTION_AMBIENT_LIGHT`
/// - Replacement: [Rs2Option::DigitalGain].
/// - Old Description: "Change the depth ambient light see rs2_ambient_light for values".
///
/// `ZeroOrderEnabled`
///
/// - Equivalent to `RS2_OPTION_ZERO_ORDER_ENABLED`
/// - Replacement: N/A.
/// - Old Description: "Toggle Zero-Order mode."
///
/// `ZeroOrderPointX`
///
/// - Equivalent to `RS2_OPTION_ZERO_ORDER_POINT_X`
/// - Replacement: N/A.
/// - Old Description: "Get the Zero order point x."
///
/// `ZeroOrderPointY`
///
/// - Equivalent to `RS2_OPTION_ZERO_ORDER_POINT_Y`
/// - Replacement: N/A.
/// - Old Description: "Get the Zero order point y."
///
/// `Trigger camera accuracy health`
///
/// - Deprecated as of 2.46 (not officially released, so technically 2.47)
/// - Old Description: "Enable Depth & color frame sync with periodic calibration for proper
/// alignment"
///
/// `Reset camera accuracy health`
///
/// - Deprecated as of 2.46 (not officially released, so technically 2.47)
/// - Old Description: "Reset Camera Accuracy metric (if affected by TriggerCameraAccuracyHealth
/// option)."
#[repr(i32)]
#[derive(FromPrimitive, ToPrimitive, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Rs2Option {
/// Enable/disable color backlight compensation.
BacklightCompensation = sys::rs2_option_RS2_OPTION_BACKLIGHT_COMPENSATION as i32,
/// Set color image brightness.
Brightness = sys::rs2_option_RS2_OPTION_BRIGHTNESS as i32,
/// Set color image contrast.
Contrast = sys::rs2_option_RS2_OPTION_CONTRAST as i32,
/// Set exposure time of color camera. Setting any value will disable auto exposure.
Exposure = sys::rs2_option_RS2_OPTION_EXPOSURE as i32,
/// Set color image gain.
Gain = sys::rs2_option_RS2_OPTION_GAIN as i32,
/// Set color image gamma setting.
Gamma = sys::rs2_option_RS2_OPTION_GAMMA as i32,
/// Set color image hue.
Hue = sys::rs2_option_RS2_OPTION_HUE as i32,
/// Set color image saturation.
Saturation = sys::rs2_option_RS2_OPTION_SATURATION as i32,
/// Set color image sharpness.
Sharpness = sys::rs2_option_RS2_OPTION_SHARPNESS as i32,
/// Set white balance of color image. Setting any value will disable auto white balance.
WhiteBalance = sys::rs2_option_RS2_OPTION_WHITE_BALANCE as i32,
/// Enable/disable color image auto-exposure.
EnableAutoExposure = sys::rs2_option_RS2_OPTION_ENABLE_AUTO_EXPOSURE as i32,
/// Enable/disable color image auto-white-balance
EnableAutoWhiteBalance = sys::rs2_option_RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE as i32,
/// Set the visual preset on the sensor. `sensor.get_option_range()` provides
/// access to several recommended sets of option presets for a depth camera. The preset
/// selection varies between devices and sensors.
VisualPreset = sys::rs2_option_RS2_OPTION_VISUAL_PRESET as i32,
/// Set the power of the laser emitter, with 0 meaning projector off.
LaserPower = sys::rs2_option_RS2_OPTION_LASER_POWER as i32,
/// Set the number of patterns projected per frame. The higher the accuracy value,
/// the more patterns projected. Increasing the number of patterns helps to achieve
/// better accuracy. Note that this control affects Depth FPS.
Accuracy = sys::rs2_option_RS2_OPTION_ACCURACY as i32,
/// Set the motion vs. range trade-off. Lower values allow for better motion sensitivity.
/// Higher values allow for better depth range.
MotionRange = sys::rs2_option_RS2_OPTION_MOTION_RANGE as i32,
/// Set the filter to apply to each depth frame. Each one of the filters is optimized for the
/// application requirements.
FilterOption = sys::rs2_option_RS2_OPTION_FILTER_OPTION as i32,
/// Set the confidence level threshold used by the Depth algorithm pipe.
/// This determines whether a pixel will get a valid range or will be marked as invalid.
ConfidenceThreshold = sys::rs2_option_RS2_OPTION_CONFIDENCE_THRESHOLD as i32,
/// Enable/disable emitters. Emitter selection:
///
/// - `0`: disable all emitters
/// - `1`: enable laser
/// - `2`: enable auto laser
/// - `3`: enable LED
EmitterEnabled = sys::rs2_option_RS2_OPTION_EMITTER_ENABLED as i32,
/// Set the number of frames the user is allowed to keep per stream.
/// Trying to hold on to more frames will cause frame drops.
FramesQueueSize = sys::rs2_option_RS2_OPTION_FRAMES_QUEUE_SIZE as i32,
/// Get the total number of detected frame drops from all streams.
TotalFrameDrops = sys::rs2_option_RS2_OPTION_TOTAL_FRAME_DROPS as i32,
/// Set the auto-exposure mode:
///
/// - Static
/// - Anti-Flicker
/// - Hybrid
AutoExposureMode = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_MODE as i32,
/// Set the power line frequency control for anti-flickering:
///
/// - Off
/// - 50Hz
/// - 60Hz
/// - Auto
PowerLineFrequency = sys::rs2_option_RS2_OPTION_POWER_LINE_FREQUENCY as i32,
/// Get the current Temperature of the ASIC.
AsicTemperature = sys::rs2_option_RS2_OPTION_ASIC_TEMPERATURE as i32,
/// Enable/disable error handling.
ErrorPollingEnabled = sys::rs2_option_RS2_OPTION_ERROR_POLLING_ENABLED as i32,
/// Get the Current Temperature of the projector.
ProjectorTemperature = sys::rs2_option_RS2_OPTION_PROJECTOR_TEMPERATURE as i32,
/// Enable/disable a trigger to be output from the camera to any external device on
/// every depth frame.
OutputTriggerEnabled = sys::rs2_option_RS2_OPTION_OUTPUT_TRIGGER_ENABLED as i32,
/// Get the current Motion-Module Temperature.
MotionModuleTemperature = sys::rs2_option_RS2_OPTION_MOTION_MODULE_TEMPERATURE as i32,
/// Set the number of meters represented by a single depth unit.
DepthUnits = sys::rs2_option_RS2_OPTION_DEPTH_UNITS as i32,
/// Enable/Disable automatic correction of the motion data.
EnableMotionCorrection = sys::rs2_option_RS2_OPTION_ENABLE_MOTION_CORRECTION as i32,
/// Allows the sensor to dynamically adjust the frame rate depending on lighting conditions.
AutoExposurePriority = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_PRIORITY as i32,
/// Set the color scheme for data visualization.
ColorScheme = sys::rs2_option_RS2_OPTION_COLOR_SCHEME as i32,
/// Enable/disable histogram equalization post-processing on the depth data.
HistogramEqualizationEnabled = sys::rs2_option_RS2_OPTION_HISTOGRAM_EQUALIZATION_ENABLED as i32,
/// Set the Minimal distance to the target.
MinDistance = sys::rs2_option_RS2_OPTION_MIN_DISTANCE as i32,
/// Set the Maximum distance to the target.
MaxDistance = sys::rs2_option_RS2_OPTION_MAX_DISTANCE as i32,
/// Get the texture mapping stream unique ID.
TextureSource = sys::rs2_option_RS2_OPTION_TEXTURE_SOURCE as i32,
/// Set the 2D-filter effect. The specific interpretation is given within the context of the filter.
FilterMagnitude = sys::rs2_option_RS2_OPTION_FILTER_MAGNITUDE as i32,
/// Set the 2D-filter parameter that controls the weight/radius for smoothing.
FilterSmoothAlpha = sys::rs2_option_RS2_OPTION_FILTER_SMOOTH_ALPHA as i32,
/// Set the 2D-filter range/validity threshold.
FilterSmoothDelta = sys::rs2_option_RS2_OPTION_FILTER_SMOOTH_DELTA as i32,
/// Enhance depth data post-processing with holes filling where appropriate.
HolesFill = sys::rs2_option_RS2_OPTION_HOLES_FILL as i32,
/// Get the distance in mm between the first and the second imagers in stereo-based depth cameras.
StereoBaseline = sys::rs2_option_RS2_OPTION_STEREO_BASELINE as i32,
/// Allows dynamically adjusting the convergence step value of the target exposure in
/// the Auto-Exposure algorithm.
AutoExposureConvergeStep = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_CONVERGE_STEP as i32,
/// Impose Inter-camera HW synchronization mode. Applicable for D400/L500/Rolling Shutter SKUs.
InterCamSyncMode = sys::rs2_option_RS2_OPTION_INTER_CAM_SYNC_MODE as i32,
/// Select a stream to process.
StreamFilter = sys::rs2_option_RS2_OPTION_STREAM_FILTER as i32,
/// Select a stream format to process.
StreamFormatFilter = sys::rs2_option_RS2_OPTION_STREAM_FORMAT_FILTER as i32,
/// Select a stream index to process.
StreamIndexFilter = sys::rs2_option_RS2_OPTION_STREAM_INDEX_FILTER as i32,
/// When supported, this option makes the camera switch the emitter state every frame:
/// 0 for disabled, 1 for enabled.
EmitterOnOff = sys::rs2_option_RS2_OPTION_EMITTER_ON_OFF as i32,
/// Get the LDD temperature.
LldTemperature = sys::rs2_option_RS2_OPTION_LLD_TEMPERATURE as i32,
/// Get the MC temperature.
McTemperature = sys::rs2_option_RS2_OPTION_MC_TEMPERATURE as i32,
/// Get the MA temperature.
MaTemperature = sys::rs2_option_RS2_OPTION_MA_TEMPERATURE as i32,
/// Hardware stream configuration.
HardwarePreset = sys::rs2_option_RS2_OPTION_HARDWARE_PRESET as i32,
/// Enable/disable global time.
GlobalTimeEnabled = sys::rs2_option_RS2_OPTION_GLOBAL_TIME_ENABLED as i32,
/// Get the APD temperature.
ApdTemperature = sys::rs2_option_RS2_OPTION_APD_TEMPERATURE as i32,
/// Enable/disable an internal map.
EnableMapping = sys::rs2_option_RS2_OPTION_ENABLE_MAPPING as i32,
/// Enable/disable appearance-based relocalization.
EnableRelocalization = sys::rs2_option_RS2_OPTION_ENABLE_RELOCALIZATION as i32,
/// Enable/disable position jumping.
EnablePoseJumping = sys::rs2_option_RS2_OPTION_ENABLE_POSE_JUMPING as i32,
/// Enable/disable dynamic calibration.
EnableDynamicCalibration = sys::rs2_option_RS2_OPTION_ENABLE_DYNAMIC_CALIBRATION as i32,
/// Get the offset from sensor to depth origin in millimeters.
DepthOffset = sys::rs2_option_RS2_OPTION_DEPTH_OFFSET as i32,
/// Set the power of the LED (light emitting diode), with 0 meaning off
LedPower = sys::rs2_option_RS2_OPTION_LED_POWER as i32,
/// Preserve the previous map when starting.
EnableMapPreservation = sys::rs2_option_RS2_OPTION_ENABLE_MAP_PRESERVATION as i32,
/// Enable/disable sensor shutdown when a free-fall is detected (on by default).
FreefallDetectionEnabled = sys::rs2_option_RS2_OPTION_FREEFALL_DETECTION_ENABLED as i32,
/// Changes the exposure time of Avalanche Photo Diode in the receiver.
AvalanchePhotoDiode = sys::rs2_option_RS2_OPTION_AVALANCHE_PHOTO_DIODE as i32,
/// Changes the amount of sharpening in the post-processed image.
PostProcessingSharpening = sys::rs2_option_RS2_OPTION_POST_PROCESSING_SHARPENING as i32,
/// Changes the amount of sharpening in the pre-processed image.
PreProcessingSharpening = sys::rs2_option_RS2_OPTION_PRE_PROCESSING_SHARPENING as i32,
/// Control edges and background noise.
NoiseFiltering = sys::rs2_option_RS2_OPTION_NOISE_FILTERING as i32,
/// Enable/disable pixel invalidation.
InvalidationBypass = sys::rs2_option_RS2_OPTION_INVALIDATION_BYPASS as i32,
/// Change the depth digital gain see rs2_digital_gain for values.
DigitalGain = sys::rs2_option_RS2_OPTION_DIGITAL_GAIN as i32,
/// The resolution mode: see rs2_sensor_mode for values.
SensoeMode = sys::rs2_option_RS2_OPTION_SENSOR_MODE as i32,
/// Enable/disable Laser On constantly (GS SKU Only).
EmitterAlwaysOn = sys::rs2_option_RS2_OPTION_EMITTER_ALWAYS_ON as i32,
/// Depth Thermal Compensation for selected D400 SKUs.
ThermalCompensation = sys::rs2_option_RS2_OPTION_THERMAL_COMPENSATION as i32,
/// Set host performance mode to optimize device settings so host can keep up with workload.
/// Take USB transaction granularity as an example. Setting option to low performance host leads
/// to larger USB transaction sizes and a reduced number of transactions. This improves performance
/// and stability if the host machine is relatively weak compared to the workload.
HostPerformance = sys::rs2_option_RS2_OPTION_HOST_PERFORMANCE as i32,
/// Enable/disable HDR.
HdrEnabled = sys::rs2_option_RS2_OPTION_HDR_ENABLED as i32,
/// Get HDR Sequence name.
SequenceName = sys::rs2_option_RS2_OPTION_SEQUENCE_NAME as i32,
/// Get HDR Sequence size.
SequenceSize = sys::rs2_option_RS2_OPTION_SEQUENCE_SIZE as i32,
/// Get HDR Sequence ID - 0 is not HDR; sequence ID for HDR configuration starts from 1.
SequenceId = sys::rs2_option_RS2_OPTION_SEQUENCE_ID as i32,
/// Get Humidity temperature [in Celsius].
HumidityTemperature = sys::rs2_option_RS2_OPTION_HUMIDITY_TEMPERATURE as i32,
/// Enable/disable the maximum usable depth sensor range given the amount of ambient light in the scene.
EnableMaxUsableRange = sys::rs2_option_RS2_OPTION_ENABLE_MAX_USABLE_RANGE as i32,
/// Enable/disable the alternate IR. When enabled, the IR image holds the amplitude of the depth correlation.
AlternateIr = sys::rs2_option_RS2_OPTION_ALTERNATE_IR as i32,
/// Get an estimation of the noise on the IR image.
NoiseEstimation = sys::rs2_option_RS2_OPTION_NOISE_ESTIMATION as i32,
/// Enable/disable data collection for calculating IR pixel reflectivity.
EnableIrReflectivity = sys::rs2_option_RS2_OPTION_ENABLE_IR_REFLECTIVITY as i32,
/// Auto exposure limit in microseconds.
///
/// Default is 0 which means full exposure range. If the requested exposure limit is greater
/// than frame time, it will be set to frame time at runtime. Setting will not take effect
/// until next streaming session.
AutoExposureLimit = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_LIMIT as i32,
/// Auto gain limits ranging from 16 to 248.
///
/// Default is 0 which means full gain. If the requested gain limit is less than 16, it will be
/// set to 16. If the requested gain limit is greater than 248, it will be set to 248. Setting
/// will not take effect until next streaming session.
AutoGainLimit = sys::rs2_option_RS2_OPTION_AUTO_GAIN_LIMIT as i32,
/// Enable receiver sensitivity according to ambient light, bounded by the Receiver Gain
/// control.
AutoReceiverSensitivity = sys::rs2_option_RS2_OPTION_AUTO_RX_SENSITIVITY as i32,
/// Changes the transmitter frequency, increasing effective range over sharpness.
TransmitterFrequency = sys::rs2_option_RS2_OPTION_TRANSMITTER_FREQUENCY as i32,
/* Not included since this just tells us the total number of options.
*
* Count = sys::rs2_option_RS2_OPTION_COUNT, */
}
impl Rs2Option {
/// Get the option as a CStr.
pub fn to_cstr(self) -> &'static CStr {
unsafe {
let ptr = sys::rs2_option_to_string(self as sys::rs2_option);
CStr::from_ptr(ptr)
}
}
/// Get the option as a str.
pub fn to_str(self) -> &'static str {
self.to_cstr().to_str().unwrap()
}
}
impl ToString for Rs2Option {
fn to_string(&self) -> String {
self.to_str().to_owned()
}
}
/// The range of available values of a supported option.
pub struct Rs2OptionRange {
/// The minimum value which will be accepted for this option
pub min: f32,
/// The maximum value which will be accepted for this option
pub max: f32,
/// The granularity of options which accept discrete values, or zero if the option accepts
/// continuous values
pub step: f32,
/// The default value of the option
pub default: f32,
}
#[cfg(test)]
mod tests {
use super::*;
use num_traits::FromPrimitive;
#[test]
fn all_variants_exist() {
let deprecated_options = vec![
sys::rs2_option_RS2_OPTION_ZERO_ORDER_POINT_X as i32,
sys::rs2_option_RS2_OPTION_ZERO_ORDER_POINT_Y as i32,
sys::rs2_option_RS2_OPTION_ZERO_ORDER_ENABLED as i32,
sys::rs2_option_RS2_OPTION_AMBIENT_LIGHT as i32,
sys::rs2_option_RS2_OPTION_TRIGGER_CAMERA_ACCURACY_HEALTH as i32,
sys::rs2_option_RS2_OPTION_RESET_CAMERA_ACCURACY_HEALTH as i32,
];
for i in 0..sys::rs2_option_RS2_OPTION_COUNT as i32 {
if deprecated_options.iter().any(|x| x == &i) {
continue;
}
assert!(
Rs2Option::from_i32(i).is_some(),
"Rs2Option variant for ordinal {} does not exist.",
i,
);
}
}
}
| OptionSetError | identifier_name |
option.rs | //! Enumeration of RS2 sensor options.
//!
//! Not all options apply to every sensor. In order to retrieve the correct options,
//! one must iterate over the `sensor` object for option compatibility.
//!
//! Notice that this option refers to the `sensor`, not the device. However, the device
//! used also matters; sensors that are alike across devices are not guaranteed to share
//! the same sensor options. Again, it is up to the user to query whether an option
//! is supported by the sensor before attempting to set it. Failure to do so may cause
//! an error in operation.
use super::Rs2Exception;
use num_derive::{FromPrimitive, ToPrimitive};
use realsense_sys as sys;
use std::ffi::CStr;
use thiserror::Error;
/// Occurs when an option cannot be set.
#[derive(Error, Debug)]
pub enum OptionSetError {
/// The requested option is not supported by this sensor.
#[error("Option not supported on this sensor.")]
OptionNotSupported,
/// The requested option is read-only and cannot be set.
#[error("Option is read only.")]
OptionIsReadOnly,
/// The requested option could not be set. Reason is reported by the sensor.
#[error("Could not set option. Type: {0}; Reason: {1}")]
CouldNotSetOption(Rs2Exception, String),
}
/// The enumeration of options available in the RealSense SDK.
///
/// The majority of the options presented have a specific range of valid values. Run
/// `sensor.get_option_range(Rs2Option::_)` to retrieve possible values of an Option type for your sensor.
/// Setting a bad value will lead to a no-op at best, and a malfunction at worst.
///
/// # Deprecated Options
///
/// `AmbientLight`
///
/// - Equivalent to `RS2_OPTION_AMBIENT_LIGHT`
/// - Replacement: [Rs2Option::DigitalGain].
/// - Old Description: "Change the depth ambient light see rs2_ambient_light for values".
///
/// `ZeroOrderEnabled`
///
/// - Equivalent to `RS2_OPTION_ZERO_ORDER_ENABLED`
/// - Replacement: N/A.
/// - Old Description: "Toggle Zero-Order mode."
///
/// `ZeroOrderPointX`
///
/// - Equivalent to `RS2_OPTION_ZERO_ORDER_POINT_X`
/// - Replacement: N/A.
/// - Old Description: "Get the Zero order point x."
///
/// `ZeroOrderPointY`
///
/// - Equivalent to `RS2_OPTION_ZERO_ORDER_POINT_Y`
/// - Replacement: N/A.
/// - Old Description: "Get the Zero order point y."
///
/// `Trigger camera accuracy health`
///
/// - Deprecated as of 2.46 (not officially released, so technically 2.47)
/// - Old Description: "Enable Depth & color frame sync with periodic calibration for proper
/// alignment"
///
/// `Reset camera accuracy health`
///
/// - Deprecated as of 2.46 (not officially released, so technically 2.47)
/// - Old Description: "Reset Camera Accuracy metric (if affected by TriggerCameraAccuracyHealth
/// option)."
#[repr(i32)]
#[derive(FromPrimitive, ToPrimitive, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Rs2Option {
/// Enable/disable color backlight compensation.
BacklightCompensation = sys::rs2_option_RS2_OPTION_BACKLIGHT_COMPENSATION as i32,
/// Set color image brightness.
Brightness = sys::rs2_option_RS2_OPTION_BRIGHTNESS as i32,
/// Set color image contrast.
Contrast = sys::rs2_option_RS2_OPTION_CONTRAST as i32,
/// Set exposure time of color camera. Setting any value will disable auto exposure.
Exposure = sys::rs2_option_RS2_OPTION_EXPOSURE as i32,
/// Set color image gain.
Gain = sys::rs2_option_RS2_OPTION_GAIN as i32,
/// Set color image gamma setting.
Gamma = sys::rs2_option_RS2_OPTION_GAMMA as i32,
/// Set color image hue.
Hue = sys::rs2_option_RS2_OPTION_HUE as i32,
/// Set color image saturation.
Saturation = sys::rs2_option_RS2_OPTION_SATURATION as i32,
/// Set color image sharpness.
Sharpness = sys::rs2_option_RS2_OPTION_SHARPNESS as i32,
/// Set white balance of color image. Setting any value will disable auto white balance.
WhiteBalance = sys::rs2_option_RS2_OPTION_WHITE_BALANCE as i32,
/// Enable/disable color image auto-exposure.
EnableAutoExposure = sys::rs2_option_RS2_OPTION_ENABLE_AUTO_EXPOSURE as i32,
/// Enable/disable color image auto-white-balance
EnableAutoWhiteBalance = sys::rs2_option_RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE as i32,
/// Set the visual preset on the sensor. `sensor.get_option_range()` provides
/// access to several recommended sets of option presets for a depth camera. The preset
/// selection varies between devices and sensors.
VisualPreset = sys::rs2_option_RS2_OPTION_VISUAL_PRESET as i32,
/// Set the power of the laser emitter, with 0 meaning projector off.
LaserPower = sys::rs2_option_RS2_OPTION_LASER_POWER as i32,
/// Set the number of patterns projected per frame. The higher the accuracy value,
/// the more patterns projected. Increasing the number of patterns helps to achieve
/// better accuracy. Note that this control affects Depth FPS.
Accuracy = sys::rs2_option_RS2_OPTION_ACCURACY as i32,
/// Set the motion vs. range trade-off. Lower values allow for better motion sensitivity.
/// Higher values allow for better depth range.
MotionRange = sys::rs2_option_RS2_OPTION_MOTION_RANGE as i32,
/// Set the filter to apply to each depth frame. Each one of the filters is optimized for the
/// application requirements.
FilterOption = sys::rs2_option_RS2_OPTION_FILTER_OPTION as i32,
/// Set the confidence level threshold used by the Depth algorithm pipe.
/// This determines whether a pixel will get a valid range or will be marked as invalid.
ConfidenceThreshold = sys::rs2_option_RS2_OPTION_CONFIDENCE_THRESHOLD as i32,
/// Enable/disable emitters. Emitter selection:
///
/// - `0`: disable all emitters
/// - `1`: enable laser
/// - `2`: enable auto laser
/// - `3`: enable LED
EmitterEnabled = sys::rs2_option_RS2_OPTION_EMITTER_ENABLED as i32,
/// Set the number of frames the user is allowed to keep per stream.
/// Trying to hold on to more frames will cause frame drops.
FramesQueueSize = sys::rs2_option_RS2_OPTION_FRAMES_QUEUE_SIZE as i32,
/// Get the total number of detected frame drops from all streams.
TotalFrameDrops = sys::rs2_option_RS2_OPTION_TOTAL_FRAME_DROPS as i32,
/// Set the auto-exposure mode:
///
/// - Static
/// - Anti-Flicker
/// - Hybrid
AutoExposureMode = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_MODE as i32,
/// Set the power line frequency control for anti-flickering:
///
/// - Off
/// - 50Hz
/// - 60Hz
/// - Auto
PowerLineFrequency = sys::rs2_option_RS2_OPTION_POWER_LINE_FREQUENCY as i32,
/// Get the current Temperature of the ASIC.
AsicTemperature = sys::rs2_option_RS2_OPTION_ASIC_TEMPERATURE as i32,
/// Enable/disable error handling.
ErrorPollingEnabled = sys::rs2_option_RS2_OPTION_ERROR_POLLING_ENABLED as i32,
/// Get the Current Temperature of the projector.
ProjectorTemperature = sys::rs2_option_RS2_OPTION_PROJECTOR_TEMPERATURE as i32,
/// Enable/disable a trigger to be output from the camera to any external device on
/// every depth frame.
OutputTriggerEnabled = sys::rs2_option_RS2_OPTION_OUTPUT_TRIGGER_ENABLED as i32,
/// Get the current Motion-Module Temperature.
MotionModuleTemperature = sys::rs2_option_RS2_OPTION_MOTION_MODULE_TEMPERATURE as i32,
/// Set the number of meters represented by a single depth unit.
DepthUnits = sys::rs2_option_RS2_OPTION_DEPTH_UNITS as i32,
/// Enable/Disable automatic correction of the motion data.
EnableMotionCorrection = sys::rs2_option_RS2_OPTION_ENABLE_MOTION_CORRECTION as i32,
/// Allows the sensor to dynamically adjust the frame rate depending on lighting conditions.
AutoExposurePriority = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_PRIORITY as i32,
/// Set the color scheme for data visualization.
ColorScheme = sys::rs2_option_RS2_OPTION_COLOR_SCHEME as i32,
/// Enable/disable histogram equalization post-processing on the depth data.
HistogramEqualizationEnabled = sys::rs2_option_RS2_OPTION_HISTOGRAM_EQUALIZATION_ENABLED as i32,
/// Set the Minimal distance to the target.
MinDistance = sys::rs2_option_RS2_OPTION_MIN_DISTANCE as i32,
/// Set the Maximum distance to the target.
MaxDistance = sys::rs2_option_RS2_OPTION_MAX_DISTANCE as i32,
/// Get the texture mapping stream unique ID.
TextureSource = sys::rs2_option_RS2_OPTION_TEXTURE_SOURCE as i32,
/// Set the 2D-filter effect. The specific interpretation is given within the context of the filter.
FilterMagnitude = sys::rs2_option_RS2_OPTION_FILTER_MAGNITUDE as i32,
/// Set the 2D-filter parameter that controls the weight/radius for smoothing.
FilterSmoothAlpha = sys::rs2_option_RS2_OPTION_FILTER_SMOOTH_ALPHA as i32,
/// Set the 2D-filter range/validity threshold.
FilterSmoothDelta = sys::rs2_option_RS2_OPTION_FILTER_SMOOTH_DELTA as i32,
/// Enhance depth data post-processing with holes filling where appropriate.
HolesFill = sys::rs2_option_RS2_OPTION_HOLES_FILL as i32,
/// Get the distance in mm between the first and the second imagers in stereo-based depth cameras.
StereoBaseline = sys::rs2_option_RS2_OPTION_STEREO_BASELINE as i32,
/// Allows dynamically adjusting the convergence step value of the target exposure in
/// the Auto-Exposure algorithm.
AutoExposureConvergeStep = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_CONVERGE_STEP as i32,
/// Impose Inter-camera HW synchronization mode. Applicable for D400/L500/Rolling Shutter SKUs.
InterCamSyncMode = sys::rs2_option_RS2_OPTION_INTER_CAM_SYNC_MODE as i32,
/// Select a stream to process.
StreamFilter = sys::rs2_option_RS2_OPTION_STREAM_FILTER as i32,
/// Select a stream format to process.
StreamFormatFilter = sys::rs2_option_RS2_OPTION_STREAM_FORMAT_FILTER as i32,
/// Select a stream index to process.
StreamIndexFilter = sys::rs2_option_RS2_OPTION_STREAM_INDEX_FILTER as i32,
/// When supported, this option makes the camera switch the emitter state every frame:
/// 0 for disabled, 1 for enabled.
EmitterOnOff = sys::rs2_option_RS2_OPTION_EMITTER_ON_OFF as i32,
/// Get the LDD temperature.
LldTemperature = sys::rs2_option_RS2_OPTION_LLD_TEMPERATURE as i32,
/// Get the MC temperature.
McTemperature = sys::rs2_option_RS2_OPTION_MC_TEMPERATURE as i32,
/// Get the MA temperature.
MaTemperature = sys::rs2_option_RS2_OPTION_MA_TEMPERATURE as i32,
/// Hardware stream configuration.
HardwarePreset = sys::rs2_option_RS2_OPTION_HARDWARE_PRESET as i32,
/// Enable/disable global time.
GlobalTimeEnabled = sys::rs2_option_RS2_OPTION_GLOBAL_TIME_ENABLED as i32,
/// Get the APD temperature.
ApdTemperature = sys::rs2_option_RS2_OPTION_APD_TEMPERATURE as i32,
/// Enable/disable an internal map.
EnableMapping = sys::rs2_option_RS2_OPTION_ENABLE_MAPPING as i32,
/// Enable/disable appearance-based relocalization.
EnableRelocalization = sys::rs2_option_RS2_OPTION_ENABLE_RELOCALIZATION as i32,
/// Enable/disable position jumping.
EnablePoseJumping = sys::rs2_option_RS2_OPTION_ENABLE_POSE_JUMPING as i32,
/// Enable/disable dynamic calibration.
EnableDynamicCalibration = sys::rs2_option_RS2_OPTION_ENABLE_DYNAMIC_CALIBRATION as i32,
/// Get the offset from sensor to depth origin in millimeters.
DepthOffset = sys::rs2_option_RS2_OPTION_DEPTH_OFFSET as i32,
/// Set the power of the LED (light emitting diode), with 0 meaning off
LedPower = sys::rs2_option_RS2_OPTION_LED_POWER as i32,
/// Preserve the previous map when starting.
EnableMapPreservation = sys::rs2_option_RS2_OPTION_ENABLE_MAP_PRESERVATION as i32,
/// Enable/disable sensor shutdown when a free-fall is detected (on by default).
FreefallDetectionEnabled = sys::rs2_option_RS2_OPTION_FREEFALL_DETECTION_ENABLED as i32,
/// Changes the exposure time of Avalanche Photo Diode in the receiver.
AvalanchePhotoDiode = sys::rs2_option_RS2_OPTION_AVALANCHE_PHOTO_DIODE as i32,
/// Changes the amount of sharpening in the post-processed image.
PostProcessingSharpening = sys::rs2_option_RS2_OPTION_POST_PROCESSING_SHARPENING as i32,
/// Changes the amount of sharpening in the pre-processed image.
PreProcessingSharpening = sys::rs2_option_RS2_OPTION_PRE_PROCESSING_SHARPENING as i32,
/// Control edges and background noise.
NoiseFiltering = sys::rs2_option_RS2_OPTION_NOISE_FILTERING as i32,
/// Enable/disable pixel invalidation.
InvalidationBypass = sys::rs2_option_RS2_OPTION_INVALIDATION_BYPASS as i32,
/// Change the depth digital gain see rs2_digital_gain for values.
DigitalGain = sys::rs2_option_RS2_OPTION_DIGITAL_GAIN as i32,
/// The resolution mode: see rs2_sensor_mode for values.
SensoeMode = sys::rs2_option_RS2_OPTION_SENSOR_MODE as i32,
/// Enable/disable Laser On constantly (GS SKU Only).
EmitterAlwaysOn = sys::rs2_option_RS2_OPTION_EMITTER_ALWAYS_ON as i32,
/// Depth Thermal Compensation for selected D400 SKUs.
ThermalCompensation = sys::rs2_option_RS2_OPTION_THERMAL_COMPENSATION as i32,
/// Set host performance mode to optimize device settings so host can keep up with workload.
/// Take USB transaction granularity as an example. Setting option to low performance host leads
/// to larger USB transaction sizes and a reduced number of transactions. This improves performance
/// and stability if the host machine is relatively weak compared to the workload.
HostPerformance = sys::rs2_option_RS2_OPTION_HOST_PERFORMANCE as i32,
/// Enable/disable HDR.
HdrEnabled = sys::rs2_option_RS2_OPTION_HDR_ENABLED as i32,
/// Get HDR Sequence name.
SequenceName = sys::rs2_option_RS2_OPTION_SEQUENCE_NAME as i32,
/// Get HDR Sequence size.
SequenceSize = sys::rs2_option_RS2_OPTION_SEQUENCE_SIZE as i32,
/// Get HDR Sequence ID - 0 is not HDR; sequence ID for HDR configuration starts from 1.
SequenceId = sys::rs2_option_RS2_OPTION_SEQUENCE_ID as i32,
/// Get Humidity temperature [in Celsius].
HumidityTemperature = sys::rs2_option_RS2_OPTION_HUMIDITY_TEMPERATURE as i32, | NoiseEstimation = sys::rs2_option_RS2_OPTION_NOISE_ESTIMATION as i32,
/// Enable/disable data collection for calculating IR pixel reflectivity.
EnableIrReflectivity = sys::rs2_option_RS2_OPTION_ENABLE_IR_REFLECTIVITY as i32,
/// Auto exposure limit in microseconds.
///
/// Default is 0 which means full exposure range. If the requested exposure limit is greater
/// than frame time, it will be set to frame time at runtime. Setting will not take effect
/// until next streaming session.
AutoExposureLimit = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_LIMIT as i32,
/// Auto gain limits ranging from 16 to 248.
///
/// Default is 0 which means full gain. If the requested gain limit is less than 16, it will be
/// set to 16. If the requested gain limit is greater than 248, it will be set to 248. Setting
/// will not take effect until next streaming session.
AutoGainLimit = sys::rs2_option_RS2_OPTION_AUTO_GAIN_LIMIT as i32,
/// Enable receiver sensitivity according to ambient light, bounded by the Receiver Gain
/// control.
AutoReceiverSensitivity = sys::rs2_option_RS2_OPTION_AUTO_RX_SENSITIVITY as i32,
/// Changes the transmitter frequency, increasing effective range over sharpness.
TransmitterFrequency = sys::rs2_option_RS2_OPTION_TRANSMITTER_FREQUENCY as i32,
/* Not included since this just tells us the total number of options.
*
* Count = sys::rs2_option_RS2_OPTION_COUNT, */
}
impl Rs2Option {
/// Get the option as a CStr.
pub fn to_cstr(self) -> &'static CStr {
unsafe {
let ptr = sys::rs2_option_to_string(self as sys::rs2_option);
CStr::from_ptr(ptr)
}
}
/// Get the option as a str.
pub fn to_str(self) -> &'static str {
self.to_cstr().to_str().unwrap()
}
}
impl ToString for Rs2Option {
fn to_string(&self) -> String {
self.to_str().to_owned()
}
}
/// The range of available values of a supported option.
pub struct Rs2OptionRange {
/// The minimum value which will be accepted for this option
pub min: f32,
/// The maximum value which will be accepted for this option
pub max: f32,
/// The granularity of options which accept discrete values, or zero if the option accepts
/// continuous values
pub step: f32,
/// The default value of the option
pub default: f32,
}
#[cfg(test)]
mod tests {
use super::*;
use num_traits::FromPrimitive;
#[test]
fn all_variants_exist() {
let deprecated_options = vec![
sys::rs2_option_RS2_OPTION_ZERO_ORDER_POINT_X as i32,
sys::rs2_option_RS2_OPTION_ZERO_ORDER_POINT_Y as i32,
sys::rs2_option_RS2_OPTION_ZERO_ORDER_ENABLED as i32,
sys::rs2_option_RS2_OPTION_AMBIENT_LIGHT as i32,
sys::rs2_option_RS2_OPTION_TRIGGER_CAMERA_ACCURACY_HEALTH as i32,
sys::rs2_option_RS2_OPTION_RESET_CAMERA_ACCURACY_HEALTH as i32,
];
for i in 0..sys::rs2_option_RS2_OPTION_COUNT as i32 {
if deprecated_options.iter().any(|x| x == &i) {
continue;
}
assert!(
Rs2Option::from_i32(i).is_some(),
"Rs2Option variant for ordinal {} does not exist.",
i,
);
}
}
} | /// Enable/disable the maximum usable depth sensor range given the amount of ambient light in the scene.
EnableMaxUsableRange = sys::rs2_option_RS2_OPTION_ENABLE_MAX_USABLE_RANGE as i32,
/// Enable/disable the alternate IR. When enabled, the IR image holds the amplitude of the depth correlation.
AlternateIr = sys::rs2_option_RS2_OPTION_ALTERNATE_IR as i32,
/// Get an estimation of the noise on the IR image. | random_line_split |
option.rs | //! Enumeration of RS2 sensor options.
//!
//! Not all options apply to every sensor. In order to retrieve the correct options,
//! one must iterate over the `sensor` object for option compatibility.
//!
//! Notice that this option refers to the `sensor`, not the device. However, the device
//! used also matters; sensors that are alike across devices are not guaranteed to share
//! the same sensor options. Again, it is up to the user to query whether an option
//! is supported by the sensor before attempting to set it. Failure to do so may cause
//! an error in operation.
use super::Rs2Exception;
use num_derive::{FromPrimitive, ToPrimitive};
use realsense_sys as sys;
use std::ffi::CStr;
use thiserror::Error;
/// Occurs when an option cannot be set.
#[derive(Error, Debug)]
pub enum OptionSetError {
/// The requested option is not supported by this sensor.
#[error("Option not supported on this sensor.")]
OptionNotSupported,
/// The requested option is read-only and cannot be set.
#[error("Option is read only.")]
OptionIsReadOnly,
/// The requested option could not be set. Reason is reported by the sensor.
#[error("Could not set option. Type: {0}; Reason: {1}")]
CouldNotSetOption(Rs2Exception, String),
}
/// The enumeration of options available in the RealSense SDK.
///
/// The majority of the options presented have a specific range of valid values. Run
/// `sensor.get_option_range(Rs2Option::_)` to retrieve possible values of an Option type for your sensor.
/// Setting a bad value will lead to a no-op at best, and a malfunction at worst.
///
/// # Deprecated Options
///
/// `AmbientLight`
///
/// - Equivalent to `RS2_OPTION_AMBIENT_LIGHT`
/// - Replacement: [Rs2Option::DigitalGain].
/// - Old Description: "Change the depth ambient light see rs2_ambient_light for values".
///
/// `ZeroOrderEnabled`
///
/// - Equivalent to `RS2_OPTION_ZERO_ORDER_ENABLED`
/// - Replacement: N/A.
/// - Old Description: "Toggle Zero-Order mode."
///
/// `ZeroOrderPointX`
///
/// - Equivalent to `RS2_OPTION_ZERO_ORDER_POINT_X`
/// - Replacement: N/A.
/// - Old Description: "Get the Zero order point x."
///
/// `ZeroOrderPointY`
///
/// - Equivalent to `RS2_OPTION_ZERO_ORDER_POINT_Y`
/// - Replacement: N/A.
/// - Old Description: "Get the Zero order point y."
///
/// `Trigger camera accuracy health`
///
/// - Deprecated as of 2.46 (not officially released, so technically 2.47)
/// - Old Description: "Enable Depth & color frame sync with periodic calibration for proper
/// alignment"
///
/// `Reset camera accuracy health`
///
/// - Deprecated as of 2.46 (not officially released, so technically 2.47)
/// - Old Description: "Reset Camera Accuracy metric (if affected by TriggerCameraAccuracyHealth
/// option)."
#[repr(i32)]
#[derive(FromPrimitive, ToPrimitive, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Rs2Option {
/// Enable/disable color backlight compensation.
BacklightCompensation = sys::rs2_option_RS2_OPTION_BACKLIGHT_COMPENSATION as i32,
/// Set color image brightness.
Brightness = sys::rs2_option_RS2_OPTION_BRIGHTNESS as i32,
/// Set color image contrast.
Contrast = sys::rs2_option_RS2_OPTION_CONTRAST as i32,
/// Set exposure time of color camera. Setting any value will disable auto exposure.
Exposure = sys::rs2_option_RS2_OPTION_EXPOSURE as i32,
/// Set color image gain.
Gain = sys::rs2_option_RS2_OPTION_GAIN as i32,
/// Set color image gamma setting.
Gamma = sys::rs2_option_RS2_OPTION_GAMMA as i32,
/// Set color image hue.
Hue = sys::rs2_option_RS2_OPTION_HUE as i32,
/// Set color image saturation.
Saturation = sys::rs2_option_RS2_OPTION_SATURATION as i32,
/// Set color image sharpness.
Sharpness = sys::rs2_option_RS2_OPTION_SHARPNESS as i32,
/// Set white balance of color image. Setting any value will disable auto white balance.
WhiteBalance = sys::rs2_option_RS2_OPTION_WHITE_BALANCE as i32,
/// Enable/disable color image auto-exposure.
EnableAutoExposure = sys::rs2_option_RS2_OPTION_ENABLE_AUTO_EXPOSURE as i32,
/// Enable/disable color image auto-white-balance
EnableAutoWhiteBalance = sys::rs2_option_RS2_OPTION_ENABLE_AUTO_WHITE_BALANCE as i32,
/// Set the visual preset on the sensor. `sensor.get_option_range()` provides
/// access to several recommended sets of option presets for a depth camera. The preset
/// selection varies between devices and sensors.
VisualPreset = sys::rs2_option_RS2_OPTION_VISUAL_PRESET as i32,
/// Set the power of the laser emitter, with 0 meaning projector off.
LaserPower = sys::rs2_option_RS2_OPTION_LASER_POWER as i32,
/// Set the number of patterns projected per frame. The higher the accuracy value,
/// the more patterns projected. Increasing the number of patterns helps to achieve
/// better accuracy. Note that this control affects Depth FPS.
Accuracy = sys::rs2_option_RS2_OPTION_ACCURACY as i32,
/// Set the motion vs. range trade-off. Lower values allow for better motion sensitivity.
/// Higher values allow for better depth range.
MotionRange = sys::rs2_option_RS2_OPTION_MOTION_RANGE as i32,
/// Set the filter to apply to each depth frame. Each one of the filters is optimized for the
/// application requirements.
FilterOption = sys::rs2_option_RS2_OPTION_FILTER_OPTION as i32,
/// Set the confidence level threshold used by the Depth algorithm pipe.
/// This determines whether a pixel will get a valid range or will be marked as invalid.
ConfidenceThreshold = sys::rs2_option_RS2_OPTION_CONFIDENCE_THRESHOLD as i32,
/// Enable/disable emitters. Emitter selection:
///
/// - `0`: disable all emitters
/// - `1`: enable laser
/// - `2`: enable auto laser
/// - `3`: enable LED
EmitterEnabled = sys::rs2_option_RS2_OPTION_EMITTER_ENABLED as i32,
/// Set the number of frames the user is allowed to keep per stream.
/// Trying to hold on to more frames will cause frame drops.
FramesQueueSize = sys::rs2_option_RS2_OPTION_FRAMES_QUEUE_SIZE as i32,
/// Get the total number of detected frame drops from all streams.
TotalFrameDrops = sys::rs2_option_RS2_OPTION_TOTAL_FRAME_DROPS as i32,
/// Set the auto-exposure mode:
///
/// - Static
/// - Anti-Flicker
/// - Hybrid
AutoExposureMode = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_MODE as i32,
/// Set the power line frequency control for anti-flickering:
///
/// - Off
/// - 50Hz
/// - 60Hz
/// - Auto
PowerLineFrequency = sys::rs2_option_RS2_OPTION_POWER_LINE_FREQUENCY as i32,
/// Get the current Temperature of the ASIC.
AsicTemperature = sys::rs2_option_RS2_OPTION_ASIC_TEMPERATURE as i32,
/// Enable/disable error handling.
ErrorPollingEnabled = sys::rs2_option_RS2_OPTION_ERROR_POLLING_ENABLED as i32,
/// Get the Current Temperature of the projector.
ProjectorTemperature = sys::rs2_option_RS2_OPTION_PROJECTOR_TEMPERATURE as i32,
/// Enable/disable the trigger output from the camera to any external device on
/// every depth frame.
OutputTriggerEnabled = sys::rs2_option_RS2_OPTION_OUTPUT_TRIGGER_ENABLED as i32,
/// Get the current Motion-Module Temperature.
MotionModuleTemperature = sys::rs2_option_RS2_OPTION_MOTION_MODULE_TEMPERATURE as i32,
/// Set the number of meters represented by a single depth unit.
DepthUnits = sys::rs2_option_RS2_OPTION_DEPTH_UNITS as i32,
/// Enable/Disable automatic correction of the motion data.
EnableMotionCorrection = sys::rs2_option_RS2_OPTION_ENABLE_MOTION_CORRECTION as i32,
/// Allows the sensor to dynamically adjust the frame rate depending on lighting conditions.
AutoExposurePriority = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_PRIORITY as i32,
/// Set the color scheme for data visualization.
ColorScheme = sys::rs2_option_RS2_OPTION_COLOR_SCHEME as i32,
/// Enable/disable histogram equalization post-processing on the depth data.
HistogramEqualizationEnabled = sys::rs2_option_RS2_OPTION_HISTOGRAM_EQUALIZATION_ENABLED as i32,
/// Set the Minimal distance to the target.
MinDistance = sys::rs2_option_RS2_OPTION_MIN_DISTANCE as i32,
/// Set the Maximum distance to the target.
MaxDistance = sys::rs2_option_RS2_OPTION_MAX_DISTANCE as i32,
/// Get the texture mapping stream unique ID.
TextureSource = sys::rs2_option_RS2_OPTION_TEXTURE_SOURCE as i32,
/// Set the 2D-filter effect. The specific interpretation is given within the context of the filter.
FilterMagnitude = sys::rs2_option_RS2_OPTION_FILTER_MAGNITUDE as i32,
/// Set the 2D-filter parameter that controls the weight/radius for smoothing.
FilterSmoothAlpha = sys::rs2_option_RS2_OPTION_FILTER_SMOOTH_ALPHA as i32,
/// Set the 2D-filter range/validity threshold.
FilterSmoothDelta = sys::rs2_option_RS2_OPTION_FILTER_SMOOTH_DELTA as i32,
/// Enhance depth data post-processing with holes filling where appropriate.
HolesFill = sys::rs2_option_RS2_OPTION_HOLES_FILL as i32,
/// Get the distance in mm between the first and the second imagers in stereo-based depth cameras.
StereoBaseline = sys::rs2_option_RS2_OPTION_STEREO_BASELINE as i32,
/// Allows dynamically adjusting the convergence step value of the target exposure in
/// the Auto-Exposure algorithm.
AutoExposureConvergeStep = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_CONVERGE_STEP as i32,
/// Impose Inter-camera HW synchronization mode. Applicable for D400/L500/Rolling Shutter SKUs.
InterCamSyncMode = sys::rs2_option_RS2_OPTION_INTER_CAM_SYNC_MODE as i32,
/// Select a stream to process.
StreamFilter = sys::rs2_option_RS2_OPTION_STREAM_FILTER as i32,
/// Select a stream format to process.
StreamFormatFilter = sys::rs2_option_RS2_OPTION_STREAM_FORMAT_FILTER as i32,
/// Select a stream index to process.
StreamIndexFilter = sys::rs2_option_RS2_OPTION_STREAM_INDEX_FILTER as i32,
/// When supported, this option makes the camera switch the emitter state every frame:
/// 0 for disabled, 1 for enabled.
EmitterOnOff = sys::rs2_option_RS2_OPTION_EMITTER_ON_OFF as i32,
/// Get the LDD temperature.
LldTemperature = sys::rs2_option_RS2_OPTION_LLD_TEMPERATURE as i32,
/// Get the MC temperature.
McTemperature = sys::rs2_option_RS2_OPTION_MC_TEMPERATURE as i32,
/// Get the MA temperature.
MaTemperature = sys::rs2_option_RS2_OPTION_MA_TEMPERATURE as i32,
/// Hardware stream configuration.
HardwarePreset = sys::rs2_option_RS2_OPTION_HARDWARE_PRESET as i32,
/// Enable/disable global time.
GlobalTimeEnabled = sys::rs2_option_RS2_OPTION_GLOBAL_TIME_ENABLED as i32,
/// Get the APD temperature.
ApdTemperature = sys::rs2_option_RS2_OPTION_APD_TEMPERATURE as i32,
/// Enable/disable an internal map.
EnableMapping = sys::rs2_option_RS2_OPTION_ENABLE_MAPPING as i32,
/// Enable/disable appearance-based relocalization.
EnableRelocalization = sys::rs2_option_RS2_OPTION_ENABLE_RELOCALIZATION as i32,
/// Enable/disable position jumping.
EnablePoseJumping = sys::rs2_option_RS2_OPTION_ENABLE_POSE_JUMPING as i32,
/// Enable/disable dynamic calibration.
EnableDynamicCalibration = sys::rs2_option_RS2_OPTION_ENABLE_DYNAMIC_CALIBRATION as i32,
/// Get the offset from sensor to depth origin in millimeters.
DepthOffset = sys::rs2_option_RS2_OPTION_DEPTH_OFFSET as i32,
/// Set the power of the LED (light emitting diode), with 0 meaning off
LedPower = sys::rs2_option_RS2_OPTION_LED_POWER as i32,
/// Preserve the previous map when starting.
EnableMapPreservation = sys::rs2_option_RS2_OPTION_ENABLE_MAP_PRESERVATION as i32,
/// Enable/disable sensor shutdown when a free-fall is detected (on by default).
FreefallDetectionEnabled = sys::rs2_option_RS2_OPTION_FREEFALL_DETECTION_ENABLED as i32,
/// Changes the exposure time of Avalanche Photo Diode in the receiver.
AvalanchePhotoDiode = sys::rs2_option_RS2_OPTION_AVALANCHE_PHOTO_DIODE as i32,
/// Changes the amount of sharpening in the post-processed image.
PostProcessingSharpening = sys::rs2_option_RS2_OPTION_POST_PROCESSING_SHARPENING as i32,
/// Changes the amount of sharpening in the pre-processed image.
PreProcessingSharpening = sys::rs2_option_RS2_OPTION_PRE_PROCESSING_SHARPENING as i32,
/// Control edges and background noise.
NoiseFiltering = sys::rs2_option_RS2_OPTION_NOISE_FILTERING as i32,
/// Enable/disable pixel invalidation.
InvalidationBypass = sys::rs2_option_RS2_OPTION_INVALIDATION_BYPASS as i32,
/// Change the depth digital gain; see rs2_digital_gain for values.
DigitalGain = sys::rs2_option_RS2_OPTION_DIGITAL_GAIN as i32,
/// The resolution mode: see rs2_sensor_mode for values.
SensoeMode = sys::rs2_option_RS2_OPTION_SENSOR_MODE as i32,
/// Enable/disable Laser On constantly (GS SKU Only).
EmitterAlwaysOn = sys::rs2_option_RS2_OPTION_EMITTER_ALWAYS_ON as i32,
/// Depth Thermal Compensation for selected D400 SKUs.
ThermalCompensation = sys::rs2_option_RS2_OPTION_THERMAL_COMPENSATION as i32,
/// Set host performance mode to optimize device settings so host can keep up with workload.
/// Take USB transaction granularity as an example. Setting the option to a low-performance host leads
/// to larger USB transaction sizes and a reduced number of transactions. This improves performance
/// and stability if the host machine is relatively weak compared to the workload.
HostPerformance = sys::rs2_option_RS2_OPTION_HOST_PERFORMANCE as i32,
/// Enable/disable HDR.
HdrEnabled = sys::rs2_option_RS2_OPTION_HDR_ENABLED as i32,
/// Get HDR Sequence name.
SequenceName = sys::rs2_option_RS2_OPTION_SEQUENCE_NAME as i32,
/// Get HDR Sequence size.
SequenceSize = sys::rs2_option_RS2_OPTION_SEQUENCE_SIZE as i32,
/// Get HDR Sequence ID - 0 is not HDR; sequence ID for HDR configuration starts from 1.
SequenceId = sys::rs2_option_RS2_OPTION_SEQUENCE_ID as i32,
/// Get Humidity temperature [in Celsius].
HumidityTemperature = sys::rs2_option_RS2_OPTION_HUMIDITY_TEMPERATURE as i32,
/// Enable/disable the maximum usable depth sensor range given the amount of ambient light in the scene.
EnableMaxUsableRange = sys::rs2_option_RS2_OPTION_ENABLE_MAX_USABLE_RANGE as i32,
/// Enable/disable the alternate IR. When enabled, the IR image holds the amplitude of the depth correlation.
AlternateIr = sys::rs2_option_RS2_OPTION_ALTERNATE_IR as i32,
/// Get an estimation of the noise on the IR image.
NoiseEstimation = sys::rs2_option_RS2_OPTION_NOISE_ESTIMATION as i32,
/// Enable/disable data collection for calculating IR pixel reflectivity.
EnableIrReflectivity = sys::rs2_option_RS2_OPTION_ENABLE_IR_REFLECTIVITY as i32,
/// Auto exposure limit in microseconds.
///
/// Default is 0 which means full exposure range. If the requested exposure limit is greater
/// than frame time, it will be set to frame time at runtime. Setting will not take effect
/// until next streaming session.
AutoExposureLimit = sys::rs2_option_RS2_OPTION_AUTO_EXPOSURE_LIMIT as i32,
/// Auto gain limits ranging from 16 to 248.
///
/// Default is 0 which means full gain. If the requested gain limit is less than 16, it will be
/// set to 16. If the requested gain limit is greater than 248, it will be set to 248. Setting
/// will not take effect until next streaming session.
AutoGainLimit = sys::rs2_option_RS2_OPTION_AUTO_GAIN_LIMIT as i32,
/// Enable receiver sensitivity according to ambient light, bounded by the Receiver Gain
/// control.
AutoReceiverSensitivity = sys::rs2_option_RS2_OPTION_AUTO_RX_SENSITIVITY as i32,
/// Changes the transmitter frequency, increasing effective range over sharpness.
TransmitterFrequency = sys::rs2_option_RS2_OPTION_TRANSMITTER_FREQUENCY as i32,
/* Not included since this just tells us the total number of options.
*
* Count = sys::rs2_option_RS2_OPTION_COUNT, */
}
impl Rs2Option {
/// Get the option as a CStr.
pub fn to_cstr(self) -> &'static CStr {
unsafe {
let ptr = sys::rs2_option_to_string(self as sys::rs2_option);
CStr::from_ptr(ptr)
}
}
/// Get the option as a str.
pub fn to_str(self) -> &'static str {
self.to_cstr().to_str().unwrap()
}
}
impl ToString for Rs2Option {
fn to_string(&self) -> String {
self.to_str().to_owned()
}
}
/// The range of available values of a supported option.
pub struct Rs2OptionRange {
/// The minimum value which will be accepted for this option
pub min: f32,
/// The maximum value which will be accepted for this option
pub max: f32,
/// The granularity of options which accept discrete values, or zero if the option accepts
/// continuous values
pub step: f32,
/// The default value of the option
pub default: f32,
}
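// A minimal sketch (not part of the upstream crate) showing how the fields of
// `Rs2OptionRange` above can be used together: clamp a requested value into
// [min, max] and, when `step` is non-zero (i.e. the option is discrete), snap
// it onto the nearest valid step. The helper name is hypothetical.
#[allow(dead_code)]
fn snap_to_option_range(range: &Rs2OptionRange, requested: f32) -> f32 {
    let clamped = requested.clamp(range.min, range.max);
    if range.step > 0.0 {
        // Measure steps from `min`, round to the nearest one, then re-offset.
        range.min + ((clamped - range.min) / range.step).round() * range.step
    } else {
        clamped
    }
}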
#[cfg(test)]
mod tests {
use super::*;
use num_traits::FromPrimitive;
#[test]
fn all_variants_exist() |
}
| {
let deprecated_options = vec![
sys::rs2_option_RS2_OPTION_ZERO_ORDER_POINT_X as i32,
sys::rs2_option_RS2_OPTION_ZERO_ORDER_POINT_Y as i32,
sys::rs2_option_RS2_OPTION_ZERO_ORDER_ENABLED as i32,
sys::rs2_option_RS2_OPTION_AMBIENT_LIGHT as i32,
sys::rs2_option_RS2_OPTION_TRIGGER_CAMERA_ACCURACY_HEALTH as i32,
sys::rs2_option_RS2_OPTION_RESET_CAMERA_ACCURACY_HEALTH as i32,
];
for i in 0..sys::rs2_option_RS2_OPTION_COUNT as i32 {
if deprecated_options.iter().any(|x| x == &i) {
continue;
}
assert!(
Rs2Option::from_i32(i).is_some(),
"Rs2Option variant for ordinal {} does not exist.",
i,
);
}
} | identifier_body |
mixture.rs | use itertools::{
Either,
EitherOrBoth::{Both, Left, Right},
Itertools,
};
use std::{
cell::Cell,
sync::atomic::{AtomicU64, Ordering::Relaxed},
};
use tinyvec::TinyVec;
use crate::reaction::ReactionIdentifier;
use super::{
constants::*, gas_visibility, total_num_gases, with_reactions, with_specific_heats, GasIDX,
};
type SpecificFireInfo = (usize, f32, f32);
struct VisHash(AtomicU64);
impl Clone for VisHash {
fn clone(&self) -> Self {
VisHash(AtomicU64::new(self.0.load(Relaxed)))
}
}
/// The data structure representing a Space Station 13 gas mixture.
/// Unlike Monstermos, this doesn't have the archive built-in; instead,
/// the archive is a feature of the turf grid, only existing during
/// turf processing.
/// Also missing is `last_share`; due to the usage of Rust,
/// processing no longer requires sleeping turfs. Instead, we're using
/// a proper, fully-simulated FDM system, much like LINDA but without
/// sleeping turfs.
#[derive(Clone)]
pub struct Mixture {
temperature: f32,
pub volume: f32,
min_heat_capacity: f32,
immutable: bool,
moles: TinyVec<[f32; 8]>,
cached_heat_capacity: Cell<Option<f32>>,
cached_vis_hash: VisHash,
}
/*
Cell is not thread-safe. However, we use it only for caching heat capacity. The worst case race condition
is thus thread A and B try to access heat capacity at the same time; both find that it's currently
uncached, so both go to calculate it; both calculate it, and both calculate it to the same value,
then one sets the cache to that value, then the other does.
Technically, a worse one would be thread A mutates the gas mixture, changing a gas amount,
while thread B tries to get its heat capacity; thread B finds a well-defined heat capacity,
which is not correct, and uses it for a calculation, but this cannot happen: thread A would
have a write lock, precluding thread B from accessing it.
*/
unsafe impl Sync for Mixture {}
impl Default for Mixture {
fn default() -> Self {
Self::new()
}
}
impl Mixture {
/// Makes an empty gas mixture.
pub fn new() -> Self {
Self {
moles: TinyVec::new(),
temperature: 2.7,
volume: 2500.0,
min_heat_capacity: 0.0,
immutable: false,
cached_heat_capacity: Cell::new(None),
cached_vis_hash: VisHash(AtomicU64::new(0)),
}
}
/// Makes an empty gas mixture with the given volume.
pub fn from_vol(vol: f32) -> Self {
let mut ret = Self::new();
ret.volume = vol;
ret
}
/// Returns if any data is corrupt.
pub fn is_corrupt(&self) -> bool {
!self.temperature.is_normal() || self.moles.len() > total_num_gases()
}
/// Fixes any corruption found.
pub fn fix_corruption(&mut self) {
self.garbage_collect();
if self.temperature < 2.7 || !self.temperature.is_normal() {
self.set_temperature(293.15);
}
}
/// Returns the temperature of the mix. T
pub fn get_temperature(&self) -> f32 {
self.temperature
}
/// Sets the temperature, if the mix isn't immutable. T
pub fn set_temperature(&mut self, temp: f32) {
if !self.immutable && temp.is_normal() {
self.temperature = temp;
}
}
/// Sets the minimum heat capacity of this mix.
pub fn set_min_heat_capacity(&mut self, amt: f32) {
self.min_heat_capacity = amt;
}
/// Returns an iterator over the gas keys and mole amounts thereof.
pub fn enumerate(&self) -> impl Iterator<Item = (GasIDX, f32)> + '_ {
self.moles.iter().copied().enumerate()
}
/// Allows closures to iterate over each gas.
pub fn for_each_gas(
&self,
mut f: impl FnMut(GasIDX, f32) -> Result<(), auxtools::Runtime>,
) -> Result<(), auxtools::Runtime> {
for (i, g) in self.enumerate() {
f(i, g)?;
}
Ok(())
}
/// Returns (by value) the amount of moles of a given index the mix has. M
pub fn get_moles(&self, idx: GasIDX) -> f32 {
self.moles.get(idx).copied().unwrap_or(0.0)
}
/// Sets the mix to be internally immutable. Rust doesn't know about any of this, obviously.
pub fn mark_immutable(&mut self) {
self.immutable = true;
}
/// Returns whether this gas mixture is immutable.
pub fn is_immutable(&self) -> bool {
self.immutable
}
fn maybe_expand(&mut self, size: usize) {
if self.moles.len() < size {
self.moles.resize(size, 0.0);
}
}
/// If mix is not immutable, sets the gas at the given `idx` to the given `amt`.
pub fn set_moles(&mut self, idx: GasIDX, amt: f32) {
if !self.immutable
&& idx < total_num_gases()
&& (idx <= self.moles.len() || (amt > GAS_MIN_MOLES && amt.is_normal()))
{
self.maybe_expand((idx + 1) as usize);
unsafe {
*self.moles.get_unchecked_mut(idx) = amt;
};
self.cached_heat_capacity.set(None);
}
}
pub fn adjust_moles(&mut self, idx: GasIDX, amt: f32) {
if !self.immutable && amt.is_normal() && idx < total_num_gases() {
self.maybe_expand((idx + 1) as usize);
let r = unsafe { self.moles.get_unchecked_mut(idx) };
*r += amt;
if amt < 0.0 {
self.garbage_collect();
}
self.cached_heat_capacity.set(None);
}
}
#[inline(never)] // mostly this makes it so that heat_capacity itself is inlined
fn slow_heat_capacity(&self) -> f32 {
let heat_cap = with_specific_heats(|heats| {
self.moles
.iter()
.copied()
.zip(heats.iter())
.fold(0.0, |acc, (amt, cap)| cap.mul_add(amt, acc))
})
.max(self.min_heat_capacity);
self.cached_heat_capacity.set(Some(heat_cap));
heat_cap
}
/// The heat capacity of the material. [joules?]/mole-kelvin.
pub fn heat_capacity(&self) -> f32 {
self.cached_heat_capacity
.get()
.filter(|cap| cap.is_finite() && cap.is_sign_positive())
.unwrap_or_else(|| self.slow_heat_capacity())
}
/// Heat capacity of exactly one gas in this mix.
pub fn partial_heat_capacity(&self, idx: GasIDX) -> f32 {
self.moles
.get(idx)
.filter(|amt| amt.is_normal())
.map_or(0.0, |amt| amt * with_specific_heats(|heats| heats[idx]))
}
/// The total mole count of the mixture. Moles.
pub fn total_moles(&self) -> f32 {
self.moles.iter().sum()
}
/// Pressure. Kilopascals.
pub fn return_pressure(&self) -> f32 {
self.total_moles() * R_IDEAL_GAS_EQUATION * self.temperature / self.volume
}
/// Thermal energy. Joules?
pub fn thermal_energy(&self) -> f32 {
self.heat_capacity() * self.temperature
}
/// Merges one gas mixture into another.
pub fn merge(&mut self, giver: &Self) {
if self.immutable {
return;
}
let our_heat_capacity = self.heat_capacity();
let other_heat_capacity = giver.heat_capacity();
self.maybe_expand(giver.moles.len());
for (a, b) in self.moles.iter_mut().zip(giver.moles.iter()) {
*a += b;
}
let combined_heat_capacity = our_heat_capacity + other_heat_capacity;
if combined_heat_capacity > MINIMUM_HEAT_CAPACITY {
self.set_temperature(
(our_heat_capacity * self.temperature + other_heat_capacity * giver.temperature)
/ (combined_heat_capacity),
);
}
self.cached_heat_capacity.set(Some(combined_heat_capacity));
}
/// Transfers only the given gases from us to another mix.
pub fn transfer_gases_to(&mut self, r: f32, gases: &[GasIDX], into: &mut Self) {
let ratio = r.clamp(0.0, 1.0);
let initial_energy = into.thermal_energy();
let mut heat_transfer = 0.0;
with_specific_heats(|heats| {
for i in gases.iter().copied() {
if let Some(orig) = self.moles.get_mut(i) {
let delta = *orig * ratio;
heat_transfer += delta * self.temperature * heats[i];
*orig -= delta;
into.adjust_moles(i, delta);
}
}
});
self.cached_heat_capacity.set(None);
into.cached_heat_capacity.set(None);
into.set_temperature((initial_energy + heat_transfer) / into.heat_capacity());
}
/// Takes a percentage of this gas mixture's moles and puts it into another mixture. If this mix is mutable, also removes those moles from the original.
pub fn remove_ratio_into(&mut self, mut ratio: f32, into: &mut Self) {
if ratio <= 0.0 {
return;
}
if ratio >= 1.0 {
ratio = 1.0;
}
let orig_temp = self.temperature;
into.copy_from_mutable(self);
into.multiply(ratio);
self.multiply(1.0 - ratio);
self.temperature = orig_temp;
into.temperature = orig_temp;
}
/// As `remove_ratio_into`, but a raw number of moles instead of a ratio.
pub fn remove_into(&mut self, amount: f32, into: &mut Self) {
self.remove_ratio_into(amount / self.total_moles(), into);
}
/// A convenience function that makes the mixture for `remove_ratio_into` on the spot and returns it.
pub fn remove_ratio(&mut self, ratio: f32) -> Self {
let mut removed = Self::from_vol(self.volume);
self.remove_ratio_into(ratio, &mut removed);
removed
}
/// Like `remove_ratio`, but with moles.
pub fn remove(&mut self, amount: f32) -> Self {
self.remove_ratio(amount / self.total_moles())
}
/// Copies from a given gas mixture, if we're mutable.
pub fn copy_from_mutable(&mut self, sample: &Self) {
if self.immutable {
return;
}
self.moles = sample.moles.clone();
self.temperature = sample.temperature;
self.cached_heat_capacity
.set(sample.cached_heat_capacity.get());
}
/// A very simple finite difference solution to the heat transfer equation.
/// Works well enough for our purposes, though perhaps called less often
/// than it ought to be while we're working in Rust.
/// Differs from the original by not using archive, since we don't put the archive into the gas mix itself anymore.
pub fn temperature_share(&mut self, sharer: &mut Self, conduction_coefficient: f32) -> f32 {
let temperature_delta = self.temperature - sharer.temperature;
if temperature_delta.abs() > MINIMUM_TEMPERATURE_DELTA_TO_CONSIDER {
let self_heat_capacity = self.heat_capacity();
let sharer_heat_capacity = sharer.heat_capacity();
if sharer_heat_capacity > MINIMUM_HEAT_CAPACITY
&& self_heat_capacity > MINIMUM_HEAT_CAPACITY
{
let heat = conduction_coefficient
* temperature_delta * (self_heat_capacity * sharer_heat_capacity
/ (self_heat_capacity + sharer_heat_capacity));
if !self.immutable {
self.set_temperature((self.temperature - heat / self_heat_capacity).max(TCMB));
}
if !sharer.immutable {
sharer.set_temperature(
(sharer.temperature + heat / sharer_heat_capacity).max(TCMB),
);
}
}
}
sharer.temperature
}
/// As above, but you may put in any arbitrary coefficient, temp, heat capacity.
/// Only used for superconductivity as of right now.
pub fn temperature_share_non_gas(
&mut self,
conduction_coefficient: f32,
sharer_temperature: f32,
sharer_heat_capacity: f32,
) -> f32 {
let temperature_delta = self.temperature - sharer_temperature;
if temperature_delta.abs() > MINIMUM_TEMPERATURE_DELTA_TO_CONSIDER {
let self_heat_capacity = self.heat_capacity();
if sharer_heat_capacity > MINIMUM_HEAT_CAPACITY
&& self_heat_capacity > MINIMUM_HEAT_CAPACITY
{
let heat = conduction_coefficient
* temperature_delta * (self_heat_capacity * sharer_heat_capacity
/ (self_heat_capacity + sharer_heat_capacity));
if !self.immutable {
self.set_temperature((self.temperature - heat / self_heat_capacity).max(TCMB));
}
return (sharer_temperature + heat / sharer_heat_capacity).max(TCMB);
}
}
sharer_temperature
}
/// The second part of old compare(). Compares temperature, but only if this gas has sufficiently high moles.
pub fn temperature_compare(&self, sample: &Self) -> bool {
(self.get_temperature() - sample.get_temperature()).abs()
> MINIMUM_TEMPERATURE_DELTA_TO_SUSPEND
&& (self.total_moles() > MINIMUM_MOLES_DELTA_TO_MOVE)
}
/// Returns the maximum mole delta for an individual gas.
pub fn compare(&self, sample: &Self) -> f32 {
self.moles
.iter()
.copied()
.zip_longest(sample.moles.iter().copied())
.fold(0.0, |acc, pair| acc.max(pair.reduce(|a, b| (b - a).abs())))
}
pub fn compare_with(&self, sample: &Self, amt: f32) -> bool {
self.moles
.as_slice()
.iter()
.zip_longest(sample.moles.as_slice().iter())
.rev()
.any(|pair| match pair {
Left(a) => a >= &amt,
Right(b) => b >= &amt,
Both(a, b) => a != b && (a - b).abs() >= amt,
})
}
/// Clears the moles from the gas.
pub fn clear(&mut self) {
if !self.immutable {
self.moles.clear();
self.cached_heat_capacity.set(None);
}
}
/// Resets the gas mixture to an initialized-with-volume state.
pub fn clear_with_vol(&mut self, vol: f32) {
self.temperature = 2.7;
self.volume = vol;
self.min_heat_capacity = 0.0;
self.immutable = false;
self.clear();
}
/// Multiplies every gas's mole amount by this value.
pub fn multiply(&mut self, multiplier: f32) {
if !self.immutable {
for amt in self.moles.iter_mut() {
*amt *= multiplier;
}
self.cached_heat_capacity.set(None);
self.garbage_collect();
}
}
/// Checks if this mix can react with any reactions.
pub fn can_react(&self) -> bool {
with_reactions(|reactions| reactions.iter().any(|r| r.check_conditions(self))) | reactions
.iter()
.filter_map(|r| r.check_conditions(self).then(|| r.get_id()))
.collect()
})
}
/// Returns a tuple with oxidation power and fuel amount of this gas mixture.
pub fn get_burnability(&self) -> (f32, f32) {
use crate::types::FireInfo;
super::with_gas_info(|gas_info| {
self.moles
.iter()
.zip(gas_info)
.fold((0.0, 0.0), |mut acc, (&amt, this_gas_info)| {
if amt > GAS_MIN_MOLES {
match this_gas_info.fire_info {
FireInfo::Oxidation(oxidation) => {
if self.temperature > oxidation.temperature() {
let amount = amt
* (1.0 - oxidation.temperature() / self.temperature)
.max(0.0);
acc.0 += amount * oxidation.power();
}
}
FireInfo::Fuel(fire) => {
if self.temperature > fire.temperature() {
let amount = amt
* (1.0 - fire.temperature() / self.temperature).max(0.0);
acc.1 += amount / fire.burn_rate();
}
}
FireInfo::None => (),
}
}
acc
})
})
}
/// Returns only the oxidation power. Since this calculates burnability anyway, prefer `get_burnability`.
pub fn get_oxidation_power(&self) -> f32 {
self.get_burnability().0
}
/// Returns only fuel amount. Since this calculates burnability anyway, prefer `get_burnability`.
pub fn get_fuel_amount(&self) -> f32 {
self.get_burnability().1
}
/// Like `get_fire_info`, but takes a reference to a gas info vector,
/// so one doesn't need to do a recursive lock on the global list.
pub fn get_fire_info_with_lock(
&self,
gas_info: &[super::GasType],
) -> (Vec<SpecificFireInfo>, Vec<SpecificFireInfo>) {
use crate::types::FireInfo;
self.moles
.iter()
.zip(gas_info)
.enumerate()
.filter_map(|(i, (&amt, this_gas_info))| {
(amt > GAS_MIN_MOLES)
.then(|| match this_gas_info.fire_info {
FireInfo::Oxidation(oxidation) => (self.get_temperature()
> oxidation.temperature())
.then(|| {
let amount = amt
* (1.0 - oxidation.temperature() / self.get_temperature()).max(0.0);
Either::Right((i, amount, amount * oxidation.power()))
}),
FireInfo::Fuel(fuel) => {
(self.get_temperature() > fuel.temperature()).then(|| {
let amount = amt
* (1.0 - fuel.temperature() / self.get_temperature()).max(0.0);
Either::Left((i, amount, amount / fuel.burn_rate()))
})
}
FireInfo::None => None,
})
.flatten()
})
.partition_map(|r| r)
}
/// Returns two vectors:
/// The first contains all oxidizers in this list, as well as their actual mole amounts and how much fuel they can oxidize.
/// The second contains all fuel sources in this list, as well as their actual mole amounts and how much oxidizer they can react with.
pub fn get_fire_info(&self) -> (Vec<SpecificFireInfo>, Vec<SpecificFireInfo>) {
super::with_gas_info(|gas_info| self.get_fire_info_with_lock(gas_info))
}
/// Adds heat directly to the gas mixture, in joules (probably).
pub fn adjust_heat(&mut self, heat: f32) {
let cap = self.heat_capacity();
self.set_temperature(((cap * self.temperature) + heat) / cap);
}
/// Returns true if there's a visible gas in this mix.
pub fn is_visible(&self) -> bool {
self.enumerate()
.any(|(i, gas)| gas_visibility(i as usize).map_or(false, |amt| gas >= amt))
}
/// A hashed representation of the visibility of a gas, so that it only needs to update vis when actually changed.
pub fn vis_hash_changed(&self, gas_visibility: &[Option<f32>]) -> bool {
use std::hash::Hasher;
let mut hasher: ahash::AHasher = ahash::AHasher::default();
for (i, gas) in self.enumerate() {
if let Some(amt) = unsafe { gas_visibility.get_unchecked(i) }.filter(|&amt| gas >= amt)
{
hasher.write_usize(i);
hasher.write_usize((FACTOR_GAS_VISIBLE_MAX).min((gas / amt).ceil()) as usize);
}
}
let cur_hash = hasher.finish();
self.cached_vis_hash.0.swap(cur_hash, Relaxed) != cur_hash
}
// Removes all redundant zeroes from the gas mixture.
pub fn garbage_collect(&mut self) {
let mut last_valid_found = 0;
for (i, amt) in self.moles.iter_mut().enumerate() {
if *amt > GAS_MIN_MOLES {
last_valid_found = i;
} else {
*amt = 0.0;
}
}
self.moles.truncate(last_valid_found + 1);
}
}
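// A small sketch (not part of the original file): the post-merge temperature that
// `merge` above computes is an energy-weighted average of the two mixes'
// temperatures. This hypothetical helper reproduces that arithmetic using only the
// public API, and matches the value `test_merge` below checks for.
#[allow(dead_code)]
fn expected_merge_temperature(a: &Mixture, b: &Mixture) -> f32 {
    let (cap_a, cap_b) = (a.heat_capacity(), b.heat_capacity());
    (cap_a * a.get_temperature() + cap_b * b.get_temperature()) / (cap_a + cap_b)
}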
use std::ops::{Add, Mul};
/// Takes a copy of the mix, merges the right hand side, then returns the copy.
impl Add<&Mixture> for Mixture {
type Output = Self;
fn add(self, rhs: &Mixture) -> Self {
let mut ret = self;
ret.merge(rhs);
ret
}
}
/// Takes a copy of the mix, merges the right hand side, then returns the copy.
impl<'a, 'b> Add<&'a Mixture> for &'b Mixture {
type Output = Mixture;
fn add(self, rhs: &Mixture) -> Mixture {
let mut ret = self.clone();
ret.merge(rhs);
ret
}
}
/// Makes a copy of the given mix, multiplied by a scalar.
impl Mul<f32> for Mixture {
type Output = Self;
fn mul(self, rhs: f32) -> Self {
let mut ret = self;
ret.multiply(rhs);
ret
}
}
/// Makes a copy of the given mix, multiplied by a scalar.
impl<'a> Mul<f32> for &'a Mixture {
type Output = Mixture;
fn mul(self, rhs: f32) -> Mixture {
let mut ret = self.clone();
ret.multiply(rhs);
ret
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_merge() {
let mut into = Mixture::new();
into.set_moles(0, 82.0);
into.set_moles(1, 22.0);
into.set_temperature(293.15);
let mut source = Mixture::new();
source.set_moles(3, 100.0);
source.set_temperature(313.15);
into.merge(&source);
// make sure that the merge successfully moved the moles
assert_eq!(into.get_moles(3), 100.0);
assert_eq!(source.get_moles(3), 100.0); // source is not modified by merge
/*
make sure that the merge successfully changed the temperature of the mix merged into:
test gases have heat capacities of 2,080 and 20,000 respectively, so total thermal energies of
609,752 and 6,263,000 respectively once multiplied by temperatures. add those together,
then divide by new total heat capacity:
(609,752 + 6,263,000)/(2,080 + 20,000) =
6,872,752 / 22,080 ~
311.265942
so we compare to see if it's relatively close to 311.266, cause of floating point precision
*/
assert!(
(into.get_temperature() - 311.266).abs() < 0.01,
"{} should be near 311.266, is {}",
into.get_temperature(),
(into.get_temperature() - 311.266)
);
}
#[test]
fn test_remove() {
// also tests multiply, copy_from_mutable
let mut removed = Mixture::new();
removed.set_moles(0, 22.0);
removed.set_moles(1, 82.0);
let new = removed.remove_ratio(0.5);
assert_eq!(removed.compare(&new) >= MINIMUM_MOLES_DELTA_TO_MOVE, false);
assert_eq!(removed.get_moles(0), 11.0);
assert_eq!(removed.get_moles(1), 41.0);
removed.mark_immutable();
let new_two = removed.remove_ratio(0.5);
assert_eq!(
removed.compare(&new_two) >= MINIMUM_MOLES_DELTA_TO_MOVE,
true
);
assert_eq!(removed.get_moles(0), 11.0);
assert_eq!(removed.get_moles(1), 41.0);
assert_eq!(new_two.get_moles(0), 5.5);
}
} | }
/// Gets all of the reactions this mix should do.
pub fn all_reactable(&self) -> Vec<ReactionIdentifier> {
with_reactions(|reactions| { | random_line_split |
mixture.rs | use itertools::{
Either,
EitherOrBoth::{Both, Left, Right},
Itertools,
};
use std::{
cell::Cell,
sync::atomic::{AtomicU64, Ordering::Relaxed},
};
use tinyvec::TinyVec;
use crate::reaction::ReactionIdentifier;
use super::{
constants::*, gas_visibility, total_num_gases, with_reactions, with_specific_heats, GasIDX,
};
type SpecificFireInfo = (usize, f32, f32);
struct VisHash(AtomicU64);
impl Clone for VisHash {
fn clone(&self) -> Self {
VisHash(AtomicU64::new(self.0.load(Relaxed)))
}
}
/// The data structure representing a Space Station 13 gas mixture.
/// Unlike Monstermos, this doesn't have the archive built-in; instead,
/// the archive is a feature of the turf grid, only existing during
/// turf processing.
/// Also missing is `last_share`; due to the usage of Rust,
/// processing no longer requires sleeping turfs. Instead, we're using
/// a proper, fully-simulated FDM system, much like LINDA but without
/// sleeping turfs.
#[derive(Clone)]
pub struct Mixture {
temperature: f32,
pub volume: f32,
min_heat_capacity: f32,
immutable: bool,
moles: TinyVec<[f32; 8]>,
cached_heat_capacity: Cell<Option<f32>>,
cached_vis_hash: VisHash,
}
/*
Cell is not thread-safe. However, we use it only for caching heat capacity. The worst case race condition
is thus thread A and B try to access heat capacity at the same time; both find that it's currently
uncached, so both go to calculate it; both calculate it, and both calculate it to the same value,
then one sets the cache to that value, then the other does.
Technically, a worse one would be thread A mutates the gas mixture, changing a gas amount,
while thread B tries to get its heat capacity; thread B finds a well-defined heat capacity,
which is not correct, and uses it for a calculation, but this cannot happen: thread A would
have a write lock, precluding thread B from accessing it.
*/
unsafe impl Sync for Mixture {}
impl Default for Mixture {
fn default() -> Self {
Self::new()
}
}
impl Mixture {
/// Makes an empty gas mixture.
pub fn new() -> Self {
Self {
moles: TinyVec::new(),
temperature: 2.7,
volume: 2500.0,
min_heat_capacity: 0.0,
immutable: false,
cached_heat_capacity: Cell::new(None),
cached_vis_hash: VisHash(AtomicU64::new(0)),
}
}
/// Makes an empty gas mixture with the given volume.
pub fn from_vol(vol: f32) -> Self {
let mut ret = Self::new();
ret.volume = vol;
ret
}
/// Returns if any data is corrupt.
pub fn is_corrupt(&self) -> bool {
!self.temperature.is_normal() || self.moles.len() > total_num_gases()
}
/// Fixes any corruption found.
pub fn fix_corruption(&mut self) {
self.garbage_collect();
if self.temperature < 2.7 || !self.temperature.is_normal() {
self.set_temperature(293.15);
}
}
/// Returns the temperature of the mix. T
pub fn get_temperature(&self) -> f32 {
self.temperature
}
/// Sets the temperature, if the mix isn't immutable. T
pub fn set_temperature(&mut self, temp: f32) {
if !self.immutable && temp.is_normal() {
self.temperature = temp;
}
}
/// Sets the minimum heat capacity of this mix.
pub fn set_min_heat_capacity(&mut self, amt: f32) {
self.min_heat_capacity = amt;
}
/// Returns an iterator over the gas keys and mole amounts thereof.
pub fn enumerate(&self) -> impl Iterator<Item = (GasIDX, f32)> + '_ {
self.moles.iter().copied().enumerate()
}
/// Allows closures to iterate over each gas.
pub fn for_each_gas(
&self,
mut f: impl FnMut(GasIDX, f32) -> Result<(), auxtools::Runtime>,
) -> Result<(), auxtools::Runtime> {
for (i, g) in self.enumerate() {
f(i, g)?;
}
Ok(())
}
/// Returns (by value) the amount of moles of a given index the mix has. M
pub fn get_moles(&self, idx: GasIDX) -> f32 {
self.moles.get(idx).copied().unwrap_or(0.0)
}
/// Sets the mix to be internally immutable. Rust doesn't know about any of this, obviously.
pub fn mark_immutable(&mut self) {
self.immutable = true;
}
/// Returns whether this gas mixture is immutable.
pub fn is_immutable(&self) -> bool {
self.immutable
}
fn maybe_expand(&mut self, size: usize) {
if self.moles.len() < size {
self.moles.resize(size, 0.0);
}
}
/// If mix is not immutable, sets the gas at the given `idx` to the given `amt`.
pub fn set_moles(&mut self, idx: GasIDX, amt: f32) {
if !self.immutable
&& idx < total_num_gases()
&& (idx <= self.moles.len() || (amt > GAS_MIN_MOLES && amt.is_normal()))
{
self.maybe_expand((idx + 1) as usize);
unsafe {
*self.moles.get_unchecked_mut(idx) = amt;
};
self.cached_heat_capacity.set(None);
}
}
pub fn adjust_moles(&mut self, idx: GasIDX, amt: f32) {
if !self.immutable && amt.is_normal() && idx < total_num_gases() {
self.maybe_expand((idx + 1) as usize);
let r = unsafe { self.moles.get_unchecked_mut(idx) };
*r += amt;
if amt < 0.0 {
self.garbage_collect();
}
self.cached_heat_capacity.set(None);
}
}
#[inline(never)] // mostly this makes it so that heat_capacity itself is inlined
fn slow_heat_capacity(&self) -> f32 {
let heat_cap = with_specific_heats(|heats| {
self.moles
.iter()
.copied()
.zip(heats.iter())
.fold(0.0, |acc, (amt, cap)| cap.mul_add(amt, acc))
})
.max(self.min_heat_capacity);
self.cached_heat_capacity.set(Some(heat_cap));
heat_cap
}
/// The heat capacity of the material. [joules?]/mole-kelvin.
pub fn heat_capacity(&self) -> f32 {
self.cached_heat_capacity
.get()
.filter(|cap| cap.is_finite() && cap.is_sign_positive())
.unwrap_or_else(|| self.slow_heat_capacity())
}
/// Heat capacity of exactly one gas in this mix.
pub fn partial_heat_capacity(&self, idx: GasIDX) -> f32 {
self.moles
.get(idx)
.filter(|amt| amt.is_normal())
.map_or(0.0, |amt| amt * with_specific_heats(|heats| heats[idx]))
}
/// The total mole count of the mixture. Moles.
pub fn total_moles(&self) -> f32 {
self.moles.iter().sum()
}
/// Pressure. Kilopascals.
pub fn return_pressure(&self) -> f32 {
self.total_moles() * R_IDEAL_GAS_EQUATION * self.temperature / self.volume
}
/// Thermal energy. Joules?
pub fn thermal_energy(&self) -> f32 {
self.heat_capacity() * self.temperature
}
/// Merges one gas mixture into another.
pub fn merge(&mut self, giver: &Self) {
if self.immutable {
return;
}
let our_heat_capacity = self.heat_capacity();
let other_heat_capacity = giver.heat_capacity();
self.maybe_expand(giver.moles.len());
for (a, b) in self.moles.iter_mut().zip(giver.moles.iter()) {
*a += b;
}
let combined_heat_capacity = our_heat_capacity + other_heat_capacity;
if combined_heat_capacity > MINIMUM_HEAT_CAPACITY {
self.set_temperature(
(our_heat_capacity * self.temperature + other_heat_capacity * giver.temperature)
/ (combined_heat_capacity),
);
}
self.cached_heat_capacity.set(Some(combined_heat_capacity));
}
/// Transfers only the given gases from us to another mix.
pub fn transfer_gases_to(&mut self, r: f32, gases: &[GasIDX], into: &mut Self) {
let ratio = r.clamp(0.0, 1.0);
let initial_energy = into.thermal_energy();
let mut heat_transfer = 0.0;
with_specific_heats(|heats| {
for i in gases.iter().copied() {
if let Some(orig) = self.moles.get_mut(i) {
let delta = *orig * ratio;
heat_transfer += delta * self.temperature * heats[i];
*orig -= delta;
into.adjust_moles(i, delta);
}
}
});
self.cached_heat_capacity.set(None);
into.cached_heat_capacity.set(None);
into.set_temperature((initial_energy + heat_transfer) / into.heat_capacity());
}
/// Takes a percentage of this gas mixture's moles and puts it into another mixture. If this mix is mutable, also removes those moles from the original.
pub fn remove_ratio_into(&mut self, mut ratio: f32, into: &mut Self) {
if ratio <= 0.0 {
return;
}
if ratio >= 1.0 {
ratio = 1.0;
}
let orig_temp = self.temperature;
into.copy_from_mutable(self);
into.multiply(ratio);
self.multiply(1.0 - ratio);
self.temperature = orig_temp;
into.temperature = orig_temp;
}
/// As `remove_ratio_into`, but a raw number of moles instead of a ratio.
pub fn remove_into(&mut self, amount: f32, into: &mut Self) {
self.remove_ratio_into(amount / self.total_moles(), into);
}
/// A convenience function that makes the mixture for `remove_ratio_into` on the spot and returns it.
pub fn remove_ratio(&mut self, ratio: f32) -> Self {
let mut removed = Self::from_vol(self.volume);
self.remove_ratio_into(ratio, &mut removed);
removed
}
/// Like `remove_ratio`, but with moles.
pub fn remove(&mut self, amount: f32) -> Self {
self.remove_ratio(amount / self.total_moles())
}
/// Copies from a given gas mixture, if we're mutable.
pub fn copy_from_mutable(&mut self, sample: &Self) {
if self.immutable {
return;
}
self.moles = sample.moles.clone();
self.temperature = sample.temperature;
self.cached_heat_capacity
.set(sample.cached_heat_capacity.get());
}
/// A very simple finite difference solution to the heat transfer equation.
/// Works well enough for our purposes, though perhaps called less often
/// than it ought to be while we're working in Rust.
/// Differs from the original by not using archive, since we don't put the archive into the gas mix itself anymore.
pub fn temperature_share(&mut self, sharer: &mut Self, conduction_coefficient: f32) -> f32 {
let temperature_delta = self.temperature - sharer.temperature;
if temperature_delta.abs() > MINIMUM_TEMPERATURE_DELTA_TO_CONSIDER {
let self_heat_capacity = self.heat_capacity();
let sharer_heat_capacity = sharer.heat_capacity();
if sharer_heat_capacity > MINIMUM_HEAT_CAPACITY
&& self_heat_capacity > MINIMUM_HEAT_CAPACITY
{
let heat = conduction_coefficient
* temperature_delta * (self_heat_capacity * sharer_heat_capacity
/ (self_heat_capacity + sharer_heat_capacity));
if !self.immutable {
self.set_temperature((self.temperature - heat / self_heat_capacity).max(TCMB));
}
if !sharer.immutable {
sharer.set_temperature(
(sharer.temperature + heat / sharer_heat_capacity).max(TCMB),
);
}
}
}
sharer.temperature
}
/// As above, but you may put in any arbitrary coefficient, temp, heat capacity.
/// Only used for superconductivity as of right now.
pub fn temperature_share_non_gas(
&mut self,
conduction_coefficient: f32,
sharer_temperature: f32,
sharer_heat_capacity: f32,
) -> f32 {
let temperature_delta = self.temperature - sharer_temperature;
if temperature_delta.abs() > MINIMUM_TEMPERATURE_DELTA_TO_CONSIDER {
let self_heat_capacity = self.heat_capacity();
if sharer_heat_capacity > MINIMUM_HEAT_CAPACITY
&& self_heat_capacity > MINIMUM_HEAT_CAPACITY
{
let heat = conduction_coefficient
* temperature_delta * (self_heat_capacity * sharer_heat_capacity
/ (self_heat_capacity + sharer_heat_capacity));
if !self.immutable {
self.set_temperature((self.temperature - heat / self_heat_capacity).max(TCMB));
}
return (sharer_temperature + heat / sharer_heat_capacity).max(TCMB);
}
}
sharer_temperature
}
/// The second part of old compare(). Compares temperature, but only if this gas has sufficiently high moles.
pub fn temperature_compare(&self, sample: &Self) -> bool {
(self.get_temperature() - sample.get_temperature()).abs()
> MINIMUM_TEMPERATURE_DELTA_TO_SUSPEND
&& (self.total_moles() > MINIMUM_MOLES_DELTA_TO_MOVE)
}
/// Returns the maximum mole delta for an individual gas.
pub fn compare(&self, sample: &Self) -> f32 {
self.moles
.iter()
.copied()
.zip_longest(sample.moles.iter().copied())
.fold(0.0, |acc, pair| acc.max(pair.reduce(|a, b| (b - a).abs())))
}
pub fn compare_with(&self, sample: &Self, amt: f32) -> bool {
self.moles
.as_slice()
.iter()
.zip_longest(sample.moles.as_slice().iter())
.rev()
.any(|pair| match pair {
Left(a) => a >= &amt,
Right(b) => b >= &amt,
Both(a, b) => a != b && (a - b).abs() >= amt,
})
}
/// Clears the moles from the gas.
pub fn clear(&mut self) {
if !self.immutable {
self.moles.clear();
self.cached_heat_capacity.set(None);
}
}
/// Resets the gas mixture to an initialized-with-volume state.
pub fn clear_with_vol(&mut self, vol: f32) {
self.temperature = 2.7;
self.volume = vol;
self.min_heat_capacity = 0.0;
self.immutable = false;
self.clear();
}
/// Multiplies every gas's mole amount by this value.
pub fn multiply(&mut self, multiplier: f32) {
if !self.immutable {
for amt in self.moles.iter_mut() {
*amt *= multiplier;
}
self.cached_heat_capacity.set(None);
self.garbage_collect();
}
}
/// Checks if this mix can react with any reactions.
pub fn can_react(&self) -> bool {
with_reactions(|reactions| reactions.iter().any(|r| r.check_conditions(self)))
}
/// Gets all of the reactions this mix should do.
pub fn all_reactable(&self) -> Vec<ReactionIdentifier> {
with_reactions(|reactions| {
reactions
.iter()
.filter_map(|r| r.check_conditions(self).then(|| r.get_id()))
.collect()
})
}
/// Returns a tuple with oxidation power and fuel amount of this gas mixture.
pub fn get_burnability(&self) -> (f32, f32) {
use crate::types::FireInfo;
super::with_gas_info(|gas_info| {
self.moles
.iter()
.zip(gas_info)
.fold((0.0, 0.0), |mut acc, (&amt, this_gas_info)| {
if amt > GAS_MIN_MOLES {
match this_gas_info.fire_info {
FireInfo::Oxidation(oxidation) => |
FireInfo::Fuel(fire) => {
if self.temperature > fire.temperature() {
let amount = amt
* (1.0 - fire.temperature() / self.temperature).max(0.0);
acc.1 += amount / fire.burn_rate();
}
}
FireInfo::None => (),
}
}
acc
})
})
}
/// Returns only the oxidation power. Since this calculates burnability anyway, prefer `get_burnability`.
pub fn get_oxidation_power(&self) -> f32 {
self.get_burnability().0
}
/// Returns only fuel amount. Since this calculates burnability anyway, prefer `get_burnability`.
pub fn get_fuel_amount(&self) -> f32 {
self.get_burnability().1
}
/// Like `get_fire_info`, but takes a reference to a gas info vector,
/// so one doesn't need to do a recursive lock on the global list.
pub fn get_fire_info_with_lock(
&self,
gas_info: &[super::GasType],
) -> (Vec<SpecificFireInfo>, Vec<SpecificFireInfo>) {
use crate::types::FireInfo;
self.moles
.iter()
.zip(gas_info)
.enumerate()
.filter_map(|(i, (&amt, this_gas_info))| {
(amt > GAS_MIN_MOLES)
.then(|| match this_gas_info.fire_info {
FireInfo::Oxidation(oxidation) => (self.get_temperature()
> oxidation.temperature())
.then(|| {
let amount = amt
* (1.0 - oxidation.temperature() / self.get_temperature()).max(0.0);
Either::Right((i, amount, amount * oxidation.power()))
}),
FireInfo::Fuel(fuel) => {
(self.get_temperature() > fuel.temperature()).then(|| {
let amount = amt
* (1.0 - fuel.temperature() / self.get_temperature()).max(0.0);
Either::Left((i, amount, amount / fuel.burn_rate()))
})
}
FireInfo::None => None,
})
.flatten()
})
.partition_map(|r| r)
}
/// Returns two vectors:
/// The first contains all oxidizers in this list, as well as their actual mole amounts and how much fuel they can oxidize.
/// The second contains all fuel sources in this list, as well as their actual mole amounts and how much oxidizer they can react with.
pub fn get_fire_info(&self) -> (Vec<SpecificFireInfo>, Vec<SpecificFireInfo>) {
super::with_gas_info(|gas_info| self.get_fire_info_with_lock(gas_info))
}
/// Adds heat directly to the gas mixture, in joules (probably).
pub fn adjust_heat(&mut self, heat: f32) {
let cap = self.heat_capacity();
self.set_temperature(((cap * self.temperature) + heat) / cap);
}
/// Returns true if there's a visible gas in this mix.
pub fn is_visible(&self) -> bool {
self.enumerate()
.any(|(i, gas)| gas_visibility(i as usize).map_or(false, |amt| gas >= amt))
}
/// A hashed representation of the visibility of a gas, so that it only needs to update vis when actually changed.
pub fn vis_hash_changed(&self, gas_visibility: &[Option<f32>]) -> bool {
use std::hash::Hasher;
let mut hasher: ahash::AHasher = ahash::AHasher::default();
for (i, gas) in self.enumerate() {
if let Some(amt) = unsafe { gas_visibility.get_unchecked(i) }.filter(|&amt| gas >= amt)
{
hasher.write_usize(i);
hasher.write_usize((FACTOR_GAS_VISIBLE_MAX).min((gas / amt).ceil()) as usize);
}
}
let cur_hash = hasher.finish();
self.cached_vis_hash.0.swap(cur_hash, Relaxed) != cur_hash
}
// Removes all redundant zeroes from the gas mixture.
pub fn garbage_collect(&mut self) {
let mut last_valid_found = 0;
for (i, amt) in self.moles.iter_mut().enumerate() {
if *amt > GAS_MIN_MOLES {
last_valid_found = i;
} else {
*amt = 0.0;
}
}
self.moles.truncate(last_valid_found + 1);
}
}
use std::ops::{Add, Mul};
/// Takes a copy of the mix, merges the right hand side, then returns the copy.
impl Add<&Mixture> for Mixture {
type Output = Self;
fn add(self, rhs: &Mixture) -> Self {
let mut ret = self;
ret.merge(rhs);
ret
}
}
/// Takes a copy of the mix, merges the right hand side, then returns the copy.
impl<'a, 'b> Add<&'a Mixture> for &'b Mixture {
type Output = Mixture;
fn add(self, rhs: &Mixture) -> Mixture {
let mut ret = self.clone();
ret.merge(rhs);
ret
}
}
/// Makes a copy of the given mix, multiplied by a scalar.
impl Mul<f32> for Mixture {
type Output = Self;
fn mul(self, rhs: f32) -> Self {
let mut ret = self;
ret.multiply(rhs);
ret
}
}
/// Makes a copy of the given mix, multiplied by a scalar.
impl<'a> Mul<f32> for &'a Mixture {
type Output = Mixture;
fn mul(self, rhs: f32) -> Mixture {
let mut ret = self.clone();
ret.multiply(rhs);
ret
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_merge() {
let mut into = Mixture::new();
into.set_moles(0, 82.0);
into.set_moles(1, 22.0);
into.set_temperature(293.15);
let mut source = Mixture::new();
source.set_moles(3, 100.0);
source.set_temperature(313.15);
into.merge(&source);
// make sure that the merge successfully moved the moles
assert_eq!(into.get_moles(3), 100.0);
assert_eq!(source.get_moles(3), 100.0); // source is not modified by merge
/*
make sure that the merge successfully changed the temperature of the mix merged into:
test gases have heat capacities of 2,080 and 20,000 respectively, so total thermal energies of
609,752 and 6,263,000 respectively once multiplied by temperatures. add those together,
then divide by new total heat capacity:
(609,752 + 6,263,000)/(2,080 + 20,000) =
6,872,752 / 22,080 ~
311.265942
so we compare to see if it's relatively close to 311.266, cause of floating point precision
*/
assert!(
(into.get_temperature() - 311.266).abs() < 0.01,
"{} should be near 311.266, is {}",
into.get_temperature(),
(into.get_temperature() - 311.266)
);
}
#[test]
fn test_remove() {
// also tests multiply, copy_from_mutable
let mut removed = Mixture::new();
removed.set_moles(0, 22.0);
removed.set_moles(1, 82.0);
let new = removed.remove_ratio(0.5);
assert_eq!(removed.compare(&new) >= MINIMUM_MOLES_DELTA_TO_MOVE, false);
assert_eq!(removed.get_moles(0), 11.0);
assert_eq!(removed.get_moles(1), 41.0);
removed.mark_immutable();
let new_two = removed.remove_ratio(0.5);
assert_eq!(
removed.compare(&new_two) >= MINIMUM_MOLES_DELTA_TO_MOVE,
true
);
assert_eq!(removed.get_moles(0), 11.0);
assert_eq!(removed.get_moles(1), 41.0);
assert_eq!(new_two.get_moles(0), 5.5);
}
}
| {
if self.temperature > oxidation.temperature() {
let amount = amt
* (1.0 - oxidation.temperature() / self.temperature)
.max(0.0);
acc.0 += amount * oxidation.power();
}
} | conditional_block |
mixture.rs | use itertools::{
Either,
EitherOrBoth::{Both, Left, Right},
Itertools,
};
use std::{
cell::Cell,
sync::atomic::{AtomicU64, Ordering::Relaxed},
};
use tinyvec::TinyVec;
use crate::reaction::ReactionIdentifier;
use super::{
constants::*, gas_visibility, total_num_gases, with_reactions, with_specific_heats, GasIDX,
};
type SpecificFireInfo = (usize, f32, f32);
struct VisHash(AtomicU64);
impl Clone for VisHash {
fn clone(&self) -> Self {
VisHash(AtomicU64::new(self.0.load(Relaxed)))
}
}
/// The data structure representing a Space Station 13 gas mixture.
/// Unlike Monstermos, this doesn't have the archive built-in; instead,
/// the archive is a feature of the turf grid, only existing during
/// turf processing.
/// Also missing is `last_share`; due to the usage of Rust,
/// processing no longer requires sleeping turfs. Instead, we're using
/// a proper, fully-simulated FDM system, much like LINDA but without
/// sleeping turfs.
#[derive(Clone)]
pub struct Mixture {
temperature: f32,
pub volume: f32,
min_heat_capacity: f32,
immutable: bool,
moles: TinyVec<[f32; 8]>,
cached_heat_capacity: Cell<Option<f32>>,
cached_vis_hash: VisHash,
}
/*
Cell is not thread-safe. However, we use it only for caching heat capacity. The worst case race condition
is thus thread A and B try to access heat capacity at the same time; both find that it's currently
uncached, so both go to calculate it; both calculate it, and both calculate it to the same value,
then one sets the cache to that value, then the other does.
Technically, a worse one would be thread A mutates the gas mixture, changing a gas amount,
while thread B tries to get its heat capacity; thread B finds a well-defined heat capacity,
which is not correct, and uses it for a calculation, but this cannot happen: thread A would
have a write lock, precluding thread B from accessing it.
*/
unsafe impl Sync for Mixture {}
impl Default for Mixture {
fn default() -> Self {
Self::new()
}
}
impl Mixture {
/// Makes an empty gas mixture.
pub fn new() -> Self {
Self {
moles: TinyVec::new(),
temperature: 2.7,
volume: 2500.0,
min_heat_capacity: 0.0,
immutable: false,
cached_heat_capacity: Cell::new(None),
cached_vis_hash: VisHash(AtomicU64::new(0)),
}
}
/// Makes an empty gas mixture with the given volume.
pub fn from_vol(vol: f32) -> Self {
let mut ret = Self::new();
ret.volume = vol;
ret
}
/// Returns if any data is corrupt.
pub fn is_corrupt(&self) -> bool {
!self.temperature.is_normal() || self.moles.len() > total_num_gases()
}
/// Fixes any corruption found.
pub fn fix_corruption(&mut self) {
self.garbage_collect();
if self.temperature < 2.7 || !self.temperature.is_normal() {
self.set_temperature(293.15);
}
}
/// Returns the temperature of the mix. T
pub fn get_temperature(&self) -> f32 {
self.temperature
}
/// Sets the temperature, if the mix isn't immutable. T
pub fn set_temperature(&mut self, temp: f32) {
if !self.immutable && temp.is_normal() {
self.temperature = temp;
}
}
/// Sets the minimum heat capacity of this mix.
pub fn set_min_heat_capacity(&mut self, amt: f32) {
self.min_heat_capacity = amt;
}
/// Returns an iterator over the gas keys and mole amounts thereof.
pub fn enumerate(&self) -> impl Iterator<Item = (GasIDX, f32)> + '_ {
self.moles.iter().copied().enumerate()
}
/// Allows closures to iterate over each gas.
pub fn for_each_gas(
&self,
mut f: impl FnMut(GasIDX, f32) -> Result<(), auxtools::Runtime>,
) -> Result<(), auxtools::Runtime> {
for (i, g) in self.enumerate() {
f(i, g)?;
}
Ok(())
}
/// Returns (by value) the amount of moles of a given index the mix has. M
pub fn get_moles(&self, idx: GasIDX) -> f32 {
self.moles.get(idx).copied().unwrap_or(0.0)
}
/// Sets the mix to be internally immutable. Rust doesn't know about any of this, obviously.
pub fn mark_immutable(&mut self) {
self.immutable = true;
}
/// Returns whether this gas mixture is immutable.
pub fn is_immutable(&self) -> bool {
self.immutable
}
fn maybe_expand(&mut self, size: usize) {
if self.moles.len() < size {
self.moles.resize(size, 0.0);
}
}
/// If mix is not immutable, sets the gas at the given `idx` to the given `amt`.
pub fn set_moles(&mut self, idx: GasIDX, amt: f32) {
if !self.immutable
&& idx < total_num_gases()
&& (idx <= self.moles.len() || (amt > GAS_MIN_MOLES && amt.is_normal()))
{
self.maybe_expand((idx + 1) as usize);
unsafe {
*self.moles.get_unchecked_mut(idx) = amt;
};
self.cached_heat_capacity.set(None);
}
}
pub fn adjust_moles(&mut self, idx: GasIDX, amt: f32) {
if !self.immutable && amt.is_normal() && idx < total_num_gases() {
self.maybe_expand((idx + 1) as usize);
let r = unsafe { self.moles.get_unchecked_mut(idx) };
*r += amt;
if amt < 0.0 {
self.garbage_collect();
}
self.cached_heat_capacity.set(None);
}
}
#[inline(never)] // mostly this makes it so that heat_capacity itself is inlined
fn slow_heat_capacity(&self) -> f32 {
let heat_cap = with_specific_heats(|heats| {
self.moles
.iter()
.copied()
.zip(heats.iter())
.fold(0.0, |acc, (amt, cap)| cap.mul_add(amt, acc))
})
.max(self.min_heat_capacity);
self.cached_heat_capacity.set(Some(heat_cap));
heat_cap
}
/// The heat capacity of the material. [joules?]/mole-kelvin.
pub fn heat_capacity(&self) -> f32 {
self.cached_heat_capacity
.get()
.filter(|cap| cap.is_finite() && cap.is_sign_positive())
.unwrap_or_else(|| self.slow_heat_capacity())
}
/// Heat capacity of exactly one gas in this mix.
pub fn partial_heat_capacity(&self, idx: GasIDX) -> f32 {
self.moles
.get(idx)
.filter(|amt| amt.is_normal())
.map_or(0.0, |amt| amt * with_specific_heats(|heats| heats[idx]))
}
/// The total mole count of the mixture. Moles.
pub fn total_moles(&self) -> f32 {
self.moles.iter().sum()
}
/// Pressure. Kilopascals.
pub fn return_pressure(&self) -> f32 {
self.total_moles() * R_IDEAL_GAS_EQUATION * self.temperature / self.volume
}
/// Thermal energy. Joules?
pub fn thermal_energy(&self) -> f32 |
/// Merges one gas mixture into another.
pub fn merge(&mut self, giver: &Self) {
if self.immutable {
return;
}
let our_heat_capacity = self.heat_capacity();
let other_heat_capacity = giver.heat_capacity();
self.maybe_expand(giver.moles.len());
for (a, b) in self.moles.iter_mut().zip(giver.moles.iter()) {
*a += b;
}
let combined_heat_capacity = our_heat_capacity + other_heat_capacity;
if combined_heat_capacity > MINIMUM_HEAT_CAPACITY {
self.set_temperature(
(our_heat_capacity * self.temperature + other_heat_capacity * giver.temperature)
/ (combined_heat_capacity),
);
}
self.cached_heat_capacity.set(Some(combined_heat_capacity));
}
/// Transfers only the given gases from us to another mix.
pub fn transfer_gases_to(&mut self, r: f32, gases: &[GasIDX], into: &mut Self) {
let ratio = r.clamp(0.0, 1.0);
let initial_energy = into.thermal_energy();
let mut heat_transfer = 0.0;
with_specific_heats(|heats| {
for i in gases.iter().copied() {
if let Some(orig) = self.moles.get_mut(i) {
let delta = *orig * ratio;
heat_transfer += delta * self.temperature * heats[i];
*orig -= delta;
into.adjust_moles(i, delta);
}
}
});
self.cached_heat_capacity.set(None);
into.cached_heat_capacity.set(None);
into.set_temperature((initial_energy + heat_transfer) / into.heat_capacity());
}
/// Takes a percentage of this gas mixture's moles and puts it into another mixture. If this mix is mutable, also removes those moles from the original.
pub fn remove_ratio_into(&mut self, mut ratio: f32, into: &mut Self) {
if ratio <= 0.0 {
return;
}
if ratio >= 1.0 {
ratio = 1.0;
}
let orig_temp = self.temperature;
into.copy_from_mutable(self);
into.multiply(ratio);
self.multiply(1.0 - ratio);
self.temperature = orig_temp;
into.temperature = orig_temp;
}
/// As `remove_ratio_into`, but a raw number of moles instead of a ratio.
pub fn remove_into(&mut self, amount: f32, into: &mut Self) {
self.remove_ratio_into(amount / self.total_moles(), into);
}
/// A convenience function that makes the mixture for `remove_ratio_into` on the spot and returns it.
pub fn remove_ratio(&mut self, ratio: f32) -> Self {
let mut removed = Self::from_vol(self.volume);
self.remove_ratio_into(ratio, &mut removed);
removed
}
/// Like `remove_ratio`, but with moles.
pub fn remove(&mut self, amount: f32) -> Self {
self.remove_ratio(amount / self.total_moles())
}
/// Copies from a given gas mixture, if we're mutable.
pub fn copy_from_mutable(&mut self, sample: &Self) {
if self.immutable {
return;
}
self.moles = sample.moles.clone();
self.temperature = sample.temperature;
self.cached_heat_capacity
.set(sample.cached_heat_capacity.get());
}
/// A very simple finite difference solution to the heat transfer equation.
/// Works well enough for our purposes, though perhaps called less often
/// than it ought to be while we're working in Rust.
/// Differs from the original by not using archive, since we don't put the archive into the gas mix itself anymore.
pub fn temperature_share(&mut self, sharer: &mut Self, conduction_coefficient: f32) -> f32 {
let temperature_delta = self.temperature - sharer.temperature;
if temperature_delta.abs() > MINIMUM_TEMPERATURE_DELTA_TO_CONSIDER {
let self_heat_capacity = self.heat_capacity();
let sharer_heat_capacity = sharer.heat_capacity();
if sharer_heat_capacity > MINIMUM_HEAT_CAPACITY
&& self_heat_capacity > MINIMUM_HEAT_CAPACITY
{
let heat = conduction_coefficient
* temperature_delta * (self_heat_capacity * sharer_heat_capacity
/ (self_heat_capacity + sharer_heat_capacity));
if !self.immutable {
self.set_temperature((self.temperature - heat / self_heat_capacity).max(TCMB));
}
if !sharer.immutable {
sharer.set_temperature(
(sharer.temperature + heat / sharer_heat_capacity).max(TCMB),
);
}
}
}
sharer.temperature
}
/// As above, but you may put in any arbitrary coefficient, temp, heat capacity.
/// Only used for superconductivity as of right now.
pub fn temperature_share_non_gas(
&mut self,
conduction_coefficient: f32,
sharer_temperature: f32,
sharer_heat_capacity: f32,
) -> f32 {
let temperature_delta = self.temperature - sharer_temperature;
if temperature_delta.abs() > MINIMUM_TEMPERATURE_DELTA_TO_CONSIDER {
let self_heat_capacity = self.heat_capacity();
if sharer_heat_capacity > MINIMUM_HEAT_CAPACITY
&& self_heat_capacity > MINIMUM_HEAT_CAPACITY
{
let heat = conduction_coefficient
* temperature_delta * (self_heat_capacity * sharer_heat_capacity
/ (self_heat_capacity + sharer_heat_capacity));
if !self.immutable {
self.set_temperature((self.temperature - heat / self_heat_capacity).max(TCMB));
}
return (sharer_temperature + heat / sharer_heat_capacity).max(TCMB);
}
}
sharer_temperature
}
/// The second part of old compare(). Compares temperature, but only if this gas has sufficiently high moles.
pub fn temperature_compare(&self, sample: &Self) -> bool {
(self.get_temperature() - sample.get_temperature()).abs()
> MINIMUM_TEMPERATURE_DELTA_TO_SUSPEND
&& (self.total_moles() > MINIMUM_MOLES_DELTA_TO_MOVE)
}
/// Returns the maximum mole delta for an individual gas.
pub fn compare(&self, sample: &Self) -> f32 {
self.moles
.iter()
.copied()
.zip_longest(sample.moles.iter().copied())
.fold(0.0, |acc, pair| acc.max(pair.reduce(|a, b| (b - a).abs())))
}
pub fn compare_with(&self, sample: &Self, amt: f32) -> bool {
self.moles
.as_slice()
.iter()
.zip_longest(sample.moles.as_slice().iter())
.rev()
.any(|pair| match pair {
Left(a) => a >= &amt,
Right(b) => b >= &amt,
Both(a, b) => a != b && (a - b).abs() >= amt,
})
}
/// Clears the moles from the gas.
pub fn clear(&mut self) {
if !self.immutable {
self.moles.clear();
self.cached_heat_capacity.set(None);
}
}
/// Resets the gas mixture to an initialized-with-volume state.
pub fn clear_with_vol(&mut self, vol: f32) {
self.temperature = 2.7;
self.volume = vol;
self.min_heat_capacity = 0.0;
self.immutable = false;
self.clear();
}
/// Multiplies every gas molage with this value.
pub fn multiply(&mut self, multiplier: f32) {
if !self.immutable {
for amt in self.moles.iter_mut() {
*amt *= multiplier;
}
self.cached_heat_capacity.set(None);
self.garbage_collect();
}
}
    /// Checks if the mix can react with any reactions.
pub fn can_react(&self) -> bool {
with_reactions(|reactions| reactions.iter().any(|r| r.check_conditions(self)))
}
/// Gets all of the reactions this mix should do.
pub fn all_reactable(&self) -> Vec<ReactionIdentifier> {
with_reactions(|reactions| {
reactions
.iter()
.filter_map(|r| r.check_conditions(self).then(|| r.get_id()))
.collect()
})
}
/// Returns a tuple with oxidation power and fuel amount of this gas mixture.
pub fn get_burnability(&self) -> (f32, f32) {
use crate::types::FireInfo;
super::with_gas_info(|gas_info| {
self.moles
.iter()
.zip(gas_info)
.fold((0.0, 0.0), |mut acc, (&amt, this_gas_info)| {
if amt > GAS_MIN_MOLES {
match this_gas_info.fire_info {
FireInfo::Oxidation(oxidation) => {
if self.temperature > oxidation.temperature() {
let amount = amt
* (1.0 - oxidation.temperature() / self.temperature)
.max(0.0);
acc.0 += amount * oxidation.power();
}
}
FireInfo::Fuel(fire) => {
if self.temperature > fire.temperature() {
let amount = amt
* (1.0 - fire.temperature() / self.temperature).max(0.0);
acc.1 += amount / fire.burn_rate();
}
}
FireInfo::None => (),
}
}
acc
})
})
}
/// Returns only the oxidation power. Since this calculates burnability anyway, prefer `get_burnability`.
pub fn get_oxidation_power(&self) -> f32 {
self.get_burnability().0
}
/// Returns only fuel amount. Since this calculates burnability anyway, prefer `get_burnability`.
pub fn get_fuel_amount(&self) -> f32 {
self.get_burnability().1
}
/// Like `get_fire_info`, but takes a reference to a gas info vector,
/// so one doesn't need to do a recursive lock on the global list.
pub fn get_fire_info_with_lock(
&self,
gas_info: &[super::GasType],
) -> (Vec<SpecificFireInfo>, Vec<SpecificFireInfo>) {
use crate::types::FireInfo;
self.moles
.iter()
.zip(gas_info)
.enumerate()
.filter_map(|(i, (&amt, this_gas_info))| {
(amt > GAS_MIN_MOLES)
.then(|| match this_gas_info.fire_info {
FireInfo::Oxidation(oxidation) => (self.get_temperature()
> oxidation.temperature())
.then(|| {
let amount = amt
* (1.0 - oxidation.temperature() / self.get_temperature()).max(0.0);
Either::Right((i, amount, amount * oxidation.power()))
}),
FireInfo::Fuel(fuel) => {
(self.get_temperature() > fuel.temperature()).then(|| {
let amount = amt
* (1.0 - fuel.temperature() / self.get_temperature()).max(0.0);
Either::Left((i, amount, amount / fuel.burn_rate()))
})
}
FireInfo::None => None,
})
.flatten()
})
.partition_map(|r| r)
}
/// Returns two vectors:
/// The first contains all oxidizers in this list, as well as their actual mole amounts and how much fuel they can oxidize.
/// The second contains all fuel sources in this list, as well as their actual mole amounts and how much oxidizer they can react with.
pub fn get_fire_info(&self) -> (Vec<SpecificFireInfo>, Vec<SpecificFireInfo>) {
super::with_gas_info(|gas_info| self.get_fire_info_with_lock(gas_info))
}
/// Adds heat directly to the gas mixture, in joules (probably).
pub fn adjust_heat(&mut self, heat: f32) {
let cap = self.heat_capacity();
self.set_temperature(((cap * self.temperature) + heat) / cap);
}
/// Returns true if there's a visible gas in this mix.
pub fn is_visible(&self) -> bool {
self.enumerate()
.any(|(i, gas)| gas_visibility(i as usize).map_or(false, |amt| gas >= amt))
}
/// A hashed representation of the visibility of a gas, so that it only needs to update vis when actually changed.
pub fn vis_hash_changed(&self, gas_visibility: &[Option<f32>]) -> bool {
use std::hash::Hasher;
let mut hasher: ahash::AHasher = ahash::AHasher::default();
for (i, gas) in self.enumerate() {
if let Some(amt) = unsafe { gas_visibility.get_unchecked(i) }.filter(|&amt| gas >= amt)
{
hasher.write_usize(i);
hasher.write_usize((FACTOR_GAS_VISIBLE_MAX).min((gas / amt).ceil()) as usize);
}
}
let cur_hash = hasher.finish();
self.cached_vis_hash.0.swap(cur_hash, Relaxed) != cur_hash
}
    /// Removes all redundant zeroes from the gas mixture.
pub fn garbage_collect(&mut self) {
let mut last_valid_found = 0;
for (i, amt) in self.moles.iter_mut().enumerate() {
if *amt > GAS_MIN_MOLES {
last_valid_found = i;
} else {
*amt = 0.0;
}
}
self.moles.truncate(last_valid_found + 1);
}
}
use std::ops::{Add, Mul};
/// Takes a copy of the mix, merges the right hand side, then returns the copy.
impl Add<&Mixture> for Mixture {
type Output = Self;
fn add(self, rhs: &Mixture) -> Self {
let mut ret = self;
ret.merge(rhs);
ret
}
}
/// Takes a copy of the mix, merges the right hand side, then returns the copy.
impl<'a, 'b> Add<&'a Mixture> for &'b Mixture {
type Output = Mixture;
fn add(self, rhs: &Mixture) -> Mixture {
let mut ret = self.clone();
ret.merge(rhs);
ret
}
}
/// Makes a copy of the given mix, multiplied by a scalar.
impl Mul<f32> for Mixture {
type Output = Self;
fn mul(self, rhs: f32) -> Self {
let mut ret = self;
ret.multiply(rhs);
ret
}
}
/// Makes a copy of the given mix, multiplied by a scalar.
impl<'a> Mul<f32> for &'a Mixture {
type Output = Mixture;
fn mul(self, rhs: f32) -> Mixture {
let mut ret = self.clone();
ret.multiply(rhs);
ret
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_merge() {
let mut into = Mixture::new();
into.set_moles(0, 82.0);
into.set_moles(1, 22.0);
into.set_temperature(293.15);
let mut source = Mixture::new();
source.set_moles(3, 100.0);
source.set_temperature(313.15);
into.merge(&source);
        // make sure that the merge successfully moved the moles
assert_eq!(into.get_moles(3), 100.0);
assert_eq!(source.get_moles(3), 100.0); // source is not modified by merge
        /*
        make sure that the merge successfully changed the temperature of the mix merged into:
        test gases have heat capacities of 2,080 and 20,000 respectively, so total thermal energies of
        609,752 and 6,263,000 respectively once multiplied by temperatures. add those together,
        then divide by the new total heat capacity:
        (609,752 + 6,263,000)/(2,080 + 20,000) =
        6,872,752 / 22,080 ~
        311.265942
        so we compare to see if it's relatively close to 311.266, because of floating-point precision
        */
assert!(
(into.get_temperature() - 311.266).abs() < 0.01,
"{} should be near 311.266, is {}",
into.get_temperature(),
(into.get_temperature() - 311.266)
);
}
#[test]
fn test_remove() {
// also tests multiply, copy_from_mutable
let mut removed = Mixture::new();
removed.set_moles(0, 22.0);
removed.set_moles(1, 82.0);
let new = removed.remove_ratio(0.5);
assert_eq!(removed.compare(&new) >= MINIMUM_MOLES_DELTA_TO_MOVE, false);
assert_eq!(removed.get_moles(0), 11.0);
assert_eq!(removed.get_moles(1), 41.0);
removed.mark_immutable();
let new_two = removed.remove_ratio(0.5);
assert_eq!(
removed.compare(&new_two) >= MINIMUM_MOLES_DELTA_TO_MOVE,
true
);
assert_eq!(removed.get_moles(0), 11.0);
assert_eq!(removed.get_moles(1), 41.0);
assert_eq!(new_two.get_moles(0), 5.5);
}
}
| {
self.heat_capacity() * self.temperature
} | identifier_body |
mixture.rs | use itertools::{
Either,
EitherOrBoth::{Both, Left, Right},
Itertools,
};
use std::{
cell::Cell,
sync::atomic::{AtomicU64, Ordering::Relaxed},
};
use tinyvec::TinyVec;
use crate::reaction::ReactionIdentifier;
use super::{
constants::*, gas_visibility, total_num_gases, with_reactions, with_specific_heats, GasIDX,
};
type SpecificFireInfo = (usize, f32, f32);
struct VisHash(AtomicU64);
impl Clone for VisHash {
fn clone(&self) -> Self {
VisHash(AtomicU64::new(self.0.load(Relaxed)))
}
}
/// The data structure representing a Space Station 13 gas mixture.
/// Unlike Monstermos, this doesn't have the archive built-in; instead,
/// the archive is a feature of the turf grid, only existing during
/// turf processing.
/// Also missing is `last_share`; due to the usage of Rust,
/// processing no longer requires sleeping turfs. Instead, we're using
/// a proper, fully-simulated FDM system, much like LINDA but without
/// sleeping turfs.
#[derive(Clone)]
pub struct Mixture {
temperature: f32,
pub volume: f32,
min_heat_capacity: f32,
immutable: bool,
moles: TinyVec<[f32; 8]>,
cached_heat_capacity: Cell<Option<f32>>,
cached_vis_hash: VisHash,
}
/*
Cell is not thread-safe. However, we use it only for caching heat capacity. The worst-case race condition
is thus that threads A and B try to access the heat capacity at the same time; both find that it's currently
uncached, so both go to calculate it; both calculate it, and both arrive at the same value,
then one sets the cache to that value, then the other does.
Technically, a worse one would be thread A mutates the gas mixture, changing a gas amount,
while thread B tries to get its heat capacity; thread B finds a well-defined heat capacity,
which is not correct, and uses it for a calculation, but this cannot happen: thread A would
have a write lock, precluding thread B from accessing it.
*/
unsafe impl Sync for Mixture {}
impl Default for Mixture {
fn default() -> Self {
Self::new()
}
}
impl Mixture {
/// Makes an empty gas mixture.
pub fn new() -> Self {
Self {
moles: TinyVec::new(),
temperature: 2.7,
volume: 2500.0,
min_heat_capacity: 0.0,
immutable: false,
cached_heat_capacity: Cell::new(None),
cached_vis_hash: VisHash(AtomicU64::new(0)),
}
}
/// Makes an empty gas mixture with the given volume.
pub fn from_vol(vol: f32) -> Self {
let mut ret = Self::new();
ret.volume = vol;
ret
}
/// Returns if any data is corrupt.
pub fn is_corrupt(&self) -> bool {
!self.temperature.is_normal() || self.moles.len() > total_num_gases()
}
/// Fixes any corruption found.
pub fn fix_corruption(&mut self) {
self.garbage_collect();
if self.temperature < 2.7 || !self.temperature.is_normal() {
self.set_temperature(293.15);
}
}
/// Returns the temperature of the mix. T
pub fn get_temperature(&self) -> f32 {
self.temperature
}
/// Sets the temperature, if the mix isn't immutable. T
pub fn set_temperature(&mut self, temp: f32) {
if !self.immutable && temp.is_normal() {
self.temperature = temp;
}
}
/// Sets the minimum heat capacity of this mix.
pub fn set_min_heat_capacity(&mut self, amt: f32) {
self.min_heat_capacity = amt;
}
/// Returns an iterator over the gas keys and mole amounts thereof.
pub fn enumerate(&self) -> impl Iterator<Item = (GasIDX, f32)> + '_ {
self.moles.iter().copied().enumerate()
}
/// Allows closures to iterate over each gas.
pub fn for_each_gas(
&self,
mut f: impl FnMut(GasIDX, f32) -> Result<(), auxtools::Runtime>,
) -> Result<(), auxtools::Runtime> {
for (i, g) in self.enumerate() {
f(i, g)?;
}
Ok(())
}
/// Returns (by value) the amount of moles of a given index the mix has. M
pub fn get_moles(&self, idx: GasIDX) -> f32 {
self.moles.get(idx).copied().unwrap_or(0.0)
}
/// Sets the mix to be internally immutable. Rust doesn't know about any of this, obviously.
pub fn mark_immutable(&mut self) {
self.immutable = true;
}
/// Returns whether this gas mixture is immutable.
pub fn is_immutable(&self) -> bool {
self.immutable
}
fn maybe_expand(&mut self, size: usize) {
if self.moles.len() < size {
self.moles.resize(size, 0.0);
}
}
/// If mix is not immutable, sets the gas at the given `idx` to the given `amt`.
pub fn set_moles(&mut self, idx: GasIDX, amt: f32) {
if !self.immutable
&& idx < total_num_gases()
&& (idx <= self.moles.len() || (amt > GAS_MIN_MOLES && amt.is_normal()))
{
self.maybe_expand((idx + 1) as usize);
unsafe {
*self.moles.get_unchecked_mut(idx) = amt;
};
self.cached_heat_capacity.set(None);
}
}
pub fn adjust_moles(&mut self, idx: GasIDX, amt: f32) {
if !self.immutable && amt.is_normal() && idx < total_num_gases() {
self.maybe_expand((idx + 1) as usize);
let r = unsafe { self.moles.get_unchecked_mut(idx) };
*r += amt;
if amt < 0.0 {
self.garbage_collect();
}
self.cached_heat_capacity.set(None);
}
}
#[inline(never)] // mostly this makes it so that heat_capacity itself is inlined
fn slow_heat_capacity(&self) -> f32 {
let heat_cap = with_specific_heats(|heats| {
self.moles
.iter()
.copied()
.zip(heats.iter())
.fold(0.0, |acc, (amt, cap)| cap.mul_add(amt, acc))
})
.max(self.min_heat_capacity);
self.cached_heat_capacity.set(Some(heat_cap));
heat_cap
}
/// The heat capacity of the material. [joules?]/mole-kelvin.
pub fn heat_capacity(&self) -> f32 {
self.cached_heat_capacity
.get()
.filter(|cap| cap.is_finite() && cap.is_sign_positive())
.unwrap_or_else(|| self.slow_heat_capacity())
}
/// Heat capacity of exactly one gas in this mix.
pub fn partial_heat_capacity(&self, idx: GasIDX) -> f32 {
self.moles
.get(idx)
.filter(|amt| amt.is_normal())
.map_or(0.0, |amt| amt * with_specific_heats(|heats| heats[idx]))
}
/// The total mole count of the mixture. Moles.
pub fn total_moles(&self) -> f32 {
self.moles.iter().sum()
}
/// Pressure. Kilopascals.
pub fn return_pressure(&self) -> f32 {
self.total_moles() * R_IDEAL_GAS_EQUATION * self.temperature / self.volume
}
/// Thermal energy. Joules?
pub fn thermal_energy(&self) -> f32 {
self.heat_capacity() * self.temperature
}
/// Merges one gas mixture into another.
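    ///
    /// A usage sketch (gas indices `0` and `1` are assumed to be registered;
    /// `ignore`d since it needs the gas registry at runtime):
    ///
    /// ```ignore
    /// let mut a = Mixture::from_vol(2500.0);
    /// a.set_moles(0, 10.0);
    /// a.set_temperature(293.15);
    /// let mut b = Mixture::from_vol(2500.0);
    /// b.set_moles(1, 5.0);
    /// b.set_temperature(373.15);
    /// a.merge(&b);
    /// // `a` now holds both gases at the heat-capacity-weighted temperature;
    /// // `b` is left unchanged.
    /// assert_eq!(a.get_moles(1), 5.0);
    /// ```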
pub fn merge(&mut self, giver: &Self) {
if self.immutable {
return;
}
let our_heat_capacity = self.heat_capacity();
let other_heat_capacity = giver.heat_capacity();
self.maybe_expand(giver.moles.len());
for (a, b) in self.moles.iter_mut().zip(giver.moles.iter()) {
*a += b;
}
let combined_heat_capacity = our_heat_capacity + other_heat_capacity;
if combined_heat_capacity > MINIMUM_HEAT_CAPACITY {
self.set_temperature(
(our_heat_capacity * self.temperature + other_heat_capacity * giver.temperature)
/ (combined_heat_capacity),
);
}
self.cached_heat_capacity.set(Some(combined_heat_capacity));
}
/// Transfers only the given gases from us to another mix.
pub fn transfer_gases_to(&mut self, r: f32, gases: &[GasIDX], into: &mut Self) {
let ratio = r.clamp(0.0, 1.0);
let initial_energy = into.thermal_energy();
let mut heat_transfer = 0.0;
with_specific_heats(|heats| {
for i in gases.iter().copied() {
if let Some(orig) = self.moles.get_mut(i) {
let delta = *orig * ratio;
heat_transfer += delta * self.temperature * heats[i];
*orig -= delta;
into.adjust_moles(i, delta);
}
}
});
self.cached_heat_capacity.set(None);
into.cached_heat_capacity.set(None);
into.set_temperature((initial_energy + heat_transfer) / into.heat_capacity());
}
    /// Takes a percentage of this gas mixture's moles and puts it into another mixture. If this mix is mutable, those moles are also removed from the original.
pub fn remove_ratio_into(&mut self, mut ratio: f32, into: &mut Self) {
if ratio <= 0.0 {
return;
}
if ratio >= 1.0 {
ratio = 1.0;
}
let orig_temp = self.temperature;
into.copy_from_mutable(self);
into.multiply(ratio);
self.multiply(1.0 - ratio);
self.temperature = orig_temp;
into.temperature = orig_temp;
}
/// As `remove_ratio_into`, but a raw number of moles instead of a ratio.
pub fn remove_into(&mut self, amount: f32, into: &mut Self) {
self.remove_ratio_into(amount / self.total_moles(), into);
}
/// A convenience function that makes the mixture for `remove_ratio_into` on the spot and returns it.
pub fn remove_ratio(&mut self, ratio: f32) -> Self {
let mut removed = Self::from_vol(self.volume);
self.remove_ratio_into(ratio, &mut removed);
removed
}
/// Like `remove_ratio`, but with moles.
pub fn remove(&mut self, amount: f32) -> Self {
self.remove_ratio(amount / self.total_moles())
}
/// Copies from a given gas mixture, if we're mutable.
pub fn copy_from_mutable(&mut self, sample: &Self) {
if self.immutable {
return;
}
self.moles = sample.moles.clone();
self.temperature = sample.temperature;
self.cached_heat_capacity
.set(sample.cached_heat_capacity.get());
}
/// A very simple finite difference solution to the heat transfer equation.
/// Works well enough for our purposes, though perhaps called less often
/// than it ought to be while we're working in Rust.
/// Differs from the original by not using archive, since we don't put the archive into the gas mix itself anymore.
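    ///
    /// Concretely, with conduction coefficient `k`, temperatures `T_a`, `T_b`
    /// and heat capacities `C_a`, `C_b`, the heat moved per call is
    /// `heat = k * (T_a - T_b) * (C_a * C_b / (C_a + C_b))`,
    /// after which each mutable side's temperature is clamped to at least `TCMB`.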
pub fn temperature_share(&mut self, sharer: &mut Self, conduction_coefficient: f32) -> f32 {
let temperature_delta = self.temperature - sharer.temperature;
if temperature_delta.abs() > MINIMUM_TEMPERATURE_DELTA_TO_CONSIDER {
let self_heat_capacity = self.heat_capacity();
let sharer_heat_capacity = sharer.heat_capacity();
if sharer_heat_capacity > MINIMUM_HEAT_CAPACITY
&& self_heat_capacity > MINIMUM_HEAT_CAPACITY
{
let heat = conduction_coefficient
* temperature_delta * (self_heat_capacity * sharer_heat_capacity
/ (self_heat_capacity + sharer_heat_capacity));
if !self.immutable {
self.set_temperature((self.temperature - heat / self_heat_capacity).max(TCMB));
}
if !sharer.immutable {
sharer.set_temperature(
(sharer.temperature + heat / sharer_heat_capacity).max(TCMB),
);
}
}
}
sharer.temperature
}
/// As above, but you may put in any arbitrary coefficient, temp, heat capacity.
/// Only used for superconductivity as of right now.
pub fn temperature_share_non_gas(
&mut self,
conduction_coefficient: f32,
sharer_temperature: f32,
sharer_heat_capacity: f32,
) -> f32 {
let temperature_delta = self.temperature - sharer_temperature;
if temperature_delta.abs() > MINIMUM_TEMPERATURE_DELTA_TO_CONSIDER {
let self_heat_capacity = self.heat_capacity();
if sharer_heat_capacity > MINIMUM_HEAT_CAPACITY
&& self_heat_capacity > MINIMUM_HEAT_CAPACITY
{
let heat = conduction_coefficient
* temperature_delta * (self_heat_capacity * sharer_heat_capacity
/ (self_heat_capacity + sharer_heat_capacity));
if !self.immutable {
self.set_temperature((self.temperature - heat / self_heat_capacity).max(TCMB));
}
return (sharer_temperature + heat / sharer_heat_capacity).max(TCMB);
}
}
sharer_temperature
}
/// The second part of old compare(). Compares temperature, but only if this gas has sufficiently high moles.
pub fn temperature_compare(&self, sample: &Self) -> bool {
(self.get_temperature() - sample.get_temperature()).abs()
> MINIMUM_TEMPERATURE_DELTA_TO_SUSPEND
&& (self.total_moles() > MINIMUM_MOLES_DELTA_TO_MOVE)
}
/// Returns the maximum mole delta for an individual gas.
pub fn compare(&self, sample: &Self) -> f32 {
self.moles
.iter()
.copied()
.zip_longest(sample.moles.iter().copied())
.fold(0.0, |acc, pair| acc.max(pair.reduce(|a, b| (b - a).abs())))
}
pub fn compare_with(&self, sample: &Self, amt: f32) -> bool {
self.moles
.as_slice()
.iter()
.zip_longest(sample.moles.as_slice().iter())
.rev()
.any(|pair| match pair {
Left(a) => a >= &amt,
Right(b) => b >= &amt,
Both(a, b) => a != b && (a - b).abs() >= amt,
})
}
/// Clears the moles from the gas.
pub fn clear(&mut self) {
if !self.immutable {
self.moles.clear();
self.cached_heat_capacity.set(None);
}
}
/// Resets the gas mixture to an initialized-with-volume state.
pub fn clear_with_vol(&mut self, vol: f32) {
self.temperature = 2.7;
self.volume = vol;
self.min_heat_capacity = 0.0;
self.immutable = false;
self.clear();
}
/// Multiplies every gas molage with this value.
pub fn multiply(&mut self, multiplier: f32) {
if !self.immutable {
for amt in self.moles.iter_mut() {
*amt *= multiplier;
}
self.cached_heat_capacity.set(None);
self.garbage_collect();
}
}
    /// Checks if the mix can react with any reactions.
pub fn can_react(&self) -> bool {
with_reactions(|reactions| reactions.iter().any(|r| r.check_conditions(self)))
}
/// Gets all of the reactions this mix should do.
pub fn all_reactable(&self) -> Vec<ReactionIdentifier> {
with_reactions(|reactions| {
reactions
.iter()
.filter_map(|r| r.check_conditions(self).then(|| r.get_id()))
.collect()
})
}
/// Returns a tuple with oxidation power and fuel amount of this gas mixture.
pub fn get_burnability(&self) -> (f32, f32) {
use crate::types::FireInfo;
super::with_gas_info(|gas_info| {
self.moles
.iter()
.zip(gas_info)
.fold((0.0, 0.0), |mut acc, (&amt, this_gas_info)| {
if amt > GAS_MIN_MOLES {
match this_gas_info.fire_info {
FireInfo::Oxidation(oxidation) => {
if self.temperature > oxidation.temperature() {
let amount = amt
* (1.0 - oxidation.temperature() / self.temperature)
.max(0.0);
acc.0 += amount * oxidation.power();
}
}
FireInfo::Fuel(fire) => {
if self.temperature > fire.temperature() {
let amount = amt
* (1.0 - fire.temperature() / self.temperature).max(0.0);
acc.1 += amount / fire.burn_rate();
}
}
FireInfo::None => (),
}
}
acc
})
})
}
/// Returns only the oxidation power. Since this calculates burnability anyway, prefer `get_burnability`.
pub fn get_oxidation_power(&self) -> f32 {
self.get_burnability().0
}
/// Returns only fuel amount. Since this calculates burnability anyway, prefer `get_burnability`.
pub fn get_fuel_amount(&self) -> f32 {
self.get_burnability().1
}
/// Like `get_fire_info`, but takes a reference to a gas info vector,
/// so one doesn't need to do a recursive lock on the global list.
pub fn get_fire_info_with_lock(
&self,
gas_info: &[super::GasType],
) -> (Vec<SpecificFireInfo>, Vec<SpecificFireInfo>) {
use crate::types::FireInfo;
self.moles
.iter()
.zip(gas_info)
.enumerate()
.filter_map(|(i, (&amt, this_gas_info))| {
(amt > GAS_MIN_MOLES)
.then(|| match this_gas_info.fire_info {
FireInfo::Oxidation(oxidation) => (self.get_temperature()
> oxidation.temperature())
.then(|| {
let amount = amt
* (1.0 - oxidation.temperature() / self.get_temperature()).max(0.0);
Either::Right((i, amount, amount * oxidation.power()))
}),
FireInfo::Fuel(fuel) => {
(self.get_temperature() > fuel.temperature()).then(|| {
let amount = amt
* (1.0 - fuel.temperature() / self.get_temperature()).max(0.0);
Either::Left((i, amount, amount / fuel.burn_rate()))
})
}
FireInfo::None => None,
})
.flatten()
})
.partition_map(|r| r)
}
/// Returns two vectors:
/// The first contains all oxidizers in this list, as well as their actual mole amounts and how much fuel they can oxidize.
/// The second contains all fuel sources in this list, as well as their actual mole amounts and how much oxidizer they can react with.
pub fn get_fire_info(&self) -> (Vec<SpecificFireInfo>, Vec<SpecificFireInfo>) {
super::with_gas_info(|gas_info| self.get_fire_info_with_lock(gas_info))
}
/// Adds heat directly to the gas mixture, in joules (probably).
pub fn | (&mut self, heat: f32) {
let cap = self.heat_capacity();
self.set_temperature(((cap * self.temperature) + heat) / cap);
}
/// Returns true if there's a visible gas in this mix.
pub fn is_visible(&self) -> bool {
self.enumerate()
.any(|(i, gas)| gas_visibility(i as usize).map_or(false, |amt| gas >= amt))
}
/// A hashed representation of the visibility of a gas, so that it only needs to update vis when actually changed.
pub fn vis_hash_changed(&self, gas_visibility: &[Option<f32>]) -> bool {
use std::hash::Hasher;
let mut hasher: ahash::AHasher = ahash::AHasher::default();
for (i, gas) in self.enumerate() {
if let Some(amt) = unsafe { gas_visibility.get_unchecked(i) }.filter(|&amt| gas >= amt)
{
hasher.write_usize(i);
hasher.write_usize((FACTOR_GAS_VISIBLE_MAX).min((gas / amt).ceil()) as usize);
}
}
let cur_hash = hasher.finish();
self.cached_vis_hash.0.swap(cur_hash, Relaxed) != cur_hash
}
    /// Removes all redundant zeroes from the gas mixture.
pub fn garbage_collect(&mut self) {
let mut last_valid_found = 0;
for (i, amt) in self.moles.iter_mut().enumerate() {
if *amt > GAS_MIN_MOLES {
last_valid_found = i;
} else {
*amt = 0.0;
}
}
self.moles.truncate(last_valid_found + 1);
}
}
use std::ops::{Add, Mul};
/// Takes a copy of the mix, merges the right hand side, then returns the copy.
impl Add<&Mixture> for Mixture {
type Output = Self;
fn add(self, rhs: &Mixture) -> Self {
let mut ret = self;
ret.merge(rhs);
ret
}
}
/// Takes a copy of the mix, merges the right hand side, then returns the copy.
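///
/// An illustrative sketch (assumes `mix_a` and `mix_b` are existing mixtures;
/// `ignore`d since it needs the gas registry at runtime):
///
/// ```ignore
/// let combined = &mix_a + &mix_b; // neither operand is modified
/// let doubled = &combined * 2.0;
/// ```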
impl<'a, 'b> Add<&'a Mixture> for &'b Mixture {
type Output = Mixture;
fn add(self, rhs: &Mixture) -> Mixture {
let mut ret = self.clone();
ret.merge(rhs);
ret
}
}
/// Makes a copy of the given mix, multiplied by a scalar.
impl Mul<f32> for Mixture {
type Output = Self;
fn mul(self, rhs: f32) -> Self {
let mut ret = self;
ret.multiply(rhs);
ret
}
}
/// Makes a copy of the given mix, multiplied by a scalar.
impl<'a> Mul<f32> for &'a Mixture {
type Output = Mixture;
fn mul(self, rhs: f32) -> Mixture {
let mut ret = self.clone();
ret.multiply(rhs);
ret
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_merge() {
let mut into = Mixture::new();
into.set_moles(0, 82.0);
into.set_moles(1, 22.0);
into.set_temperature(293.15);
let mut source = Mixture::new();
source.set_moles(3, 100.0);
source.set_temperature(313.15);
into.merge(&source);
        // make sure that the merge successfully moved the moles
assert_eq!(into.get_moles(3), 100.0);
assert_eq!(source.get_moles(3), 100.0); // source is not modified by merge
        /*
        make sure that the merge successfully changed the temperature of the mix merged into:
        test gases have heat capacities of 2,080 and 20,000 respectively, so total thermal energies of
        609,752 and 6,263,000 respectively once multiplied by temperatures. add those together,
        then divide by the new total heat capacity:
        (609,752 + 6,263,000)/(2,080 + 20,000) =
        6,872,752 / 22,080 ~
        311.265942
        so we compare to see if it's relatively close to 311.266, because of floating-point precision
        */
assert!(
(into.get_temperature() - 311.266).abs() < 0.01,
"{} should be near 311.266, is {}",
into.get_temperature(),
(into.get_temperature() - 311.266)
);
}
#[test]
fn test_remove() {
// also tests multiply, copy_from_mutable
let mut removed = Mixture::new();
removed.set_moles(0, 22.0);
removed.set_moles(1, 82.0);
let new = removed.remove_ratio(0.5);
assert_eq!(removed.compare(&new) >= MINIMUM_MOLES_DELTA_TO_MOVE, false);
assert_eq!(removed.get_moles(0), 11.0);
assert_eq!(removed.get_moles(1), 41.0);
removed.mark_immutable();
let new_two = removed.remove_ratio(0.5);
assert_eq!(
removed.compare(&new_two) >= MINIMUM_MOLES_DELTA_TO_MOVE,
true
);
assert_eq!(removed.get_moles(0), 11.0);
assert_eq!(removed.get_moles(1), 41.0);
assert_eq!(new_two.get_moles(0), 5.5);
}
}
| adjust_heat | identifier_name |
object.rs | //! Represents an object in PHP. Allows for overriding the internal object used
//! by classes, allowing users to store Rust data inside a PHP object.
use std::{convert::TryInto, fmt::Debug, ops::DerefMut};
use crate::{
boxed::{ZBox, ZBoxable},
class::RegisteredClass,
convert::{FromZendObject, FromZval, FromZvalMut, IntoZval},
error::{Error, Result},
ffi::{
ext_php_rs_zend_object_release, zend_call_known_function, zend_object, zend_objects_new,
HashTable, ZEND_ISEMPTY, ZEND_PROPERTY_EXISTS, ZEND_PROPERTY_ISSET,
},
flags::DataType,
rc::PhpRc,
types::{ZendClassObject, ZendStr, Zval},
zend::{ce, ClassEntry, ExecutorGlobals, ZendObjectHandlers},
};
/// A PHP object.
///
/// This type does not maintain any information about its type, for example,
/// classes which have associated Rust structs cannot be accessed through this
/// type. [`ZendClassObject`] is used for this purpose, and you can convert
/// between the two.
pub type ZendObject = zend_object;
impl ZendObject {
/// Creates a new [`ZendObject`], returned inside an [`ZBox<ZendObject>`]
/// wrapper.
///
/// # Parameters
///
/// * `ce` - The type of class the new object should be an instance of.
///
/// # Panics
///
/// Panics when allocating memory for the new object fails.
pub fn new(ce: &ClassEntry) -> ZBox<Self> {
// SAFETY: Using emalloc to allocate memory inside Zend arena. Casting `ce` to
// `*mut` is valid as the function will not mutate `ce`.
unsafe {
let ptr = zend_objects_new(ce as *const _ as *mut _);
ZBox::from_raw(
ptr.as_mut()
.expect("Failed to allocate memory for Zend object"),
)
}
}
/// Creates a new `stdClass` instance, returned inside an
/// [`ZBox<ZendObject>`] wrapper.
///
/// # Panics
///
/// Panics if allocating memory for the object fails, or if the `stdClass`
/// class entry has not been registered with PHP yet.
///
/// # Example
///
/// ```no_run
/// use ext_php_rs::types::ZendObject;
///
/// let mut obj = ZendObject::new_stdclass();
///
/// obj.set_property("hello", "world");
/// ```
pub fn new_stdclass() -> ZBox<Self> {
// SAFETY: This will be `NULL` until it is initialized. `as_ref()` checks for
// null, so we can panic if it's null.
Self::new(ce::stdclass())
}
/// Converts a class object into an owned [`ZendObject`]. This removes any
/// possibility of accessing the underlying attached Rust struct.
pub fn from_class_object<T: RegisteredClass>(obj: ZBox<ZendClassObject<T>>) -> ZBox<Self> {
let this = obj.into_raw();
// SAFETY: Consumed box must produce a well-aligned non-null pointer.
unsafe { ZBox::from_raw(this.get_mut_zend_obj()) }
}
/// Attempts to retrieve the class name of the object.
pub fn get_class_name(&self) -> Result<String> {
unsafe {
self.handlers()?
.get_class_name
.and_then(|f| f(self).as_ref())
.ok_or(Error::InvalidScope)
.and_then(|s| s.try_into())
}
}
/// Checks if the given object is an instance of a registered class with
/// Rust type `T`.
pub fn is_instance<T: RegisteredClass>(&self) -> bool {
(self.ce as *const ClassEntry).eq(&(T::get_metadata().ce() as *const _))
}
/// Attempts to read a property from the Object. Returns a result containing
/// the value of the property if it exists and can be read, and an
/// [`Error`] otherwise.
///
/// # Parameters
///
/// * `name` - The name of the property.
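    ///
    /// # Example
    ///
    /// An illustrative sketch (`String` is used as the target type here):
    ///
    /// ```no_run
    /// use ext_php_rs::types::ZendObject;
    ///
    /// let mut obj = ZendObject::new_stdclass();
    /// obj.set_property("hello", "world");
    /// let value: String = obj.get_property("hello").unwrap();
    /// assert_eq!(value, "world");
    /// ```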
pub fn get_property<'a, T>(&'a self, name: &str) -> Result<T>
where
T: FromZval<'a>,
{
if !self.has_property(name, PropertyQuery::Exists)? {
return Err(Error::InvalidProperty);
}
let mut name = ZendStr::new(name, false)?;
let mut rv = Zval::new();
let zv = unsafe {
self.handlers()?.read_property.ok_or(Error::InvalidScope)?(
self.mut_ptr(),
name.deref_mut(),
1,
std::ptr::null_mut(),
&mut rv,
)
.as_ref()
}
.ok_or(Error::InvalidScope)?;
T::from_zval(zv).ok_or_else(|| Error::ZvalConversion(zv.get_type()))
}
/// Attempts to set a property on the object.
///
/// # Parameters
///
/// * `name` - The name of the property.
/// * `value` - The value to set the property to.
pub fn set_property(&mut self, name: &str, value: impl IntoZval) -> Result<()> {
let mut name = ZendStr::new(name, false)?;
let mut value = value.into_zval(false)?;
unsafe {
self.handlers()?.write_property.ok_or(Error::InvalidScope)?(
self,
name.deref_mut(),
&mut value,
std::ptr::null_mut(),
)
.as_ref()
}
.ok_or(Error::InvalidScope)?;
Ok(())
}
/// Checks if a property exists on an object. Takes a property name and
/// query parameter, which defines what classifies if a property exists
/// or not. See [`PropertyQuery`] for more information.
///
/// # Parameters
///
/// * `name` - The name of the property.
/// * `query` - The 'query' to classify if a property exists.
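    ///
    /// # Example
    ///
    /// An illustrative sketch (the import path for [`PropertyQuery`] is assumed):
    ///
    /// ```no_run
    /// use ext_php_rs::types::{PropertyQuery, ZendObject};
    ///
    /// let mut obj = ZendObject::new_stdclass();
    /// obj.set_property("name", "ext-php-rs");
    /// assert!(obj.has_property("name", PropertyQuery::Exists).unwrap());
    /// ```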
pub fn has_property(&self, name: &str, query: PropertyQuery) -> Result<bool> {
let mut name = ZendStr::new(name, false)?;
Ok(unsafe {
self.handlers()?.has_property.ok_or(Error::InvalidScope)?(
self.mut_ptr(),
name.deref_mut(),
query as _,
std::ptr::null_mut(),
)
} > 0)
}
/// Attempts to retrieve the properties of the object. Returned inside a
/// Zend Hashtable.
pub fn get_properties(&self) -> Result<&HashTable> {
unsafe {
self.handlers()?
.get_properties
.and_then(|props| props(self.mut_ptr()).as_ref())
.ok_or(Error::InvalidScope)
}
}
/// Extracts some type from a Zend object.
///
/// This is a wrapper function around `FromZendObject::extract()`.
pub fn extract<'a, T>(&'a self) -> Result<T>
where
T: FromZendObject<'a>,
{
T::from_zend_object(self)
}
/// Attempts to retrieve a reference to the object handlers.
#[inline]
unsafe fn handlers(&self) -> Result<&ZendObjectHandlers> {
self.handlers.as_ref().ok_or(Error::InvalidScope)
}
/// Returns a mutable pointer to `self`, regardless of the type of
/// reference. Only to be used in situations where a C function requires
/// a mutable pointer but does not modify the underlying data.
#[inline]
fn mut_ptr(&self) -> *mut Self {
(self as *const Self) as *mut Self
}
}
unsafe impl ZBoxable for ZendObject {
fn free(&mut self) {
unsafe { ext_php_rs_zend_object_release(self) }
}
}
impl Debug for ZendObject {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut dbg = f.debug_struct(
self.get_class_name()
.unwrap_or_else(|_| "ZendObject".to_string())
.as_str(),
);
if let Ok(props) = self.get_properties() {
for (id, key, val) in props.iter() {
dbg.field(key.unwrap_or_else(|| id.to_string()).as_str(), val);
}
}
dbg.finish()
}
}
impl<'a> FromZval<'a> for &'a ZendObject {
const TYPE: DataType = DataType::Object(None);
fn | (zval: &'a Zval) -> Option<Self> {
zval.object()
}
}
impl<'a> FromZvalMut<'a> for &'a mut ZendObject {
const TYPE: DataType = DataType::Object(None);
fn from_zval_mut(zval: &'a mut Zval) -> Option<Self> {
zval.object_mut()
}
}
impl IntoZval for ZBox<ZendObject> {
const TYPE: DataType = DataType::Object(None);
#[inline]
fn set_zval(mut self, zv: &mut Zval, _: bool) -> Result<()> {
// We must decrement the refcounter on the object before inserting into the
// zval, as the reference counter will be incremented on add.
        // NOTE(david): again, is this needed? We increment in `set_object`.
self.dec_count();
zv.set_object(self.into_raw());
Ok(())
}
}
impl<'a> IntoZval for &'a mut ZendObject {
const TYPE: DataType = DataType::Object(None);
#[inline]
fn set_zval(self, zv: &mut Zval, _: bool) -> Result<()> {
zv.set_object(self);
Ok(())
}
}
impl FromZendObject<'_> for String {
fn from_zend_object(obj: &ZendObject) -> Result<Self> {
let mut ret = Zval::new();
unsafe {
zend_call_known_function(
(*obj.ce).__tostring,
obj as *const _ as *mut _,
obj.ce,
&mut ret,
0,
std::ptr::null_mut(),
std::ptr::null_mut(),
);
}
if let Some(err) = ExecutorGlobals::take_exception() {
// TODO: become an error
let class_name = obj.get_class_name();
panic!(
"Uncaught exception during call to {}::__toString(): {:?}",
class_name.expect("unable to determine class name"),
err
);
} else if let Some(output) = ret.extract() {
Ok(output)
} else {
// TODO: become an error
let class_name = obj.get_class_name();
panic!(
"{}::__toString() must return a string",
class_name.expect("unable to determine class name"),
);
}
}
}
impl<T: RegisteredClass> From<ZBox<ZendClassObject<T>>> for ZBox<ZendObject> {
#[inline]
fn from(obj: ZBox<ZendClassObject<T>>) -> Self {
ZendObject::from_class_object(obj)
}
}
/// Different ways to query if a property exists.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(u32)]
pub enum PropertyQuery {
/// Property exists and is not NULL.
Isset = ZEND_PROPERTY_ISSET,
/// Property is not empty.
NotEmpty = ZEND_ISEMPTY,
/// Property exists.
Exists = ZEND_PROPERTY_EXISTS,
}
| from_zval | identifier_name |
object.rs | //! Represents an object in PHP. Allows for overriding the internal object used
//! by classes, allowing users to store Rust data inside a PHP object.
use std::{convert::TryInto, fmt::Debug, ops::DerefMut};
use crate::{
boxed::{ZBox, ZBoxable},
class::RegisteredClass,
convert::{FromZendObject, FromZval, FromZvalMut, IntoZval},
error::{Error, Result},
ffi::{
ext_php_rs_zend_object_release, zend_call_known_function, zend_object, zend_objects_new,
HashTable, ZEND_ISEMPTY, ZEND_PROPERTY_EXISTS, ZEND_PROPERTY_ISSET,
},
flags::DataType,
rc::PhpRc,
types::{ZendClassObject, ZendStr, Zval},
zend::{ce, ClassEntry, ExecutorGlobals, ZendObjectHandlers},
};
/// A PHP object.
///
/// This type does not maintain any information about its type, for example,
/// classes which have associated Rust structs cannot be accessed through this
/// type. [`ZendClassObject`] is used for this purpose, and you can convert
/// between the two.
pub type ZendObject = zend_object;
impl ZendObject {
/// Creates a new [`ZendObject`], returned inside an [`ZBox<ZendObject>`]
/// wrapper.
///
/// # Parameters
///
/// * `ce` - The type of class the new object should be an instance of.
///
/// # Panics
///
/// Panics when allocating memory for the new object fails.
pub fn new(ce: &ClassEntry) -> ZBox<Self> {
// SAFETY: Using emalloc to allocate memory inside Zend arena. Casting `ce` to
// `*mut` is valid as the function will not mutate `ce`.
unsafe {
let ptr = zend_objects_new(ce as *const _ as *mut _);
ZBox::from_raw(
ptr.as_mut()
.expect("Failed to allocate memory for Zend object"),
)
}
}
/// Creates a new `stdClass` instance, returned inside an
/// [`ZBox<ZendObject>`] wrapper.
///
/// # Panics
///
/// Panics if allocating memory for the object fails, or if the `stdClass`
/// class entry has not been registered with PHP yet.
///
/// # Example
///
/// ```no_run
/// use ext_php_rs::types::ZendObject;
///
/// let mut obj = ZendObject::new_stdclass();
///
/// obj.set_property("hello", "world");
/// ```
pub fn new_stdclass() -> ZBox<Self> {
// SAFETY: This will be `NULL` until it is initialized. `as_ref()` checks for
// null, so we can panic if it's null.
Self::new(ce::stdclass())
}
/// Converts a class object into an owned [`ZendObject`]. This removes any
/// possibility of accessing the underlying attached Rust struct.
pub fn from_class_object<T: RegisteredClass>(obj: ZBox<ZendClassObject<T>>) -> ZBox<Self> {
let this = obj.into_raw();
// SAFETY: Consumed box must produce a well-aligned non-null pointer.
unsafe { ZBox::from_raw(this.get_mut_zend_obj()) }
}
/// Attempts to retrieve the class name of the object.
pub fn get_class_name(&self) -> Result<String> {
unsafe {
self.handlers()?
.get_class_name
.and_then(|f| f(self).as_ref())
.ok_or(Error::InvalidScope)
.and_then(|s| s.try_into())
}
}
/// Checks if the given object is an instance of a registered class with
/// Rust type `T`.
pub fn is_instance<T: RegisteredClass>(&self) -> bool {
(self.ce as *const ClassEntry).eq(&(T::get_metadata().ce() as *const _))
}
/// Attempts to read a property from the Object. Returns a result containing
/// the value of the property if it exists and can be read, and an
/// [`Error`] otherwise.
///
/// # Parameters
///
/// * `name` - The name of the property.
pub fn get_property<'a, T>(&'a self, name: &str) -> Result<T>
where
T: FromZval<'a>,
{
if !self.has_property(name, PropertyQuery::Exists)? {
return Err(Error::InvalidProperty);
}
let mut name = ZendStr::new(name, false)?;
let mut rv = Zval::new();
let zv = unsafe {
self.handlers()?.read_property.ok_or(Error::InvalidScope)?(
self.mut_ptr(),
name.deref_mut(),
1,
std::ptr::null_mut(), | .as_ref()
}
.ok_or(Error::InvalidScope)?;
T::from_zval(zv).ok_or_else(|| Error::ZvalConversion(zv.get_type()))
}
/// Attempts to set a property on the object.
///
/// # Parameters
///
/// * `name` - The name of the property.
/// * `value` - The value to set the property to.
pub fn set_property(&mut self, name: &str, value: impl IntoZval) -> Result<()> {
let mut name = ZendStr::new(name, false)?;
let mut value = value.into_zval(false)?;
unsafe {
self.handlers()?.write_property.ok_or(Error::InvalidScope)?(
self,
name.deref_mut(),
&mut value,
std::ptr::null_mut(),
)
.as_ref()
}
.ok_or(Error::InvalidScope)?;
Ok(())
}
/// Checks if a property exists on an object. Takes a property name and
/// query parameter, which defines what classifies if a property exists
/// or not. See [`PropertyQuery`] for more information.
///
/// # Parameters
///
/// * `name` - The name of the property.
/// * `query` - The 'query' to classify if a property exists.
pub fn has_property(&self, name: &str, query: PropertyQuery) -> Result<bool> {
let mut name = ZendStr::new(name, false)?;
Ok(unsafe {
self.handlers()?.has_property.ok_or(Error::InvalidScope)?(
self.mut_ptr(),
name.deref_mut(),
query as _,
std::ptr::null_mut(),
)
} > 0)
}
/// Attempts to retrieve the properties of the object. Returned inside a
/// Zend Hashtable.
pub fn get_properties(&self) -> Result<&HashTable> {
unsafe {
self.handlers()?
.get_properties
.and_then(|props| props(self.mut_ptr()).as_ref())
.ok_or(Error::InvalidScope)
}
}
/// Extracts some type from a Zend object.
///
/// This is a wrapper function around `FromZendObject::extract()`.
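    ///
    /// An illustrative sketch: `String` can be extracted because this module
    /// implements [`FromZendObject`] for it via the object's `__toString()`.
    ///
    /// ```no_run
    /// use ext_php_rs::types::ZendObject;
    ///
    /// fn stringify(obj: &ZendObject) -> String {
    ///     obj.extract().expect("object is not convertible to a string")
    /// }
    /// ```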
pub fn extract<'a, T>(&'a self) -> Result<T>
where
T: FromZendObject<'a>,
{
T::from_zend_object(self)
}
/// Attempts to retrieve a reference to the object handlers.
#[inline]
unsafe fn handlers(&self) -> Result<&ZendObjectHandlers> {
self.handlers.as_ref().ok_or(Error::InvalidScope)
}
/// Returns a mutable pointer to `self`, regardless of the type of
/// reference. Only to be used in situations where a C function requires
/// a mutable pointer but does not modify the underlying data.
#[inline]
fn mut_ptr(&self) -> *mut Self {
(self as *const Self) as *mut Self
}
}
unsafe impl ZBoxable for ZendObject {
fn free(&mut self) {
unsafe { ext_php_rs_zend_object_release(self) }
}
}
impl Debug for ZendObject {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut dbg = f.debug_struct(
self.get_class_name()
.unwrap_or_else(|_| "ZendObject".to_string())
.as_str(),
);
if let Ok(props) = self.get_properties() {
for (id, key, val) in props.iter() {
dbg.field(key.unwrap_or_else(|| id.to_string()).as_str(), val);
}
}
dbg.finish()
}
}
impl<'a> FromZval<'a> for &'a ZendObject {
const TYPE: DataType = DataType::Object(None);
fn from_zval(zval: &'a Zval) -> Option<Self> {
zval.object()
}
}
impl<'a> FromZvalMut<'a> for &'a mut ZendObject {
const TYPE: DataType = DataType::Object(None);
fn from_zval_mut(zval: &'a mut Zval) -> Option<Self> {
zval.object_mut()
}
}
impl IntoZval for ZBox<ZendObject> {
const TYPE: DataType = DataType::Object(None);
#[inline]
fn set_zval(mut self, zv: &mut Zval, _: bool) -> Result<()> {
// We must decrement the refcounter on the object before inserting into the
// zval, as the reference counter will be incremented on add.
        // NOTE(david): again, is this needed? We increment in `set_object`.
self.dec_count();
zv.set_object(self.into_raw());
Ok(())
}
}
impl<'a> IntoZval for &'a mut ZendObject {
const TYPE: DataType = DataType::Object(None);
#[inline]
fn set_zval(self, zv: &mut Zval, _: bool) -> Result<()> {
zv.set_object(self);
Ok(())
}
}
impl FromZendObject<'_> for String {
fn from_zend_object(obj: &ZendObject) -> Result<Self> {
let mut ret = Zval::new();
unsafe {
zend_call_known_function(
(*obj.ce).__tostring,
obj as *const _ as *mut _,
obj.ce,
&mut ret,
0,
std::ptr::null_mut(),
std::ptr::null_mut(),
);
}
if let Some(err) = ExecutorGlobals::take_exception() {
// TODO: become an error
let class_name = obj.get_class_name();
panic!(
"Uncaught exception during call to {}::__toString(): {:?}",
class_name.expect("unable to determine class name"),
err
);
} else if let Some(output) = ret.extract() {
Ok(output)
} else {
// TODO: become an error
let class_name = obj.get_class_name();
panic!(
"{}::__toString() must return a string",
class_name.expect("unable to determine class name"),
);
}
}
}
impl<T: RegisteredClass> From<ZBox<ZendClassObject<T>>> for ZBox<ZendObject> {
#[inline]
fn from(obj: ZBox<ZendClassObject<T>>) -> Self {
ZendObject::from_class_object(obj)
}
}
/// Different ways to query if a property exists.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(u32)]
pub enum PropertyQuery {
/// Property exists and is not NULL.
Isset = ZEND_PROPERTY_ISSET,
/// Property is not empty.
NotEmpty = ZEND_ISEMPTY,
/// Property exists.
Exists = ZEND_PROPERTY_EXISTS,
} | &mut rv,
) | random_line_split |
object.rs | //! Represents an object in PHP. Allows for overriding the internal object used
//! by classes, allowing users to store Rust data inside a PHP object.
use std::{convert::TryInto, fmt::Debug, ops::DerefMut};
use crate::{
boxed::{ZBox, ZBoxable},
class::RegisteredClass,
convert::{FromZendObject, FromZval, FromZvalMut, IntoZval},
error::{Error, Result},
ffi::{
ext_php_rs_zend_object_release, zend_call_known_function, zend_object, zend_objects_new,
HashTable, ZEND_ISEMPTY, ZEND_PROPERTY_EXISTS, ZEND_PROPERTY_ISSET,
},
flags::DataType,
rc::PhpRc,
types::{ZendClassObject, ZendStr, Zval},
zend::{ce, ClassEntry, ExecutorGlobals, ZendObjectHandlers},
};
/// A PHP object.
///
/// This type does not maintain any information about its type, for example,
/// classes which have associated Rust structs cannot be accessed through this
/// type. [`ZendClassObject`] is used for this purpose, and you can convert
/// between the two.
pub type ZendObject = zend_object;
impl ZendObject {
/// Creates a new [`ZendObject`], returned inside an [`ZBox<ZendObject>`]
/// wrapper.
///
/// # Parameters
///
/// * `ce` - The type of class the new object should be an instance of.
///
/// # Panics
///
/// Panics when allocating memory for the new object fails.
pub fn new(ce: &ClassEntry) -> ZBox<Self> {
// SAFETY: Using emalloc to allocate memory inside Zend arena. Casting `ce` to
// `*mut` is valid as the function will not mutate `ce`.
unsafe {
let ptr = zend_objects_new(ce as *const _ as *mut _);
ZBox::from_raw(
ptr.as_mut()
.expect("Failed to allocate memory for Zend object"),
)
}
}
/// Creates a new `stdClass` instance, returned inside an
/// [`ZBox<ZendObject>`] wrapper.
///
/// # Panics
///
/// Panics if allocating memory for the object fails, or if the `stdClass`
/// class entry has not been registered with PHP yet.
///
/// # Example
///
/// ```no_run
/// use ext_php_rs::types::ZendObject;
///
/// let mut obj = ZendObject::new_stdclass();
///
/// obj.set_property("hello", "world");
/// ```
pub fn new_stdclass() -> ZBox<Self> {
// SAFETY: This will be `NULL` until it is initialized. `as_ref()` checks for
// null, so we can panic if it's null.
Self::new(ce::stdclass())
}
/// Converts a class object into an owned [`ZendObject`]. This removes any
/// possibility of accessing the underlying attached Rust struct.
pub fn from_class_object<T: RegisteredClass>(obj: ZBox<ZendClassObject<T>>) -> ZBox<Self> {
let this = obj.into_raw();
// SAFETY: Consumed box must produce a well-aligned non-null pointer.
unsafe { ZBox::from_raw(this.get_mut_zend_obj()) }
}
/// Attempts to retrieve the class name of the object.
pub fn get_class_name(&self) -> Result<String> {
unsafe {
self.handlers()?
.get_class_name
.and_then(|f| f(self).as_ref())
.ok_or(Error::InvalidScope)
.and_then(|s| s.try_into())
}
}
/// Checks if the given object is an instance of a registered class with
/// Rust type `T`.
pub fn is_instance<T: RegisteredClass>(&self) -> bool {
(self.ce as *const ClassEntry).eq(&(T::get_metadata().ce() as *const _))
}
/// Attempts to read a property from the Object. Returns a result containing
/// the value of the property if it exists and can be read, and an
/// [`Error`] otherwise.
///
/// # Parameters
///
/// * `name` - The name of the property.
pub fn get_property<'a, T>(&'a self, name: &str) -> Result<T>
where
T: FromZval<'a>,
{
if !self.has_property(name, PropertyQuery::Exists)? {
return Err(Error::InvalidProperty);
}
let mut name = ZendStr::new(name, false)?;
let mut rv = Zval::new();
let zv = unsafe {
self.handlers()?.read_property.ok_or(Error::InvalidScope)?(
self.mut_ptr(),
name.deref_mut(),
1,
std::ptr::null_mut(),
&mut rv,
)
.as_ref()
}
.ok_or(Error::InvalidScope)?;
T::from_zval(zv).ok_or_else(|| Error::ZvalConversion(zv.get_type()))
}
/// Attempts to set a property on the object.
///
/// # Parameters
///
/// * `name` - The name of the property.
/// * `value` - The value to set the property to.
pub fn set_property(&mut self, name: &str, value: impl IntoZval) -> Result<()> {
let mut name = ZendStr::new(name, false)?;
let mut value = value.into_zval(false)?;
unsafe {
self.handlers()?.write_property.ok_or(Error::InvalidScope)?(
self,
name.deref_mut(),
&mut value,
std::ptr::null_mut(),
)
.as_ref()
}
.ok_or(Error::InvalidScope)?;
Ok(())
}
/// Checks if a property exists on an object. Takes a property name and
/// query parameter, which defines what classifies if a property exists
/// or not. See [`PropertyQuery`] for more information.
///
/// # Parameters
///
/// * `name` - The name of the property.
/// * `query` - The 'query' to classify if a property exists.
pub fn has_property(&self, name: &str, query: PropertyQuery) -> Result<bool> {
let mut name = ZendStr::new(name, false)?;
Ok(unsafe {
self.handlers()?.has_property.ok_or(Error::InvalidScope)?(
self.mut_ptr(),
name.deref_mut(),
query as _,
std::ptr::null_mut(),
)
} > 0)
}
/// Attempts to retrieve the properties of the object. Returned inside a
/// Zend Hashtable.
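    ///
    /// An illustrative sketch, mirroring how the `Debug` impl below walks the
    /// returned table:
    ///
    /// ```no_run
    /// use ext_php_rs::types::ZendObject;
    ///
    /// let mut obj = ZendObject::new_stdclass();
    /// obj.set_property("hello", "world");
    /// for (id, key, val) in obj.get_properties().unwrap().iter() {
    ///     println!("{}: {:?}", key.unwrap_or_else(|| id.to_string()), val);
    /// }
    /// ```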
pub fn get_properties(&self) -> Result<&HashTable> {
unsafe {
self.handlers()?
.get_properties
.and_then(|props| props(self.mut_ptr()).as_ref())
.ok_or(Error::InvalidScope)
}
}
/// Extracts some type from a Zend object.
///
/// This is a wrapper function around `FromZendObject::extract()`.
pub fn extract<'a, T>(&'a self) -> Result<T>
where
T: FromZendObject<'a>,
{
T::from_zend_object(self)
}
/// Attempts to retrieve a reference to the object handlers.
#[inline]
unsafe fn handlers(&self) -> Result<&ZendObjectHandlers> {
self.handlers.as_ref().ok_or(Error::InvalidScope)
}
/// Returns a mutable pointer to `self`, regardless of the type of
/// reference. Only to be used in situations where a C function requires
/// a mutable pointer but does not modify the underlying data.
#[inline]
fn mut_ptr(&self) -> *mut Self {
(self as *const Self) as *mut Self
}
}
unsafe impl ZBoxable for ZendObject {
fn free(&mut self) {
unsafe { ext_php_rs_zend_object_release(self) }
}
}
impl Debug for ZendObject {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut dbg = f.debug_struct(
self.get_class_name()
.unwrap_or_else(|_| "ZendObject".to_string())
.as_str(),
);
if let Ok(props) = self.get_properties() {
for (id, key, val) in props.iter() {
dbg.field(key.unwrap_or_else(|| id.to_string()).as_str(), val);
}
}
dbg.finish()
}
}
impl<'a> FromZval<'a> for &'a ZendObject {
const TYPE: DataType = DataType::Object(None);
fn from_zval(zval: &'a Zval) -> Option<Self> {
zval.object()
}
}
impl<'a> FromZvalMut<'a> for &'a mut ZendObject {
const TYPE: DataType = DataType::Object(None);
fn from_zval_mut(zval: &'a mut Zval) -> Option<Self> {
zval.object_mut()
}
}
impl IntoZval for ZBox<ZendObject> {
const TYPE: DataType = DataType::Object(None);
#[inline]
fn set_zval(mut self, zv: &mut Zval, _: bool) -> Result<()> {
// We must decrement the refcounter on the object before inserting into the
// zval, as the reference counter will be incremented on add.
// NOTE(david): again is this needed, we increment in `set_object`.
self.dec_count();
zv.set_object(self.into_raw());
Ok(())
}
}
impl<'a> IntoZval for &'a mut ZendObject {
const TYPE: DataType = DataType::Object(None);
#[inline]
fn set_zval(self, zv: &mut Zval, _: bool) -> Result<()> {
zv.set_object(self);
Ok(())
}
}
impl FromZendObject<'_> for String {
fn from_zend_object(obj: &ZendObject) -> Result<Self> {
let mut ret = Zval::new();
unsafe {
zend_call_known_function(
(*obj.ce).__tostring,
obj as *const _ as *mut _,
obj.ce,
&mut ret,
0,
std::ptr::null_mut(),
std::ptr::null_mut(),
);
}
if let Some(err) = ExecutorGlobals::take_exception() {
// TODO: become an error
let class_name = obj.get_class_name();
panic!(
"Uncaught exception during call to {}::__toString(): {:?}",
class_name.expect("unable to determine class name"),
err
);
} else if let Some(output) = ret.extract() {
Ok(output)
} else |
}
}
impl<T: RegisteredClass> From<ZBox<ZendClassObject<T>>> for ZBox<ZendObject> {
#[inline]
fn from(obj: ZBox<ZendClassObject<T>>) -> Self {
ZendObject::from_class_object(obj)
}
}
/// Different ways to query if a property exists.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(u32)]
pub enum PropertyQuery {
/// Property exists and is not NULL.
Isset = ZEND_PROPERTY_ISSET,
/// Property is not empty.
NotEmpty = ZEND_ISEMPTY,
/// Property exists.
Exists = ZEND_PROPERTY_EXISTS,
}
| {
// TODO: become an error
let class_name = obj.get_class_name();
panic!(
"{}::__toString() must return a string",
class_name.expect("unable to determine class name"),
);
} | conditional_block |
object.rs | //! Represents an object in PHP. Allows for overriding the internal object used
//! by classes, allowing users to store Rust data inside a PHP object.
use std::{convert::TryInto, fmt::Debug, ops::DerefMut};
use crate::{
boxed::{ZBox, ZBoxable},
class::RegisteredClass,
convert::{FromZendObject, FromZval, FromZvalMut, IntoZval},
error::{Error, Result},
ffi::{
ext_php_rs_zend_object_release, zend_call_known_function, zend_object, zend_objects_new,
HashTable, ZEND_ISEMPTY, ZEND_PROPERTY_EXISTS, ZEND_PROPERTY_ISSET,
},
flags::DataType,
rc::PhpRc,
types::{ZendClassObject, ZendStr, Zval},
zend::{ce, ClassEntry, ExecutorGlobals, ZendObjectHandlers},
};
/// A PHP object.
///
/// This type does not maintain any information about its type, for example,
/// classes which have associated Rust structs cannot be accessed through this
/// type. [`ZendClassObject`] is used for this purpose, and you can convert
/// between the two.
pub type ZendObject = zend_object;
impl ZendObject {
/// Creates a new [`ZendObject`], returned inside an [`ZBox<ZendObject>`]
/// wrapper.
///
/// # Parameters
///
/// * `ce` - The type of class the new object should be an instance of.
///
/// # Panics
///
/// Panics when allocating memory for the new object fails.
pub fn new(ce: &ClassEntry) -> ZBox<Self> {
// SAFETY: Using emalloc to allocate memory inside Zend arena. Casting `ce` to
// `*mut` is valid as the function will not mutate `ce`.
unsafe {
let ptr = zend_objects_new(ce as *const _ as *mut _);
ZBox::from_raw(
ptr.as_mut()
.expect("Failed to allocate memory for Zend object"),
)
}
}
/// Creates a new `stdClass` instance, returned inside an
/// [`ZBox<ZendObject>`] wrapper.
///
/// # Panics
///
/// Panics if allocating memory for the object fails, or if the `stdClass`
/// class entry has not been registered with PHP yet.
///
/// # Example
///
/// ```no_run
/// use ext_php_rs::types::ZendObject;
///
/// let mut obj = ZendObject::new_stdclass();
///
/// obj.set_property("hello", "world");
/// ```
pub fn new_stdclass() -> ZBox<Self> {
// SAFETY: This will be `NULL` until it is initialized. `as_ref()` checks for
// null, so we can panic if it's null.
Self::new(ce::stdclass())
}
/// Converts a class object into an owned [`ZendObject`]. This removes any
/// possibility of accessing the underlying attached Rust struct.
pub fn from_class_object<T: RegisteredClass>(obj: ZBox<ZendClassObject<T>>) -> ZBox<Self> {
let this = obj.into_raw();
// SAFETY: Consumed box must produce a well-aligned non-null pointer.
unsafe { ZBox::from_raw(this.get_mut_zend_obj()) }
}
/// Attempts to retrieve the class name of the object.
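    ///
    /// An illustrative sketch (a plain `stdClass` instance reports the class
    /// name `"stdClass"`):
    ///
    /// ```no_run
    /// use ext_php_rs::types::ZendObject;
    ///
    /// let obj = ZendObject::new_stdclass();
    /// assert_eq!(obj.get_class_name().unwrap(), "stdClass");
    /// ```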
pub fn get_class_name(&self) -> Result<String> {
unsafe {
self.handlers()?
.get_class_name
.and_then(|f| f(self).as_ref())
.ok_or(Error::InvalidScope)
.and_then(|s| s.try_into())
}
}
/// Checks if the given object is an instance of a registered class with
/// Rust type `T`.
pub fn is_instance<T: RegisteredClass>(&self) -> bool {
(self.ce as *const ClassEntry).eq(&(T::get_metadata().ce() as *const _))
}
/// Attempts to read a property from the Object. Returns a result containing
/// the value of the property if it exists and can be read, and an
/// [`Error`] otherwise.
///
/// # Parameters
///
/// * `name` - The name of the property.
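///
/// # Example
///
/// A minimal sketch, assuming the property was set beforehand and that its
/// value converts to a Rust `String`:
///
/// ```no_run
/// use ext_php_rs::types::ZendObject;
///
/// let mut obj = ZendObject::new_stdclass();
/// obj.set_property("hello", "world");
///
/// let hello: String = obj.get_property("hello").unwrap();
/// ```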
pub fn get_property<'a, T>(&'a self, name: &str) -> Result<T>
where
T: FromZval<'a>,
{
if !self.has_property(name, PropertyQuery::Exists)? {
return Err(Error::InvalidProperty);
}
let mut name = ZendStr::new(name, false)?;
let mut rv = Zval::new();
let zv = unsafe {
self.handlers()?.read_property.ok_or(Error::InvalidScope)?(
self.mut_ptr(),
name.deref_mut(),
1,
std::ptr::null_mut(),
&mut rv,
)
.as_ref()
}
.ok_or(Error::InvalidScope)?;
T::from_zval(zv).ok_or_else(|| Error::ZvalConversion(zv.get_type()))
}
/// Attempts to set a property on the object.
///
/// # Parameters
///
/// * `name` - The name of the property.
/// * `value` - The value to set the property to.
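///
/// # Example
///
/// A minimal sketch mirroring the `new_stdclass` example above:
///
/// ```no_run
/// use ext_php_rs::types::ZendObject;
///
/// let mut obj = ZendObject::new_stdclass();
/// obj.set_property("answer", 42);
/// ```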
pub fn set_property(&mut self, name: &str, value: impl IntoZval) -> Result<()> {
let mut name = ZendStr::new(name, false)?;
let mut value = value.into_zval(false)?;
unsafe {
self.handlers()?.write_property.ok_or(Error::InvalidScope)?(
self,
name.deref_mut(),
&mut value,
std::ptr::null_mut(),
)
.as_ref()
}
.ok_or(Error::InvalidScope)?;
Ok(())
}
/// Checks if a property exists on an object. Takes a property name and
/// query parameter, which defines what classifies if a property exists
/// or not. See [`PropertyQuery`] for more information.
///
/// # Parameters
///
/// * `name` - The name of the property.
/// * `query` - The 'query' to classify if a property exists.
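///
/// # Example
///
/// A minimal sketch using [`PropertyQuery::Exists`]; the `PropertyQuery`
/// import path is assumed here and may need adjusting.
///
/// ```no_run
/// use ext_php_rs::types::{PropertyQuery, ZendObject};
///
/// let mut obj = ZendObject::new_stdclass();
/// obj.set_property("hello", "world");
/// assert!(obj.has_property("hello", PropertyQuery::Exists).unwrap());
/// ```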
pub fn has_property(&self, name: &str, query: PropertyQuery) -> Result<bool> {
let mut name = ZendStr::new(name, false)?;
Ok(unsafe {
self.handlers()?.has_property.ok_or(Error::InvalidScope)?(
self.mut_ptr(),
name.deref_mut(),
query as _,
std::ptr::null_mut(),
)
} > 0)
}
/// Attempts to retrieve the properties of the object. Returned inside a
/// Zend Hashtable.
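///
/// # Example
///
/// A minimal sketch; the iterator item shape follows the [`Debug`] impl
/// further down in this file.
///
/// ```no_run
/// use ext_php_rs::types::ZendObject;
///
/// let mut obj = ZendObject::new_stdclass();
/// obj.set_property("hello", "world");
///
/// if let Ok(props) = obj.get_properties() {
///     for (_id, _key, _val) in props.iter() {
///         // `_key` is `None` for integer keys.
///     }
/// }
/// ```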
pub fn get_properties(&self) -> Result<&HashTable> {
unsafe {
self.handlers()?
.get_properties
.and_then(|props| props(self.mut_ptr()).as_ref())
.ok_or(Error::InvalidScope)
}
}
/// Extracts some type from a Zend object.
///
/// This is a wrapper function around `FromZendObject::from_zend_object()`.
pub fn extract<'a, T>(&'a self) -> Result<T>
where
T: FromZendObject<'a>,
{
T::from_zend_object(self)
}
/// Attempts to retrieve a reference to the object handlers.
#[inline]
unsafe fn handlers(&self) -> Result<&ZendObjectHandlers> {
self.handlers.as_ref().ok_or(Error::InvalidScope)
}
/// Returns a mutable pointer to `self`, regardless of the type of
/// reference. Only to be used in situations where a C function requires
/// a mutable pointer but does not modify the underlying data.
#[inline]
fn mut_ptr(&self) -> *mut Self {
(self as *const Self) as *mut Self
}
}
unsafe impl ZBoxable for ZendObject {
fn free(&mut self) {
unsafe { ext_php_rs_zend_object_release(self) }
}
}
impl Debug for ZendObject {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut dbg = f.debug_struct(
self.get_class_name()
.unwrap_or_else(|_| "ZendObject".to_string())
.as_str(),
);
if let Ok(props) = self.get_properties() {
for (id, key, val) in props.iter() {
dbg.field(key.unwrap_or_else(|| id.to_string()).as_str(), val);
}
}
dbg.finish()
}
}
impl<'a> FromZval<'a> for &'a ZendObject {
const TYPE: DataType = DataType::Object(None);
fn from_zval(zval: &'a Zval) -> Option<Self> {
zval.object()
}
}
impl<'a> FromZvalMut<'a> for &'a mut ZendObject {
const TYPE: DataType = DataType::Object(None);
fn from_zval_mut(zval: &'a mut Zval) -> Option<Self> {
zval.object_mut()
}
}
impl IntoZval for ZBox<ZendObject> {
const TYPE: DataType = DataType::Object(None);
#[inline]
fn set_zval(mut self, zv: &mut Zval, _: bool) -> Result<()> {
// We must decrement the refcounter on the object before inserting into the
// zval, as the reference counter will be incremented on add.
// NOTE(david): again, is this needed? We increment in `set_object`.
self.dec_count();
zv.set_object(self.into_raw());
Ok(())
}
}
impl<'a> IntoZval for &'a mut ZendObject {
const TYPE: DataType = DataType::Object(None);
#[inline]
fn set_zval(self, zv: &mut Zval, _: bool) -> Result<()> |
}
impl FromZendObject<'_> for String {
fn from_zend_object(obj: &ZendObject) -> Result<Self> {
let mut ret = Zval::new();
unsafe {
zend_call_known_function(
(*obj.ce).__tostring,
obj as *const _ as *mut _,
obj.ce,
&mut ret,
0,
std::ptr::null_mut(),
std::ptr::null_mut(),
);
}
if let Some(err) = ExecutorGlobals::take_exception() {
// TODO: become an error
let class_name = obj.get_class_name();
panic!(
"Uncaught exception during call to {}::__toString(): {:?}",
class_name.expect("unable to determine class name"),
err
);
} else if let Some(output) = ret.extract() {
Ok(output)
} else {
// TODO: become an error
let class_name = obj.get_class_name();
panic!(
"{}::__toString() must return a string",
class_name.expect("unable to determine class name"),
);
}
}
}
impl<T: RegisteredClass> From<ZBox<ZendClassObject<T>>> for ZBox<ZendObject> {
#[inline]
fn from(obj: ZBox<ZendClassObject<T>>) -> Self {
ZendObject::from_class_object(obj)
}
}
/// Different ways to query if a property exists.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(u32)]
pub enum PropertyQuery {
/// Property exists and is not NULL.
Isset = ZEND_PROPERTY_ISSET,
/// Property is not empty.
NotEmpty = ZEND_ISEMPTY,
/// Property exists.
Exists = ZEND_PROPERTY_EXISTS,
}
| {
zv.set_object(self);
Ok(())
} | identifier_body |
provider.go | package provider
import (
"context"
"fmt"
"log"
"os"
"strings"
"github.com/hashicorp/go-azure-helpers/authentication"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/hashicorp/terraform-provider-azurerm/internal/clients"
"github.com/hashicorp/terraform-provider-azurerm/internal/resourceproviders"
"github.com/hashicorp/terraform-provider-azurerm/internal/sdk"
"github.com/hashicorp/terraform-provider-azurerm/utils"
)
func AzureProvider() *schema.Provider {
return azureProvider(false)
}
func TestAzureProvider() *schema.Provider {
return azureProvider(true)
}
func ValidatePartnerID(i interface{}, k string) ([]string, []error) {
// ValidatePartnerID checks if partner_id is any of the following:
// * a valid UUID - will add "pid-" prefix to the ID if it is not already present
// * a valid UUID prefixed with "pid-"
// * a valid UUID prefixed with "pid-" and suffixed with "-partnercenter"
debugLog := func(f string, v ...interface{}) {
if os.Getenv("TF_LOG") == "" {
return
}
if os.Getenv("TF_ACC") != "" {
return
}
log.Printf(f, v...)
}
v, ok := i.(string)
if !ok {
return nil, []error{fmt.Errorf("expected type of %q to be string", k)}
}
if v == "" {
return nil, nil
}
// Check for pid=<guid>-partnercenter format
if strings.HasPrefix(v, "pid-") && strings.HasSuffix(v, "-partnercenter") {
g := strings.TrimPrefix(v, "pid-")
g = strings.TrimSuffix(g, "-partnercenter")
if _, err := validation.IsUUID(g, ""); err != nil {
return nil, []error{fmt.Errorf("expected %q to contain a valid UUID", v)}
}
debugLog("[DEBUG] %q partner_id matches pid-<GUID>-partnercenter...", v)
return nil, nil
}
// Check for pid=<guid> (without the -partnercenter suffix)
if strings.HasPrefix(v, "pid-") && !strings.HasSuffix(v, "-partnercenter") {
g := strings.TrimPrefix(v, "pid-")
if _, err := validation.IsUUID(g, ""); err != nil {
return nil, []error{fmt.Errorf("expected %q to be a valid UUID", k)}
}
debugLog("[DEBUG] %q partner_id matches pid-<GUID>...", v)
return nil, nil
}
// Check for straight UUID
if _, err := validation.IsUUID(v, ""); err != nil {
return nil, []error{fmt.Errorf("expected %q to be a valid UUID", k)}
} else {
debugLog("[DEBUG] %q partner_id is an un-prefixed UUID...", v)
return nil, nil
}
}
func azureProvider(supportLegacyTestSuite bool) *schema.Provider {
// avoids this showing up in test output
debugLog := func(f string, v ...interface{}) {
if os.Getenv("TF_LOG") == "" {
return
}
if os.Getenv("TF_ACC") != "" {
return
}
log.Printf(f, v...)
}
dataSources := make(map[string]*schema.Resource)
resources := make(map[string]*schema.Resource)
// first handle the typed services
for _, service := range SupportedTypedServices() {
debugLog("[DEBUG] Registering Data Sources for %q..", service.Name())
for _, ds := range service.DataSources() {
key := ds.ResourceType()
if existing := dataSources[key]; existing != nil {
panic(fmt.Sprintf("An existing Data Source exists for %q", key))
}
wrapper := sdk.NewDataSourceWrapper(ds)
dataSource, err := wrapper.DataSource()
if err != nil {
panic(fmt.Errorf("creating Wrapper for Data Source %q: %+v", key, err))
}
dataSources[key] = dataSource
}
debugLog("[DEBUG] Registering Resources for %q..", service.Name())
for _, r := range service.Resources() {
key := r.ResourceType()
if existing := resources[key]; existing != nil {
panic(fmt.Sprintf("An existing Resource exists for %q", key))
}
wrapper := sdk.NewResourceWrapper(r)
resource, err := wrapper.Resource()
if err != nil |
resources[key] = resource
}
}
// then handle the untyped services
for _, service := range SupportedUntypedServices() {
debugLog("[DEBUG] Registering Data Sources for %q..", service.Name())
for k, v := range service.SupportedDataSources() {
if existing := dataSources[k]; existing != nil {
panic(fmt.Sprintf("An existing Data Source exists for %q", k))
}
dataSources[k] = v
}
debugLog("[DEBUG] Registering Resources for %q..", service.Name())
for k, v := range service.SupportedResources() {
if existing := resources[k]; existing != nil {
panic(fmt.Sprintf("An existing Resource exists for %q", k))
}
resources[k] = v
}
}
p := &schema.Provider{
Schema: map[string]*schema.Schema{
"subscription_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""),
Description: "The Subscription ID which should be used.",
},
"client_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
Description: "The Client ID which should be used.",
},
"tenant_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
Description: "The Tenant ID which should be used.",
},
"auxiliary_tenant_ids": {
Type: schema.TypeList,
Optional: true,
MaxItems: 3,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"environment": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"),
Description: "The Cloud Environment which should be used. Possible values are public, usgovernment, and china. Defaults to public.",
},
"metadata_host": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_METADATA_HOSTNAME", ""),
Description: "The Hostname which should be used for the Azure Metadata Service.",
},
// Client Certificate specific fields
"client_certificate_path": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PATH", ""),
Description: "The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service Principal using a Client Certificate.",
},
"client_certificate_password": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PASSWORD", ""),
Description: "The password associated with the Client Certificate. For use when authenticating as a Service Principal using a Client Certificate",
},
// Client Secret specific fields
"client_secret": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
Description: "The Client Secret which should be used. For use When authenticating as a Service Principal using a Client Secret.",
},
// OIDC specific fields
"oidc_request_token": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_TOKEN", "ACTIONS_ID_TOKEN_REQUEST_TOKEN"}, ""),
Description: "The bearer token for the request to the OIDC provider. For use when authenticating as a Service Principal using OpenID Connect.",
},
"oidc_request_url": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_URL", "ACTIONS_ID_TOKEN_REQUEST_URL"}, ""),
Description: "The URL for the OIDC provider from which to request an ID token. For use when authenticating as a Service Principal using OpenID Connect.",
},
"oidc_token": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN", ""),
Description: "The OIDC ID token for use when authenticating as a Service Principal using OpenID Connect.",
},
"oidc_token_file_path": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN_FILE_PATH", ""),
Description: "The path to a file containing an OIDC ID token for use when authenticating as a Service Principal using OpenID Connect.",
},
"use_oidc": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_USE_OIDC", false),
Description: "Allow OpenID Connect to be used for authentication",
},
// Managed Service Identity specific fields
"use_msi": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_USE_MSI", false),
Description: "Allowed Managed Service Identity be used for Authentication.",
},
"msi_endpoint": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_MSI_ENDPOINT", ""),
Description: "The path to a custom endpoint for Managed Service Identity - in most circumstances this should be detected automatically. ",
},
// Managed Tracking GUID for User-agent
"partner_id": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.Any(ValidatePartnerID, validation.StringIsEmpty),
DefaultFunc: schema.EnvDefaultFunc("ARM_PARTNER_ID", ""),
Description: "A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.",
},
"disable_correlation_request_id": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_DISABLE_CORRELATION_REQUEST_ID", false),
Description: "This will disable the x-ms-correlation-request-id header.",
},
"disable_terraform_partner_id": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_DISABLE_TERRAFORM_PARTNER_ID", false),
Description: "This will disable the Terraform Partner ID which is used if a custom `partner_id` isn't specified.",
},
"features": schemaFeatures(supportLegacyTestSuite),
// Advanced feature flags
"skip_provider_registration": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SKIP_PROVIDER_REGISTRATION", false),
Description: "Should the AzureRM Provider skip registering all of the Resource Providers that it supports, if they're not already registered?",
},
"storage_use_azuread": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_STORAGE_USE_AZUREAD", false),
Description: "Should the AzureRM Provider use AzureAD to access the Storage Data Plane API's?",
},
},
DataSourcesMap: dataSources,
ResourcesMap: resources,
}
p.ConfigureContextFunc = providerConfigure(p)
return p
}
func providerConfigure(p *schema.Provider) schema.ConfigureContextFunc {
return func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) {
var auxTenants []string
if v, ok := d.Get("auxiliary_tenant_ids").([]interface{}); ok && len(v) > 0 {
auxTenants = *utils.ExpandStringSlice(v)
} else if v := os.Getenv("ARM_AUXILIARY_TENANT_IDS"); v != "" {
auxTenants = strings.Split(v, ";")
}
if len(auxTenants) > 3 {
return nil, diag.Errorf("The provider only supports 3 auxiliary tenant IDs")
}
metadataHost := d.Get("metadata_host").(string)
builder := &authentication.Builder{
SubscriptionID: d.Get("subscription_id").(string),
ClientID: d.Get("client_id").(string),
ClientSecret: d.Get("client_secret").(string),
TenantID: d.Get("tenant_id").(string),
AuxiliaryTenantIDs: auxTenants,
Environment: d.Get("environment").(string),
MetadataHost: metadataHost,
MsiEndpoint: d.Get("msi_endpoint").(string),
ClientCertPassword: d.Get("client_certificate_password").(string),
ClientCertPath: d.Get("client_certificate_path").(string),
IDTokenRequestToken: d.Get("oidc_request_token").(string),
IDTokenRequestURL: d.Get("oidc_request_url").(string),
IDToken: d.Get("oidc_token").(string),
IDTokenFilePath: d.Get("oidc_token_file_path").(string),
// Feature Toggles
SupportsClientCertAuth: true,
SupportsClientSecretAuth: true,
SupportsOIDCAuth: d.Get("use_oidc").(bool),
SupportsManagedServiceIdentity: d.Get("use_msi").(bool),
SupportsAzureCliToken: true,
SupportsAuxiliaryTenants: len(auxTenants) > 0,
// Doc Links
ClientSecretDocsLink: "https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/guides/service_principal_client_secret",
// Use MSAL
UseMicrosoftGraph: true,
}
config, err := builder.Build()
if err != nil {
return nil, diag.Errorf("building AzureRM Client: %s", err)
}
terraformVersion := p.TerraformVersion
if terraformVersion == "" {
// Terraform 0.12 introduced this field to the protocol
// We can therefore assume that if it's missing it's 0.10 or 0.11
terraformVersion = "0.11+compatible"
}
skipProviderRegistration := d.Get("skip_provider_registration").(bool)
clientBuilder := clients.ClientBuilder{
AuthConfig: config,
SkipProviderRegistration: skipProviderRegistration,
TerraformVersion: terraformVersion,
PartnerId: d.Get("partner_id").(string),
DisableCorrelationRequestID: d.Get("disable_correlation_request_id").(bool),
DisableTerraformPartnerID: d.Get("disable_terraform_partner_id").(bool),
Features: expandFeatures(d.Get("features").([]interface{})),
StorageUseAzureAD: d.Get("storage_use_azuread").(bool),
// this field is intentionally not exposed in the provider block, since it's only used for
// platform level tracing
CustomCorrelationRequestID: os.Getenv("ARM_CORRELATION_REQUEST_ID"),
}
//lint:ignore SA1019 SDKv2 migration - staticcheck's own linter directives are currently being ignored under golangci-lint
stopCtx, ok := schema.StopContext(ctx) //nolint:staticcheck
if !ok {
stopCtx = ctx
}
client, err := clients.Build(stopCtx, clientBuilder)
if err != nil {
return nil, diag.FromErr(err)
}
client.StopContext = stopCtx
if !skipProviderRegistration {
// List all the available providers and their registration state to avoid unnecessary
// requests. This also lets us check if the provider credentials are correct.
providerList, err := client.Resource.ProvidersClient.List(ctx, nil, "")
if err != nil {
return nil, diag.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+
"credentials or the service principal does not have permission to use the Resource Manager API, Azure "+
"error: %s", err)
}
availableResourceProviders := providerList.Values()
requiredResourceProviders := resourceproviders.Required()
if err := resourceproviders.EnsureRegistered(ctx, *client.Resource.ProvidersClient, availableResourceProviders, requiredResourceProviders); err != nil {
return nil, diag.Errorf(resourceProviderRegistrationErrorFmt, err)
}
}
return client, nil
}
}
const resourceProviderRegistrationErrorFmt = `Error ensuring Resource Providers are registered.
Terraform automatically attempts to register the Resource Providers it supports to
ensure it's able to provision resources.
If you don't have permission to register Resource Providers you may wish to use the
"skip_provider_registration" flag in the Provider block to disable this functionality.
Please note that if you opt out of Resource Provider Registration and Terraform tries
to provision a resource from a Resource Provider which is unregistered, then the errors
may appear misleading - for example:
> API version 2019-XX-XX was not found for Microsoft.Foo
This could indicate that the Resource Provider "Microsoft.Foo" requires registration,
but it could also indicate that this Azure Region doesn't support this API version.
More information on the "skip_provider_registration" flag can be found here:
https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#skip_provider_registration
Original Error: %s`
| {
panic(fmt.Errorf("creating Wrapper for Resource %q: %+v", key, err))
} | conditional_block |
provider.go | package provider
import (
"context"
"fmt"
"log"
"os"
"strings"
"github.com/hashicorp/go-azure-helpers/authentication"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/hashicorp/terraform-provider-azurerm/internal/clients"
"github.com/hashicorp/terraform-provider-azurerm/internal/resourceproviders"
"github.com/hashicorp/terraform-provider-azurerm/internal/sdk"
"github.com/hashicorp/terraform-provider-azurerm/utils"
)
func AzureProvider() *schema.Provider {
return azureProvider(false)
}
func TestAzureProvider() *schema.Provider {
return azureProvider(true)
}
func ValidatePartnerID(i interface{}, k string) ([]string, []error) {
// ValidatePartnerID checks if partner_id is any of the following:
// * a valid UUID - will add "pid-" prefix to the ID if it is not already present
// * a valid UUID prefixed with "pid-"
// * a valid UUID prefixed with "pid-" and suffixed with "-partnercenter"
debugLog := func(f string, v ...interface{}) {
if os.Getenv("TF_LOG") == "" {
return
}
if os.Getenv("TF_ACC") != "" {
return
}
log.Printf(f, v...)
}
v, ok := i.(string)
if !ok {
return nil, []error{fmt.Errorf("expected type of %q to be string", k)}
}
if v == "" {
return nil, nil
}
// Check for pid=<guid>-partnercenter format
if strings.HasPrefix(v, "pid-") && strings.HasSuffix(v, "-partnercenter") {
g := strings.TrimPrefix(v, "pid-")
g = strings.TrimSuffix(g, "-partnercenter")
if _, err := validation.IsUUID(g, ""); err != nil {
return nil, []error{fmt.Errorf("expected %q to contain a valid UUID", v)}
}
debugLog("[DEBUG] %q partner_id matches pid-<GUID>-partnercenter...", v)
return nil, nil
}
// Check for pid=<guid> (without the -partnercenter suffix)
if strings.HasPrefix(v, "pid-") && !strings.HasSuffix(v, "-partnercenter") {
g := strings.TrimPrefix(v, "pid-")
if _, err := validation.IsUUID(g, ""); err != nil {
return nil, []error{fmt.Errorf("expected %q to be a valid UUID", k)}
}
debugLog("[DEBUG] %q partner_id matches pid-<GUID>...", v)
return nil, nil
}
// Check for straight UUID
if _, err := validation.IsUUID(v, ""); err != nil {
return nil, []error{fmt.Errorf("expected %q to be a valid UUID", k)}
} else {
debugLog("[DEBUG] %q partner_id is an un-prefixed UUID...", v)
return nil, nil
}
}
func azureProvider(supportLegacyTestSuite bool) *schema.Provider {
// avoids this showing up in test output
debugLog := func(f string, v ...interface{}) {
if os.Getenv("TF_LOG") == "" {
return
}
if os.Getenv("TF_ACC") != "" {
return
}
log.Printf(f, v...)
}
dataSources := make(map[string]*schema.Resource)
resources := make(map[string]*schema.Resource)
// first handle the typed services
for _, service := range SupportedTypedServices() {
debugLog("[DEBUG] Registering Data Sources for %q..", service.Name())
for _, ds := range service.DataSources() {
key := ds.ResourceType()
if existing := dataSources[key]; existing != nil {
panic(fmt.Sprintf("An existing Data Source exists for %q", key))
}
wrapper := sdk.NewDataSourceWrapper(ds)
dataSource, err := wrapper.DataSource()
if err != nil {
panic(fmt.Errorf("creating Wrapper for Data Source %q: %+v", key, err))
}
dataSources[key] = dataSource
}
debugLog("[DEBUG] Registering Resources for %q..", service.Name())
for _, r := range service.Resources() {
key := r.ResourceType()
if existing := resources[key]; existing != nil {
panic(fmt.Sprintf("An existing Resource exists for %q", key))
}
wrapper := sdk.NewResourceWrapper(r)
resource, err := wrapper.Resource()
if err != nil {
panic(fmt.Errorf("creating Wrapper for Resource %q: %+v", key, err))
}
resources[key] = resource
}
}
// then handle the untyped services
for _, service := range SupportedUntypedServices() {
debugLog("[DEBUG] Registering Data Sources for %q..", service.Name())
for k, v := range service.SupportedDataSources() {
if existing := dataSources[k]; existing != nil {
panic(fmt.Sprintf("An existing Data Source exists for %q", k))
}
dataSources[k] = v
}
debugLog("[DEBUG] Registering Resources for %q..", service.Name())
for k, v := range service.SupportedResources() {
if existing := resources[k]; existing != nil {
panic(fmt.Sprintf("An existing Resource exists for %q", k))
}
resources[k] = v
}
}
p := &schema.Provider{
Schema: map[string]*schema.Schema{
"subscription_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""),
Description: "The Subscription ID which should be used.",
},
"client_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
Description: "The Client ID which should be used.",
},
"tenant_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
Description: "The Tenant ID which should be used.",
},
"auxiliary_tenant_ids": {
Type: schema.TypeList,
Optional: true,
MaxItems: 3,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"environment": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"),
Description: "The Cloud Environment which should be used. Possible values are public, usgovernment, and china. Defaults to public.",
},
"metadata_host": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_METADATA_HOSTNAME", ""),
Description: "The Hostname which should be used for the Azure Metadata Service.",
},
// Client Certificate specific fields
"client_certificate_path": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PATH", ""),
Description: "The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service Principal using a Client Certificate.",
},
"client_certificate_password": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PASSWORD", ""),
Description: "The password associated with the Client Certificate. For use when authenticating as a Service Principal using a Client Certificate",
},
// Client Secret specific fields
"client_secret": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
Description: "The Client Secret which should be used. For use When authenticating as a Service Principal using a Client Secret.",
},
// OIDC specific fields
"oidc_request_token": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_TOKEN", "ACTIONS_ID_TOKEN_REQUEST_TOKEN"}, ""),
Description: "The bearer token for the request to the OIDC provider. For use when authenticating as a Service Principal using OpenID Connect.",
},
"oidc_request_url": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_URL", "ACTIONS_ID_TOKEN_REQUEST_URL"}, ""),
Description: "The URL for the OIDC provider from which to request an ID token. For use when authenticating as a Service Principal using OpenID Connect.",
},
"oidc_token": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN", ""),
Description: "The OIDC ID token for use when authenticating as a Service Principal using OpenID Connect.",
},
"oidc_token_file_path": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN_FILE_PATH", ""),
Description: "The path to a file containing an OIDC ID token for use when authenticating as a Service Principal using OpenID Connect.",
},
"use_oidc": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_USE_OIDC", false),
Description: "Allow OpenID Connect to be used for authentication",
},
// Managed Service Identity specific fields
"use_msi": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_USE_MSI", false),
Description: "Allowed Managed Service Identity be used for Authentication.",
},
"msi_endpoint": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_MSI_ENDPOINT", ""),
Description: "The path to a custom endpoint for Managed Service Identity - in most circumstances this should be detected automatically. ",
},
// Managed Tracking GUID for User-agent
"partner_id": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.Any(ValidatePartnerID, validation.StringIsEmpty),
DefaultFunc: schema.EnvDefaultFunc("ARM_PARTNER_ID", ""),
Description: "A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.",
},
"disable_correlation_request_id": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_DISABLE_CORRELATION_REQUEST_ID", false),
Description: "This will disable the x-ms-correlation-request-id header.",
},
"disable_terraform_partner_id": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_DISABLE_TERRAFORM_PARTNER_ID", false),
Description: "This will disable the Terraform Partner ID which is used if a custom `partner_id` isn't specified.",
},
"features": schemaFeatures(supportLegacyTestSuite),
// Advanced feature flags
"skip_provider_registration": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SKIP_PROVIDER_REGISTRATION", false),
Description: "Should the AzureRM Provider skip registering all of the Resource Providers that it supports, if they're not already registered?",
},
"storage_use_azuread": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_STORAGE_USE_AZUREAD", false),
Description: "Should the AzureRM Provider use AzureAD to access the Storage Data Plane API's?",
},
},
DataSourcesMap: dataSources,
ResourcesMap: resources, | p.ConfigureContextFunc = providerConfigure(p)
return p
}
func providerConfigure(p *schema.Provider) schema.ConfigureContextFunc {
return func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) {
var auxTenants []string
if v, ok := d.Get("auxiliary_tenant_ids").([]interface{}); ok && len(v) > 0 {
auxTenants = *utils.ExpandStringSlice(v)
} else if v := os.Getenv("ARM_AUXILIARY_TENANT_IDS"); v != "" {
auxTenants = strings.Split(v, ";")
}
if len(auxTenants) > 3 {
return nil, diag.Errorf("The provider only supports 3 auxiliary tenant IDs")
}
metadataHost := d.Get("metadata_host").(string)
builder := &authentication.Builder{
SubscriptionID: d.Get("subscription_id").(string),
ClientID: d.Get("client_id").(string),
ClientSecret: d.Get("client_secret").(string),
TenantID: d.Get("tenant_id").(string),
AuxiliaryTenantIDs: auxTenants,
Environment: d.Get("environment").(string),
MetadataHost: metadataHost,
MsiEndpoint: d.Get("msi_endpoint").(string),
ClientCertPassword: d.Get("client_certificate_password").(string),
ClientCertPath: d.Get("client_certificate_path").(string),
IDTokenRequestToken: d.Get("oidc_request_token").(string),
IDTokenRequestURL: d.Get("oidc_request_url").(string),
IDToken: d.Get("oidc_token").(string),
IDTokenFilePath: d.Get("oidc_token_file_path").(string),
// Feature Toggles
SupportsClientCertAuth: true,
SupportsClientSecretAuth: true,
SupportsOIDCAuth: d.Get("use_oidc").(bool),
SupportsManagedServiceIdentity: d.Get("use_msi").(bool),
SupportsAzureCliToken: true,
SupportsAuxiliaryTenants: len(auxTenants) > 0,
// Doc Links
ClientSecretDocsLink: "https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/guides/service_principal_client_secret",
// Use MSAL
UseMicrosoftGraph: true,
}
config, err := builder.Build()
if err != nil {
return nil, diag.Errorf("building AzureRM Client: %s", err)
}
terraformVersion := p.TerraformVersion
if terraformVersion == "" {
// Terraform 0.12 introduced this field to the protocol
// We can therefore assume that if it's missing it's 0.10 or 0.11
terraformVersion = "0.11+compatible"
}
skipProviderRegistration := d.Get("skip_provider_registration").(bool)
clientBuilder := clients.ClientBuilder{
AuthConfig: config,
SkipProviderRegistration: skipProviderRegistration,
TerraformVersion: terraformVersion,
PartnerId: d.Get("partner_id").(string),
DisableCorrelationRequestID: d.Get("disable_correlation_request_id").(bool),
DisableTerraformPartnerID: d.Get("disable_terraform_partner_id").(bool),
Features: expandFeatures(d.Get("features").([]interface{})),
StorageUseAzureAD: d.Get("storage_use_azuread").(bool),
// this field is intentionally not exposed in the provider block, since it's only used for
// platform level tracing
CustomCorrelationRequestID: os.Getenv("ARM_CORRELATION_REQUEST_ID"),
}
//lint:ignore SA1019 SDKv2 migration - staticcheck's own linter directives are currently being ignored under golangci-lint
stopCtx, ok := schema.StopContext(ctx) //nolint:staticcheck
if !ok {
stopCtx = ctx
}
client, err := clients.Build(stopCtx, clientBuilder)
if err != nil {
return nil, diag.FromErr(err)
}
client.StopContext = stopCtx
if !skipProviderRegistration {
// List all the available providers and their registration state to avoid unnecessary
// requests. This also lets us check if the provider credentials are correct.
providerList, err := client.Resource.ProvidersClient.List(ctx, nil, "")
if err != nil {
return nil, diag.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+
"credentials or the service principal does not have permission to use the Resource Manager API, Azure "+
"error: %s", err)
}
availableResourceProviders := providerList.Values()
requiredResourceProviders := resourceproviders.Required()
if err := resourceproviders.EnsureRegistered(ctx, *client.Resource.ProvidersClient, availableResourceProviders, requiredResourceProviders); err != nil {
return nil, diag.Errorf(resourceProviderRegistrationErrorFmt, err)
}
}
return client, nil
}
}
const resourceProviderRegistrationErrorFmt = `Error ensuring Resource Providers are registered.
Terraform automatically attempts to register the Resource Providers it supports to
ensure it's able to provision resources.
If you don't have permission to register Resource Providers you may wish to use the
"skip_provider_registration" flag in the Provider block to disable this functionality.
Please note that if you opt out of Resource Provider Registration and Terraform tries
to provision a resource from a Resource Provider which is unregistered, then the errors
may appear misleading - for example:
> API version 2019-XX-XX was not found for Microsoft.Foo
This could indicate that the Resource Provider "Microsoft.Foo" requires registration,
but it could also indicate that this Azure Region doesn't support this API version.
More information on the "skip_provider_registration" flag can be found here:
https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#skip_provider_registration
Original Error: %s` | }
| random_line_split |
provider.go | package provider
import (
"context"
"fmt"
"log"
"os"
"strings"
"github.com/hashicorp/go-azure-helpers/authentication"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/hashicorp/terraform-provider-azurerm/internal/clients"
"github.com/hashicorp/terraform-provider-azurerm/internal/resourceproviders"
"github.com/hashicorp/terraform-provider-azurerm/internal/sdk"
"github.com/hashicorp/terraform-provider-azurerm/utils"
)
func AzureProvider() *schema.Provider {
return azureProvider(false)
}
func | () *schema.Provider {
return azureProvider(true)
}
func ValidatePartnerID(i interface{}, k string) ([]string, []error) {
// ValidatePartnerID checks if partner_id is any of the following:
// * a valid UUID - will add "pid-" prefix to the ID if it is not already present
// * a valid UUID prefixed with "pid-"
// * a valid UUID prefixed with "pid-" and suffixed with "-partnercenter"
debugLog := func(f string, v ...interface{}) {
if os.Getenv("TF_LOG") == "" {
return
}
if os.Getenv("TF_ACC") != "" {
return
}
log.Printf(f, v...)
}
v, ok := i.(string)
if !ok {
return nil, []error{fmt.Errorf("expected type of %q to be string", k)}
}
if v == "" {
return nil, nil
}
// Check for pid=<guid>-partnercenter format
if strings.HasPrefix(v, "pid-") && strings.HasSuffix(v, "-partnercenter") {
g := strings.TrimPrefix(v, "pid-")
g = strings.TrimSuffix(g, "-partnercenter")
if _, err := validation.IsUUID(g, ""); err != nil {
return nil, []error{fmt.Errorf("expected %q to contain a valid UUID", v)}
}
debugLog("[DEBUG] %q partner_id matches pid-<GUID>-partnercenter...", v)
return nil, nil
}
// Check for pid=<guid> (without the -partnercenter suffix)
if strings.HasPrefix(v, "pid-") && !strings.HasSuffix(v, "-partnercenter") {
g := strings.TrimPrefix(v, "pid-")
if _, err := validation.IsUUID(g, ""); err != nil {
return nil, []error{fmt.Errorf("expected %q to be a valid UUID", k)}
}
debugLog("[DEBUG] %q partner_id matches pid-<GUID>...", v)
return nil, nil
}
// Check for straight UUID
if _, err := validation.IsUUID(v, ""); err != nil {
return nil, []error{fmt.Errorf("expected %q to be a valid UUID", k)}
} else {
debugLog("[DEBUG] %q partner_id is an un-prefixed UUID...", v)
return nil, nil
}
}
func azureProvider(supportLegacyTestSuite bool) *schema.Provider {
// avoids this showing up in test output
debugLog := func(f string, v ...interface{}) {
if os.Getenv("TF_LOG") == "" {
return
}
if os.Getenv("TF_ACC") != "" {
return
}
log.Printf(f, v...)
}
dataSources := make(map[string]*schema.Resource)
resources := make(map[string]*schema.Resource)
// first handle the typed services
for _, service := range SupportedTypedServices() {
debugLog("[DEBUG] Registering Data Sources for %q..", service.Name())
for _, ds := range service.DataSources() {
key := ds.ResourceType()
if existing := dataSources[key]; existing != nil {
panic(fmt.Sprintf("An existing Data Source exists for %q", key))
}
wrapper := sdk.NewDataSourceWrapper(ds)
dataSource, err := wrapper.DataSource()
if err != nil {
panic(fmt.Errorf("creating Wrapper for Data Source %q: %+v", key, err))
}
dataSources[key] = dataSource
}
debugLog("[DEBUG] Registering Resources for %q..", service.Name())
for _, r := range service.Resources() {
key := r.ResourceType()
if existing := resources[key]; existing != nil {
panic(fmt.Sprintf("An existing Resource exists for %q", key))
}
wrapper := sdk.NewResourceWrapper(r)
resource, err := wrapper.Resource()
if err != nil {
panic(fmt.Errorf("creating Wrapper for Resource %q: %+v", key, err))
}
resources[key] = resource
}
}
// then handle the untyped services
for _, service := range SupportedUntypedServices() {
debugLog("[DEBUG] Registering Data Sources for %q..", service.Name())
for k, v := range service.SupportedDataSources() {
if existing := dataSources[k]; existing != nil {
panic(fmt.Sprintf("An existing Data Source exists for %q", k))
}
dataSources[k] = v
}
debugLog("[DEBUG] Registering Resources for %q..", service.Name())
for k, v := range service.SupportedResources() {
if existing := resources[k]; existing != nil {
panic(fmt.Sprintf("An existing Resource exists for %q", k))
}
resources[k] = v
}
}
p := &schema.Provider{
Schema: map[string]*schema.Schema{
"subscription_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""),
Description: "The Subscription ID which should be used.",
},
"client_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
Description: "The Client ID which should be used.",
},
"tenant_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
Description: "The Tenant ID which should be used.",
},
"auxiliary_tenant_ids": {
Type: schema.TypeList,
Optional: true,
MaxItems: 3,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"environment": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"),
Description: "The Cloud Environment which should be used. Possible values are public, usgovernment, and china. Defaults to public.",
},
"metadata_host": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_METADATA_HOSTNAME", ""),
Description: "The Hostname which should be used for the Azure Metadata Service.",
},
// Client Certificate specific fields
"client_certificate_path": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PATH", ""),
Description: "The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service Principal using a Client Certificate.",
},
"client_certificate_password": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PASSWORD", ""),
Description: "The password associated with the Client Certificate. For use when authenticating as a Service Principal using a Client Certificate",
},
// Client Secret specific fields
"client_secret": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
Description: "The Client Secret which should be used. For use When authenticating as a Service Principal using a Client Secret.",
},
// OIDC specific fields
"oidc_request_token": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_TOKEN", "ACTIONS_ID_TOKEN_REQUEST_TOKEN"}, ""),
Description: "The bearer token for the request to the OIDC provider. For use when authenticating as a Service Principal using OpenID Connect.",
},
"oidc_request_url": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_URL", "ACTIONS_ID_TOKEN_REQUEST_URL"}, ""),
Description: "The URL for the OIDC provider from which to request an ID token. For use when authenticating as a Service Principal using OpenID Connect.",
},
"oidc_token": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN", ""),
Description: "The OIDC ID token for use when authenticating as a Service Principal using OpenID Connect.",
},
"oidc_token_file_path": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN_FILE_PATH", ""),
Description: "The path to a file containing an OIDC ID token for use when authenticating as a Service Principal using OpenID Connect.",
},
"use_oidc": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_USE_OIDC", false),
Description: "Allow OpenID Connect to be used for authentication",
},
// Managed Service Identity specific fields
"use_msi": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_USE_MSI", false),
Description: "Allowed Managed Service Identity be used for Authentication.",
},
"msi_endpoint": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_MSI_ENDPOINT", ""),
Description: "The path to a custom endpoint for Managed Service Identity - in most circumstances this should be detected automatically. ",
},
// Managed Tracking GUID for User-agent
"partner_id": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.Any(ValidatePartnerID, validation.StringIsEmpty),
DefaultFunc: schema.EnvDefaultFunc("ARM_PARTNER_ID", ""),
Description: "A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.",
},
"disable_correlation_request_id": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_DISABLE_CORRELATION_REQUEST_ID", false),
Description: "This will disable the x-ms-correlation-request-id header.",
},
"disable_terraform_partner_id": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_DISABLE_TERRAFORM_PARTNER_ID", false),
Description: "This will disable the Terraform Partner ID which is used if a custom `partner_id` isn't specified.",
},
"features": schemaFeatures(supportLegacyTestSuite),
// Advanced feature flags
"skip_provider_registration": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SKIP_PROVIDER_REGISTRATION", false),
Description: "Should the AzureRM Provider skip registering all of the Resource Providers that it supports, if they're not already registered?",
},
"storage_use_azuread": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_STORAGE_USE_AZUREAD", false),
Description: "Should the AzureRM Provider use AzureAD to access the Storage Data Plane API's?",
},
},
DataSourcesMap: dataSources,
ResourcesMap: resources,
}
p.ConfigureContextFunc = providerConfigure(p)
return p
}
func providerConfigure(p *schema.Provider) schema.ConfigureContextFunc {
return func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) {
var auxTenants []string
if v, ok := d.Get("auxiliary_tenant_ids").([]interface{}); ok && len(v) > 0 {
auxTenants = *utils.ExpandStringSlice(v)
} else if v := os.Getenv("ARM_AUXILIARY_TENANT_IDS"); v != "" {
auxTenants = strings.Split(v, ";")
}
if len(auxTenants) > 3 {
return nil, diag.Errorf("The provider only supports 3 auxiliary tenant IDs")
}
metadataHost := d.Get("metadata_host").(string)
builder := &authentication.Builder{
SubscriptionID: d.Get("subscription_id").(string),
ClientID: d.Get("client_id").(string),
ClientSecret: d.Get("client_secret").(string),
TenantID: d.Get("tenant_id").(string),
AuxiliaryTenantIDs: auxTenants,
Environment: d.Get("environment").(string),
MetadataHost: metadataHost,
MsiEndpoint: d.Get("msi_endpoint").(string),
ClientCertPassword: d.Get("client_certificate_password").(string),
ClientCertPath: d.Get("client_certificate_path").(string),
IDTokenRequestToken: d.Get("oidc_request_token").(string),
IDTokenRequestURL: d.Get("oidc_request_url").(string),
IDToken: d.Get("oidc_token").(string),
IDTokenFilePath: d.Get("oidc_token_file_path").(string),
// Feature Toggles
SupportsClientCertAuth: true,
SupportsClientSecretAuth: true,
SupportsOIDCAuth: d.Get("use_oidc").(bool),
SupportsManagedServiceIdentity: d.Get("use_msi").(bool),
SupportsAzureCliToken: true,
SupportsAuxiliaryTenants: len(auxTenants) > 0,
// Doc Links
ClientSecretDocsLink: "https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/guides/service_principal_client_secret",
// Use MSAL
UseMicrosoftGraph: true,
}
config, err := builder.Build()
if err != nil {
return nil, diag.Errorf("building AzureRM Client: %s", err)
}
terraformVersion := p.TerraformVersion
if terraformVersion == "" {
// Terraform 0.12 introduced this field to the protocol
// We can therefore assume that if it's missing it's 0.10 or 0.11
terraformVersion = "0.11+compatible"
}
skipProviderRegistration := d.Get("skip_provider_registration").(bool)
clientBuilder := clients.ClientBuilder{
AuthConfig: config,
SkipProviderRegistration: skipProviderRegistration,
TerraformVersion: terraformVersion,
PartnerId: d.Get("partner_id").(string),
DisableCorrelationRequestID: d.Get("disable_correlation_request_id").(bool),
DisableTerraformPartnerID: d.Get("disable_terraform_partner_id").(bool),
Features: expandFeatures(d.Get("features").([]interface{})),
StorageUseAzureAD: d.Get("storage_use_azuread").(bool),
// this field is intentionally not exposed in the provider block, since it's only used for
// platform level tracing
CustomCorrelationRequestID: os.Getenv("ARM_CORRELATION_REQUEST_ID"),
}
//lint:ignore SA1019 SDKv2 migration - staticcheck's own linter directives are currently being ignored under golangci-lint
stopCtx, ok := schema.StopContext(ctx) //nolint:staticcheck
if !ok {
stopCtx = ctx
}
client, err := clients.Build(stopCtx, clientBuilder)
if err != nil {
return nil, diag.FromErr(err)
}
client.StopContext = stopCtx
if !skipProviderRegistration {
// List all the available providers and their registration state to avoid unnecessary
// requests. This also lets us check if the provider credentials are correct.
providerList, err := client.Resource.ProvidersClient.List(ctx, nil, "")
if err != nil {
return nil, diag.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+
"credentials or the service principal does not have permission to use the Resource Manager API, Azure "+
"error: %s", err)
}
availableResourceProviders := providerList.Values()
requiredResourceProviders := resourceproviders.Required()
if err := resourceproviders.EnsureRegistered(ctx, *client.Resource.ProvidersClient, availableResourceProviders, requiredResourceProviders); err != nil {
return nil, diag.Errorf(resourceProviderRegistrationErrorFmt, err)
}
}
return client, nil
}
}
const resourceProviderRegistrationErrorFmt = `Error ensuring Resource Providers are registered.
Terraform automatically attempts to register the Resource Providers it supports to
ensure it's able to provision resources.
If you don't have permission to register Resource Providers you may wish to use the
"skip_provider_registration" flag in the Provider block to disable this functionality.
Please note that if you opt out of Resource Provider Registration and Terraform tries
to provision a resource from a Resource Provider which is unregistered, then the errors
may appear misleading - for example:
> API version 2019-XX-XX was not found for Microsoft.Foo
This could indicate that the Resource Provider "Microsoft.Foo" requires registration,
but it could also indicate that this Azure Region doesn't support this API version.
More information on the "skip_provider_registration" flag can be found here:
https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#skip_provider_registration
Original Error: %s`
| TestAzureProvider | identifier_name |
provider.go | package provider
import (
"context"
"fmt"
"log"
"os"
"strings"
"github.com/hashicorp/go-azure-helpers/authentication"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/hashicorp/terraform-provider-azurerm/internal/clients"
"github.com/hashicorp/terraform-provider-azurerm/internal/resourceproviders"
"github.com/hashicorp/terraform-provider-azurerm/internal/sdk"
"github.com/hashicorp/terraform-provider-azurerm/utils"
)
func AzureProvider() *schema.Provider {
return azureProvider(false)
}
func TestAzureProvider() *schema.Provider |
func ValidatePartnerID(i interface{}, k string) ([]string, []error) {
// ValidatePartnerID checks if partner_id is any of the following:
// * a valid UUID - will add "pid-" prefix to the ID if it is not already present
// * a valid UUID prefixed with "pid-"
// * a valid UUID prefixed with "pid-" and suffixed with "-partnercenter"
debugLog := func(f string, v ...interface{}) {
if os.Getenv("TF_LOG") == "" {
return
}
if os.Getenv("TF_ACC") != "" {
return
}
log.Printf(f, v...)
}
v, ok := i.(string)
if !ok {
return nil, []error{fmt.Errorf("expected type of %q to be string", k)}
}
if v == "" {
return nil, nil
}
// Check for pid=<guid>-partnercenter format
if strings.HasPrefix(v, "pid-") && strings.HasSuffix(v, "-partnercenter") {
g := strings.TrimPrefix(v, "pid-")
g = strings.TrimSuffix(g, "-partnercenter")
if _, err := validation.IsUUID(g, ""); err != nil {
return nil, []error{fmt.Errorf("expected %q to contain a valid UUID", v)}
}
debugLog("[DEBUG] %q partner_id matches pid-<GUID>-partnercenter...", v)
return nil, nil
}
// Check for pid=<guid> (without the -partnercenter suffix)
if strings.HasPrefix(v, "pid-") && !strings.HasSuffix(v, "-partnercenter") {
g := strings.TrimPrefix(v, "pid-")
if _, err := validation.IsUUID(g, ""); err != nil {
return nil, []error{fmt.Errorf("expected %q to be a valid UUID", k)}
}
debugLog("[DEBUG] %q partner_id matches pid-<GUID>...", v)
return nil, nil
}
// Check for straight UUID
if _, err := validation.IsUUID(v, ""); err != nil {
return nil, []error{fmt.Errorf("expected %q to be a valid UUID", k)}
} else {
debugLog("[DEBUG] %q partner_id is an un-prefixed UUID...", v)
return nil, nil
}
}
func azureProvider(supportLegacyTestSuite bool) *schema.Provider {
// avoids this showing up in test output
debugLog := func(f string, v ...interface{}) {
if os.Getenv("TF_LOG") == "" {
return
}
if os.Getenv("TF_ACC") != "" {
return
}
log.Printf(f, v...)
}
dataSources := make(map[string]*schema.Resource)
resources := make(map[string]*schema.Resource)
// first handle the typed services
for _, service := range SupportedTypedServices() {
debugLog("[DEBUG] Registering Data Sources for %q..", service.Name())
for _, ds := range service.DataSources() {
key := ds.ResourceType()
if existing := dataSources[key]; existing != nil {
panic(fmt.Sprintf("An existing Data Source exists for %q", key))
}
wrapper := sdk.NewDataSourceWrapper(ds)
dataSource, err := wrapper.DataSource()
if err != nil {
panic(fmt.Errorf("creating Wrapper for Data Source %q: %+v", key, err))
}
dataSources[key] = dataSource
}
debugLog("[DEBUG] Registering Resources for %q..", service.Name())
for _, r := range service.Resources() {
key := r.ResourceType()
if existing := resources[key]; existing != nil {
panic(fmt.Sprintf("An existing Resource exists for %q", key))
}
wrapper := sdk.NewResourceWrapper(r)
resource, err := wrapper.Resource()
if err != nil {
panic(fmt.Errorf("creating Wrapper for Resource %q: %+v", key, err))
}
resources[key] = resource
}
}
// then handle the untyped services
for _, service := range SupportedUntypedServices() {
debugLog("[DEBUG] Registering Data Sources for %q..", service.Name())
for k, v := range service.SupportedDataSources() {
if existing := dataSources[k]; existing != nil {
panic(fmt.Sprintf("An existing Data Source exists for %q", k))
}
dataSources[k] = v
}
debugLog("[DEBUG] Registering Resources for %q..", service.Name())
for k, v := range service.SupportedResources() {
if existing := resources[k]; existing != nil {
panic(fmt.Sprintf("An existing Resource exists for %q", k))
}
resources[k] = v
}
}
p := &schema.Provider{
Schema: map[string]*schema.Schema{
"subscription_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""),
Description: "The Subscription ID which should be used.",
},
"client_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
Description: "The Client ID which should be used.",
},
"tenant_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
Description: "The Tenant ID which should be used.",
},
"auxiliary_tenant_ids": {
Type: schema.TypeList,
Optional: true,
MaxItems: 3,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"environment": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"),
Description: "The Cloud Environment which should be used. Possible values are public, usgovernment, and china. Defaults to public.",
},
"metadata_host": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_METADATA_HOSTNAME", ""),
Description: "The Hostname which should be used for the Azure Metadata Service.",
},
// Client Certificate specific fields
"client_certificate_path": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PATH", ""),
Description: "The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service Principal using a Client Certificate.",
},
"client_certificate_password": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PASSWORD", ""),
Description: "The password associated with the Client Certificate. For use when authenticating as a Service Principal using a Client Certificate",
},
// Client Secret specific fields
"client_secret": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
Description: "The Client Secret which should be used. For use When authenticating as a Service Principal using a Client Secret.",
},
// OIDC specific fields
"oidc_request_token": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_TOKEN", "ACTIONS_ID_TOKEN_REQUEST_TOKEN"}, ""),
Description: "The bearer token for the request to the OIDC provider. For use when authenticating as a Service Principal using OpenID Connect.",
},
"oidc_request_url": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{"ARM_OIDC_REQUEST_URL", "ACTIONS_ID_TOKEN_REQUEST_URL"}, ""),
Description: "The URL for the OIDC provider from which to request an ID token. For use when authenticating as a Service Principal using OpenID Connect.",
},
"oidc_token": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN", ""),
Description: "The OIDC ID token for use when authenticating as a Service Principal using OpenID Connect.",
},
"oidc_token_file_path": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_OIDC_TOKEN_FILE_PATH", ""),
Description: "The path to a file containing an OIDC ID token for use when authenticating as a Service Principal using OpenID Connect.",
},
"use_oidc": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_USE_OIDC", false),
Description: "Allow OpenID Connect to be used for authentication",
},
// Managed Service Identity specific fields
"use_msi": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_USE_MSI", false),
Description: "Allowed Managed Service Identity be used for Authentication.",
},
"msi_endpoint": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_MSI_ENDPOINT", ""),
Description: "The path to a custom endpoint for Managed Service Identity - in most circumstances this should be detected automatically. ",
},
// Managed Tracking GUID for User-agent
"partner_id": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.Any(ValidatePartnerID, validation.StringIsEmpty),
DefaultFunc: schema.EnvDefaultFunc("ARM_PARTNER_ID", ""),
Description: "A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.",
},
"disable_correlation_request_id": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_DISABLE_CORRELATION_REQUEST_ID", false),
Description: "This will disable the x-ms-correlation-request-id header.",
},
"disable_terraform_partner_id": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_DISABLE_TERRAFORM_PARTNER_ID", false),
Description: "This will disable the Terraform Partner ID which is used if a custom `partner_id` isn't specified.",
},
"features": schemaFeatures(supportLegacyTestSuite),
// Advanced feature flags
"skip_provider_registration": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SKIP_PROVIDER_REGISTRATION", false),
Description: "Should the AzureRM Provider skip registering all of the Resource Providers that it supports, if they're not already registered?",
},
"storage_use_azuread": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_STORAGE_USE_AZUREAD", false),
Description: "Should the AzureRM Provider use AzureAD to access the Storage Data Plane API's?",
},
},
DataSourcesMap: dataSources,
ResourcesMap: resources,
}
p.ConfigureContextFunc = providerConfigure(p)
return p
}
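// providerConfigure returns the ConfigureContextFunc used by the provider: it reads the
// provider block (falling back to the corresponding ARM_* environment variables), builds the
// authentication configuration, and constructs the client bundle shared by all resources and
// data sources.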
func providerConfigure(p *schema.Provider) schema.ConfigureContextFunc {
return func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) {
var auxTenants []string
if v, ok := d.Get("auxiliary_tenant_ids").([]interface{}); ok && len(v) > 0 {
auxTenants = *utils.ExpandStringSlice(v)
} else if v := os.Getenv("ARM_AUXILIARY_TENANT_IDS"); v != "" {
auxTenants = strings.Split(v, ";")
}
if len(auxTenants) > 3 {
return nil, diag.Errorf("The provider only supports 3 auxiliary tenant IDs")
}
metadataHost := d.Get("metadata_host").(string)
builder := &authentication.Builder{
SubscriptionID: d.Get("subscription_id").(string),
ClientID: d.Get("client_id").(string),
ClientSecret: d.Get("client_secret").(string),
TenantID: d.Get("tenant_id").(string),
AuxiliaryTenantIDs: auxTenants,
Environment: d.Get("environment").(string),
MetadataHost: metadataHost,
MsiEndpoint: d.Get("msi_endpoint").(string),
ClientCertPassword: d.Get("client_certificate_password").(string),
ClientCertPath: d.Get("client_certificate_path").(string),
IDTokenRequestToken: d.Get("oidc_request_token").(string),
IDTokenRequestURL: d.Get("oidc_request_url").(string),
IDToken: d.Get("oidc_token").(string),
IDTokenFilePath: d.Get("oidc_token_file_path").(string),
// Feature Toggles
SupportsClientCertAuth: true,
SupportsClientSecretAuth: true,
SupportsOIDCAuth: d.Get("use_oidc").(bool),
SupportsManagedServiceIdentity: d.Get("use_msi").(bool),
SupportsAzureCliToken: true,
SupportsAuxiliaryTenants: len(auxTenants) > 0,
// Doc Links
ClientSecretDocsLink: "https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/guides/service_principal_client_secret",
// Use MSAL
UseMicrosoftGraph: true,
}
config, err := builder.Build()
if err != nil {
return nil, diag.Errorf("building AzureRM Client: %s", err)
}
terraformVersion := p.TerraformVersion
if terraformVersion == "" {
// Terraform 0.12 introduced this field to the protocol
// We can therefore assume that if it's missing it's 0.10 or 0.11
terraformVersion = "0.11+compatible"
}
skipProviderRegistration := d.Get("skip_provider_registration").(bool)
clientBuilder := clients.ClientBuilder{
AuthConfig: config,
SkipProviderRegistration: skipProviderRegistration,
TerraformVersion: terraformVersion,
PartnerId: d.Get("partner_id").(string),
DisableCorrelationRequestID: d.Get("disable_correlation_request_id").(bool),
DisableTerraformPartnerID: d.Get("disable_terraform_partner_id").(bool),
Features: expandFeatures(d.Get("features").([]interface{})),
StorageUseAzureAD: d.Get("storage_use_azuread").(bool),
// this field is intentionally not exposed in the provider block, since it's only used for
// platform level tracing
CustomCorrelationRequestID: os.Getenv("ARM_CORRELATION_REQUEST_ID"),
}
//lint:ignore SA1019 SDKv2 migration - staticcheck's own linter directives are currently being ignored under golangci-lint
stopCtx, ok := schema.StopContext(ctx) //nolint:staticcheck
if !ok {
stopCtx = ctx
}
client, err := clients.Build(stopCtx, clientBuilder)
if err != nil {
return nil, diag.FromErr(err)
}
client.StopContext = stopCtx
if !skipProviderRegistration {
// List all the available providers and their registration state to avoid unnecessary
// requests. This also lets us check if the provider credentials are correct.
providerList, err := client.Resource.ProvidersClient.List(ctx, nil, "")
if err != nil {
return nil, diag.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+
"credentials or the service principal does not have permission to use the Resource Manager API, Azure "+
"error: %s", err)
}
availableResourceProviders := providerList.Values()
requiredResourceProviders := resourceproviders.Required()
if err := resourceproviders.EnsureRegistered(ctx, *client.Resource.ProvidersClient, availableResourceProviders, requiredResourceProviders); err != nil {
return nil, diag.Errorf(resourceProviderRegistrationErrorFmt, err)
}
}
return client, nil
}
}
const resourceProviderRegistrationErrorFmt = `Error ensuring Resource Providers are registered.
Terraform automatically attempts to register the Resource Providers it supports to
ensure it's able to provision resources.
If you don't have permission to register Resource Providers you may wish to use the
"skip_provider_registration" flag in the Provider block to disable this functionality.
Please note that if you opt out of Resource Provider Registration and Terraform tries
to provision a resource from a Resource Provider which is unregistered, then the errors
may appear misleading - for example:
> API version 2019-XX-XX was not found for Microsoft.Foo
This could indicate that the Resource Provider "Microsoft.Foo" requires registration,
but it could also indicate that this Azure Region doesn't support this API version.
More information on the "skip_provider_registration" flag can be found here:
https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#skip_provider_registration
Original Error: %s`
| {
return azureProvider(true)
} | identifier_body |
feature_extraction.py | import numpy as np
import os
import pandas as pd
from Bio.Seq import Seq
from Bio import SeqIO
try:
from StringIO import StringIO ## for Python 2
except ImportError:
from io import StringIO ## for Python 3
import uuid
from joblib import Parallel, delayed
import argparse
import matplotlib
matplotlib.use('agg')
import seaborn as sns
import matplotlib.pyplot as plt
from pkg_resources import resource_filename
from janggu.data import Bioseq
from janggu.data import ReduceDim
import numpy as np
from janggu import inputlayer
from janggu import outputconv
from janggu import DnaConv2D
from janggu.data import ReduceDim
from janggu.data import Cover
try:
from StringIO import StringIO ## for Python 2
except ImportError:
from io import StringIO ## for Python 3
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import numpy as np
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import pandas as pd
import matplotlib.pylab as plt
import numpy as np
import scipy
import seaborn as sns
import glob
from sklearn.model_selection import KFold,StratifiedKFold
import warnings
from sklearn.metrics import roc_curve,roc_auc_score,average_precision_score,accuracy_score
import warnings
warnings.filterwarnings('ignore')
# warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor,RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.scorer import make_scorer
from sklearn.model_selection import train_test_split
from sklearn.base import TransformerMixin
from sklearn.datasets import make_regression
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor,GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LinearRegression, Ridge
import scipy
import numpy as np
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import LeaveOneOut
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from sklearn import linear_model
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge,Lars,BayesianRidge
from copy import deepcopy as dp
"""
Feature extraction (Top motif scores)
1. using janggu get DNA one-hot
3. read meme get motif PWMs in both strands
4. scan motifs get score_list, max(pos_strand,neg_strand)
with tree-based methods, we don't need to do normalization here
5. for each seq, get top N scores from (4) and their footprint score (given their positions), get adjusted score
Dependency
----------
meme (to get motif revcomp)
bedtools (to get fasta sequences for gkm_svm)
python library
--------------
janggu (tensorflow + keras)
biopython
sklearn
joblib
"""
def read_fasta(f):
my_dict = {}
for r in SeqIO.parse(f, "fasta"):
my_dict[r.id] = str(r.seq).upper()
return my_dict
def read_motif(meme_file):
revcomp_file = "/tmp/"+str(uuid.uuid4())
os.system("meme-get-motif -rc -all %s > %s"%(meme_file,revcomp_file))
original_motif_label = "++original++"
revcomp_motif_label = "--revcomp--"
dict1 = parse_meme(meme_file,label=original_motif_label)
dict2 = parse_meme(revcomp_file,label=revcomp_motif_label)
myDict = {}
for k in dict1:
motif_name = k.replace(original_motif_label,"")
myDict[motif_name]=[dict1[k].T.values,dict2[k.replace(original_motif_label,revcomp_motif_label)].T.values]
return myDict
def parse_meme(file,label=""):
"""function to read meme file to pd.DataFrame"""
lines = open(file).readlines()
i = 0
myDict = {}
while i < len(lines):
myList = lines[i].strip().split()
if len(myList) < 1:
i = i + 1
continue
if myList[0] == "MOTIF":
if lines[i+1].strip() == "":
desc = lines[i+2].strip().split()
flag = True
else:
desc = lines[i+1].strip().split()
flag = False
try:
motifLength = int(desc[5])
except:
print (desc)
i = i+1
continue
if flag:
myString = "\n".join(map(lambda x:"\t".join(x.strip().split()),lines[i+3:i+3+motifLength])).replace(" "," ")
df = pd.read_csv(StringIO(myString), sep="\t",header=None)
df.columns=['A','C','G','T']
myDict[myList[1]+label] = df
if df.shape[0] != motifLength or df.shape[1] !=4:
print ("something is wrong")
i = i+3+motifLength
continue
else:
myString = "\n".join(map(lambda x:"\t".join(x.strip().split()),lines[i+2:i+2+motifLength])).replace(" "," ")
df = pd.read_csv(StringIO(myString), sep="\t",header=None)
df.columns=['A','C','G','T']
myDict[myList[1]+label] = df
i = i+2+motifLength
if df.shape[0] != motifLength or df.shape[1] !=4:
print ("something is wrong")
continue
i = i+1
return myDict
def motif_scan(s,m):
## s, m are numpy array
## s.shape = L*4
## m.shape = 4*W
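## score for window i: trace(s[i:i+W,:] @ m) sums, over the W positions, the PWM weight of
## the base observed at each position, i.e. the usual PWM match score for that window.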
L = s.shape[0]
W = m.shape[1]
score_list = []
for i in range(L-W):
sub = np.matmul(s[i:i+W,:],m)
# if i < 3:
# print ("DNA seq",s[i:i+W,:])
# print ("motif",m)
# print ("mapping score: ",np.trace(sub))
score_list.append(np.trace(sub))
return score_list
def DNA_motif_scan(DNA_array,m1,m2):
score_list = []
# print (m1)
# print (m2)
for i in range(DNA_array.shape[0]):
score_list_1 = motif_scan(DNA_array[i,:,:],m1)
# print ("score_list_1",score_list_1)
score_list_2 = motif_scan(DNA_array[i,:,:],m2)
# print ("score_list_2",score_list_2)
for j in range(len(score_list_1)):
if score_list_2[j] > score_list_1[j]:
score_list_1[j] = score_list_2[j]
score_list.append(score_list_1)
# print (score_list)
out = np.array(score_list)
print ("DNA scanning out shape",out.shape)
return out
def get_roi(myList):
## roi is region of interest, term used by janggu
# chr19:13180899-13180900+
# strand = [list(x)[-1] for x in myList]
strand = [x[-1] for x in myList]
# print (strand)
chr = [x[:-1].split(":")[0] for x in myList]
start = [int(x[:-1].split(":")[-1].split("-")[0]) for x in myList]
end = [int(x[:-1].split(":")[-1].split("-")[1]) for x in myList]
roi_A = []
roi = []
for i in range(len(chr)):
roi_A.append([chr[i],start[i],end[i],myList[i],".",strand[i]])
roi.append([chr[i],start[i],end[i]])
return roi_A,roi
def get_high_low_data(input,pos_cutoff,neg_cutoff):
df = pd.read_csv(input,index_col=0)
# pos = df[df['HbFBase']>=pos_cutoff].index.tolist()
pos = df[df['HbFBase']>pos_cutoff].index.tolist()
neg = df[df['HbFBase']<=neg_cutoff].index.tolist()
print ("Pos size %s. Neg size %s"%(len(pos),len(neg)))
return df.loc[pos+neg],pos,neg
def roi2fasta(roi,genome_fa,flank):
df = pd.DataFrame(roi)
df[1] = df[1]-flank
df[2] = df[2]+flank
df.to_csv("tmp.bed",sep="\t",header=False,index=False)
os.system("bedtools getfasta -fi %s -fo tmp.fa -bed tmp.bed -s -name"%(genome_fa))
seq = read_fasta("tmp.fa")
os.system("rm tmp.fa tmp.bed")
return seq
## Define parameters
# high_hbf = 50
high_hbf = 0
low_hbf = 0
input = "Editable_A_scores.combined.scores.csv"
flank = 100
refgenome="/home/yli11/Data/Human/hg19/fasta/hg19.fa"
bw_file="/home/yli11/Projects/Li_gRNA/footprint/H1_H2_GM12878_Tn5_bw/Hudep2.bw"
meme_file = "selected_motifs.meme"
top_n=5 # number of features for each motif
## read data
data,high,low = get_high_low_data(input,high_hbf,low_hbf)
roi_A,roi = get_roi(high+low)
seq = roi2fasta(roi_A,refgenome,flank)
test = pd.DataFrame.from_dict(seq,orient='index')
data['seq'] = test[0]
# 1. using janggu get DNA one-hot
## get one-hot data and ATAC feature matrix
dna_A = Bioseq.create_from_refgenome(name='dna',refgenome=refgenome,roi=roi_A,flank=flank)
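# dna_A: one-hot encoded sequence for each ROI extended by `flank` bp on both sides
# (reshaped below to (n_sites, 2*flank+1, 4)); Tn5 on the next line is the matching per-base coverage.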
Tn5 = Cover.create_from_bigwig('bigwig_coverage',bigwigfiles=bw_file,roi=roi,binsize=1,stepsize=1,flank=flank)
## ReShape
dna_A=np.reshape(dna_A,(len(high+low),flank*2+1,4))
bw_values=np.reshape(Tn5,(len(high+low),flank*2+1))
## get motif PWM, 3. read meme get motif PWMs in both strands
motifs = read_motif(meme_file)
# 4. scan motifs get score_list, max(pos_strand,neg_strand)
score_list_A = Parallel(n_jobs=-1)(delayed(DNA_motif_scan)(dna_A,motifs[m][0],motifs[m][1]) for m in motifs)
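# Footprint-style score for a motif hit: mean Tn5 signal in the short flanks minus the mean
# signal over the motif itself, so higher values suggest relative protection (a candidate
# footprint) at the motif position.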
def get_footprint_score(s,l,footprint_score):
flanking=2
# print (s,l)
left_start = s-flanking
# print ("left_start:",left_start)
if left_start >= 0:
left = list(footprint_score[left_start:s])
else:
left = [np.nan]
right_end = s+l+flanking
# print ("right_end:",right_end)
# print ("len(footprint_score):",len(footprint_score))
if right_end <= len(footprint_score):
right = list(footprint_score[s+l:right_end])
else:
right = [np.nan]
flanking = np.nanmean(left+right)
# print ("left",left,"right",right)
# print ("flanking",flanking,"left+right",left+right)
occ = np.nanmean(footprint_score[s:s+l])
# print ("all:",footprint_score[s:s+l],"occ:",occ)
return flanking - occ
def get_top_n_motif_scores(score_list,top_n):
"""score_list.shape = L * 1
return
------
pos, value list
"""
return score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]
# 5. for each seq, get top N scores from (4) and their footprint score (given their positions), get adjusted score
def get_adjusted_motif_score(motif_score,footprint_score,n):
"""motif_score and footprint_score are same shape, N * L"""
out = []
# print ("motif_score",motif_score)
motif_length = footprint_score.shape[1] - motif_score.shape[1]
for i in range(motif_score.shape[0]):
pos,value = get_top_n_motif_scores(motif_score[i],n)
# print ("pos,:",pos)
# print ("value,:",value)
FOS_list = [get_footprint_score(s,motif_length,footprint_score[i]) for s in pos]
# print ("FOS_list:",FOS_list)
value = [value[i]*FOS_list[i] for i in range(len(value))]
out.append(value)
return out
adjusted_scores = Parallel(n_jobs=-1)(delayed(get_adjusted_motif_score)(motif_score,bw_values,top_n) for motif_score in score_list_A)
def | (motifs,top_n,label):
out = []
for i in motifs:
for j in range(top_n):
out.append("%s_%s_%s"%(label,i,j))
return out
## get feature table
adjusted_scores = np.array(adjusted_scores)
adjusted_scores = np.swapaxes(adjusted_scores,0,1)
adjusted_scores = adjusted_scores.reshape((len(high+low),top_n*len(motifs)))
adjusted_scores = pd.DataFrame(adjusted_scores)
adjusted_scores.columns = set_col_names(motifs,top_n,"motif_footprint_score")
adjusted_scores.index = high+low
df = pd.concat([adjusted_scores,data],axis=1)
# df.to_csv("ML_data.csv")
df.to_csv("all_A_features.csv")
| set_col_names | identifier_name |
feature_extraction.py | import numpy as np
import os
import pandas as pd
from Bio.Seq import Seq
from Bio import SeqIO
try:
from StringIO import StringIO ## for Python 2
except ImportError:
from io import StringIO ## for Python 3
import uuid
from joblib import Parallel, delayed
import argparse
import matplotlib
matplotlib.use('agg')
import seaborn as sns
import matplotlib.pyplot as plt
from pkg_resources import resource_filename
from janggu.data import Bioseq
from janggu.data import ReduceDim
import numpy as np
from janggu import inputlayer
from janggu import outputconv
from janggu import DnaConv2D
from janggu.data import ReduceDim
from janggu.data import Cover
try:
from StringIO import StringIO ## for Python 2
except ImportError:
from io import StringIO ## for Python 3
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import numpy as np
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import pandas as pd
import matplotlib.pylab as plt
import numpy as np
import scipy
import seaborn as sns
import glob
from sklearn.model_selection import KFold,StratifiedKFold
import warnings
from sklearn.metrics import roc_curve,roc_auc_score,average_precision_score,accuracy_score
import warnings
warnings.filterwarnings('ignore')
# warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor,RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.scorer import make_scorer
from sklearn.model_selection import train_test_split
from sklearn.base import TransformerMixin
from sklearn.datasets import make_regression
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor,GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LinearRegression, Ridge
import scipy
import numpy as np
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import LeaveOneOut
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from sklearn import linear_model
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge,Lars,BayesianRidge
from copy import deepcopy as dp
"""
Feature extraction (Top motif scores)
1. using janggu get DNA one-hot
3. read meme get motif PWMs in both strands
4. scan motifs get score_list, max(pos_strand,neg_strand)
with tree-based methods, we don't need to do normalization here
5. for each seq, get top N scores from (4) and their footprint score (given their positions), get adjusted score
Dependency
----------
meme (to get motif revcomp)
bedtools (to get fasta sequences for gkm_svm)
python library
--------------
janggu (tensorflow + keras)
biopython
sklearn
joblib
"""
def read_fasta(f):
my_dict = {}
for r in SeqIO.parse(f, "fasta"):
my_dict[r.id] = str(r.seq).upper()
return my_dict
def read_motif(meme_file):
revcomp_file = "/tmp/"+str(uuid.uuid4())
os.system("meme-get-motif -rc -all %s > %s"%(meme_file,revcomp_file))
original_motif_label = "++original++"
revcomp_motif_label = "--revcomp--"
dict1 = parse_meme(meme_file,label=original_motif_label)
dict2 = parse_meme(revcomp_file,label=revcomp_motif_label)
myDict = {}
for k in dict1:
motif_name = k.replace(original_motif_label,"")
myDict[motif_name]=[dict1[k].T.values,dict2[k.replace(original_motif_label,revcomp_motif_label)].T.values]
return myDict
def parse_meme(file,label=""):
"""function to read meme file to pd.DataFrame"""
lines = open(file).readlines()
i = 0
myDict = {}
while i < len(lines):
myList = lines[i].strip().split()
if len(myList) < 1:
i = i + 1
continue
if myList[0] == "MOTIF":
if lines[i+1].strip() == "":
desc = lines[i+2].strip().split()
flag = True
else:
desc = lines[i+1].strip().split()
flag = False
try:
motifLength = int(desc[5])
except:
print (desc)
i = i+1
continue
if flag:
myString = "\n".join(map(lambda x:"\t".join(x.strip().split()),lines[i+3:i+3+motifLength])).replace(" "," ")
df = pd.read_csv(StringIO(myString), sep="\t",header=None)
df.columns=['A','C','G','T']
myDict[myList[1]+label] = df
if df.shape[0] != motifLength or df.shape[1] !=4:
print ("something is wrong")
i = i+3+motifLength
continue
else:
myString = "\n".join(map(lambda x:"\t".join(x.strip().split()),lines[i+2:i+2+motifLength])).replace(" "," ")
df = pd.read_csv(StringIO(myString), sep="\t",header=None)
df.columns=['A','C','G','T']
myDict[myList[1]+label] = df
i = i+2+motifLength
if df.shape[0] != motifLength or df.shape[1] !=4:
print ("something is wrong")
continue
i = i+1
return myDict
def motif_scan(s,m):
## s, m are numpy array
## s.shape = L*4
## m.shape = 4*W
L = s.shape[0]
W = m.shape[1]
score_list = []
for i in range(L-W):
sub = np.matmul(s[i:i+W,:],m)
# if i < 3:
# print ("DNA seq",s[i:i+W,:])
# print ("motif",m)
# print ("mapping score: ",np.trace(sub))
score_list.append(np.trace(sub))
return score_list
def DNA_motif_scan(DNA_array,m1,m2):
score_list = []
# print (m1)
# print (m2)
for i in range(DNA_array.shape[0]):
score_list_1 = motif_scan(DNA_array[i,:,:],m1)
# print ("score_list_1",score_list_1)
score_list_2 = motif_scan(DNA_array[i,:,:],m2)
# print ("score_list_2",score_list_2)
for j in range(len(score_list_1)):
if score_list_2[j] > score_list_1[j]:
score_list_1[j] = score_list_2[j]
score_list.append(score_list_1)
# print (score_list)
out = np.array(score_list)
print ("DNA scanning out shape",out.shape)
return out
def get_roi(myList):
## roi is region of interest, term used by janggu
# chr19:13180899-13180900+
# strand = [list(x)[-1] for x in myList]
strand = [x[-1] for x in myList]
# print (strand)
chr = [x[:-1].split(":")[0] for x in myList]
start = [int(x[:-1].split(":")[-1].split("-")[0]) for x in myList]
end = [int(x[:-1].split(":")[-1].split("-")[1]) for x in myList]
roi_A = []
roi = []
for i in range(len(chr)):
roi_A.append([chr[i],start[i],end[i],myList[i],".",strand[i]])
roi.append([chr[i],start[i],end[i]])
return roi_A,roi
def get_high_low_data(input,pos_cutoff,neg_cutoff):
df = pd.read_csv(input,index_col=0)
# pos = df[df['HbFBase']>=pos_cutoff].index.tolist()
pos = df[df['HbFBase']>pos_cutoff].index.tolist()
neg = df[df['HbFBase']<=neg_cutoff].index.tolist()
print ("Pos size %s. Neg size %s"%(len(pos),len(neg)))
return df.loc[pos+neg],pos,neg
def roi2fasta(roi,genome_fa,flank):
df = pd.DataFrame(roi)
df[1] = df[1]-flank
df[2] = df[2]+flank
df.to_csv("tmp.bed",sep="\t",header=False,index=False)
os.system("bedtools getfasta -fi %s -fo tmp.fa -bed tmp.bed -s -name"%(genome_fa))
seq = read_fasta("tmp.fa")
os.system("rm tmp.fa tmp.bed")
return seq
## Define parameters
# high_hbf = 50
high_hbf = 0
low_hbf = 0
input = "Editable_A_scores.combined.scores.csv"
flank = 100
refgenome="/home/yli11/Data/Human/hg19/fasta/hg19.fa"
bw_file="/home/yli11/Projects/Li_gRNA/footprint/H1_H2_GM12878_Tn5_bw/Hudep2.bw"
meme_file = "selected_motifs.meme"
top_n=5 # number of features for each motif
## read data
data,high,low = get_high_low_data(input,high_hbf,low_hbf)
roi_A,roi = get_roi(high+low)
seq = roi2fasta(roi_A,refgenome,flank)
test = pd.DataFrame.from_dict(seq,orient='index')
data['seq'] = test[0]
# 1. using janggu get DNA one-hot
## get one-hot data and ATAC feature matrix
dna_A = Bioseq.create_from_refgenome(name='dna',refgenome=refgenome,roi=roi_A,flank=flank)
Tn5 = Cover.create_from_bigwig('bigwig_coverage',bigwigfiles=bw_file,roi=roi,binsize=1,stepsize=1,flank=flank)
## ReShape
dna_A=np.reshape(dna_A,(len(high+low),flank*2+1,4))
bw_values=np.reshape(Tn5,(len(high+low),flank*2+1))
## get motif PWM, 3. read meme get motif PWMs in both strands
motifs = read_motif(meme_file)
# 4. scan motifs get score_list, max(pos_strand,neg_strand)
score_list_A = Parallel(n_jobs=-1)(delayed(DNA_motif_scan)(dna_A,motifs[m][0],motifs[m][1]) for m in motifs)
def get_footprint_score(s,l,footprint_score):
flanking=2
# print (s,l)
left_start = s-flanking
# print ("left_start:",left_start)
if left_start >= 0:
left = list(footprint_score[left_start:s])
else:
left = [np.nan]
right_end = s+l+flanking
# print ("right_end:",right_end)
# print ("len(footprint_score):",len(footprint_score))
if right_end <= len(footprint_score):
right = list(footprint_score[s+l:right_end])
else:
right = [np.nan]
flanking = np.nanmean(left+right)
# print ("left",left,"right",right)
# print ("flanking",flanking,"left+right",left+right)
occ = np.nanmean(footprint_score[s:s+l])
# print ("all:",footprint_score[s:s+l],"occ:",occ)
return flanking - occ
def get_top_n_motif_scores(score_list,top_n):
"""score_list.shape = L * 1
return
| return score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]
# 5. for each seq, get top N scores from (4) and their footprint score (given their positions), get adjusted score
def get_adjusted_motif_score(motif_score,footprint_score,n):
"""motif_score and footprint_score are same shape, N * L"""
out = []
# print ("motif_score",motif_score)
motif_length = footprint_score.shape[1] - motif_score.shape[1]
for i in range(motif_score.shape[0]):
pos,value = get_top_n_motif_scores(motif_score[i],n)
# print ("pos,:",pos)
# print ("value,:",value)
FOS_list = [get_footprint_score(s,motif_length,footprint_score[i]) for s in pos]
# print ("FOS_list:",FOS_list)
value = [value[i]*FOS_list[i] for i in range(len(value))]
out.append(value)
return out
adjusted_scores = Parallel(n_jobs=-1)(delayed(get_adjusted_motif_score)(motif_score,bw_values,top_n) for motif_score in score_list_A)
def set_col_names(motifs,top_n,label):
out = []
for i in motifs:
for j in range(top_n):
out.append("%s_%s_%s"%(label,i,j))
return out
## get feature table
adjusted_scores = np.array(adjusted_scores)
adjusted_scores = np.swapaxes(adjusted_scores,0,1)
adjusted_scores = adjusted_scores.reshape((len(high+low),top_n*len(motifs)))
adjusted_scores = pd.DataFrame(adjusted_scores)
adjusted_scores.columns = set_col_names(motifs,top_n,"motif_footprint_score")
adjusted_scores.index = high+low
df = pd.concat([adjusted_scores,data],axis=1)
# df.to_csv("ML_data.csv")
df.to_csv("all_A_features.csv") | ------
pos, value list
"""
| random_line_split |
feature_extraction.py | import numpy as np
import os
import pandas as pd
from Bio.Seq import Seq
from Bio import SeqIO
try:
from StringIO import StringIO ## for Python 2
except ImportError:
from io import StringIO ## for Python 3
import uuid
from joblib import Parallel, delayed
import argparse
import matplotlib
matplotlib.use('agg')
import seaborn as sns
import matplotlib.pyplot as plt
from pkg_resources import resource_filename
from janggu.data import Bioseq
from janggu.data import ReduceDim
import numpy as np
from janggu import inputlayer
from janggu import outputconv
from janggu import DnaConv2D
from janggu.data import ReduceDim
from janggu.data import Cover
try:
from StringIO import StringIO ## for Python 2
except ImportError:
from io import StringIO ## for Python 3
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import numpy as np
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import pandas as pd
import matplotlib.pylab as plt
import numpy as np
import scipy
import seaborn as sns
import glob
from sklearn.model_selection import KFold,StratifiedKFold
import warnings
from sklearn.metrics import roc_curve,roc_auc_score,average_precision_score,accuracy_score
import warnings
warnings.filterwarnings('ignore')
# warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor,RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.scorer import make_scorer
from sklearn.model_selection import train_test_split
from sklearn.base import TransformerMixin
from sklearn.datasets import make_regression
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor,GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LinearRegression, Ridge
import scipy
import numpy as np
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import LeaveOneOut
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from sklearn import linear_model
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge,Lars,BayesianRidge
from copy import deepcopy as dp
"""
Feature extraction (Top motif scores)
1. using janggu get DNA one-hot
3. read meme get motif PWMs in both strands
4. scan motifs get score_list, max(pos_strand,neg_strand)
with tree-based methods, we don't need to do normalization here
5. for each seq, get top N scores from (4) and their footprint score (given their positions), get adjusted score
Dependency
----------
meme (to get motif revcomp)
bedtools (to get fasta sequences for gkm_svm)
python library
--------------
janggu (tensorflow + keras)
biopython
sklearn
joblib
"""
def read_fasta(f):
my_dict = {}
for r in SeqIO.parse(f, "fasta"):
my_dict[r.id] = str(r.seq).upper()
return my_dict
def read_motif(meme_file):
revcomp_file = "/tmp/"+str(uuid.uuid4())
os.system("meme-get-motif -rc -all %s > %s"%(meme_file,revcomp_file))
original_motif_label = "++original++"
revcomp_motif_label = "--revcomp--"
dict1 = parse_meme(meme_file,label=original_motif_label)
dict2 = parse_meme(revcomp_file,label=revcomp_motif_label)
myDict = {}
for k in dict1:
motif_name = k.replace(original_motif_label,"")
myDict[motif_name]=[dict1[k].T.values,dict2[k.replace(original_motif_label,revcomp_motif_label)].T.values]
return myDict
def parse_meme(file,label=""):
"""function to read meme file to pd.DataFrame"""
lines = open(file).readlines()
i = 0
myDict = {}
while i < len(lines):
myList = lines[i].strip().split()
if len(myList) < 1:
i = i + 1
continue
if myList[0] == "MOTIF":
if lines[i+1].strip() == "":
desc = lines[i+2].strip().split()
flag = True
else:
desc = lines[i+1].strip().split()
flag = False
try:
motifLength = int(desc[5])
except:
print (desc)
i = i+1
continue
if flag:
myString = "\n".join(map(lambda x:"\t".join(x.strip().split()),lines[i+3:i+3+motifLength])).replace(" "," ")
df = pd.read_csv(StringIO(myString), sep="\t",header=None)
df.columns=['A','C','G','T']
myDict[myList[1]+label] = df
if df.shape[0] != motifLength or df.shape[1] !=4:
print ("something is wrong")
i = i+3+motifLength
continue
else:
|
i = i+1
return myDict
def motif_scan(s,m):
## s, m are numpy array
## s.shape = L*4
## m.shape = 4*W
L = s.shape[0]
W = m.shape[1]
score_list = []
for i in range(L-W):
sub = np.matmul(s[i:i+W,:],m)
# if i < 3:
# print ("DNA seq",s[i:i+W,:])
# print ("motif",m)
# print ("mapping score: ",np.trace(sub))
score_list.append(np.trace(sub))
return score_list
def DNA_motif_scan(DNA_array,m1,m2):
score_list = []
# print (m1)
# print (m2)
for i in range(DNA_array.shape[0]):
score_list_1 = motif_scan(DNA_array[i,:,:],m1)
# print ("score_list_1",score_list_1)
score_list_2 = motif_scan(DNA_array[i,:,:],m2)
# print ("score_list_2",score_list_2)
for j in range(len(score_list_1)):
if score_list_2[j] > score_list_1[j]:
score_list_1[j] = score_list_2[j]
score_list.append(score_list_1)
# print (score_list)
out = np.array(score_list)
print ("DNA scanning out shape",out.shape)
return out
def get_roi(myList):
## roi is region of interest, term used by janggu
# chr19:13180899-13180900+
# strand = [list(x)[-1] for x in myList]
strand = [x[-1] for x in myList]
# print (strand)
chr = [x[:-1].split(":")[0] for x in myList]
start = [int(x[:-1].split(":")[-1].split("-")[0]) for x in myList]
end = [int(x[:-1].split(":")[-1].split("-")[1]) for x in myList]
roi_A = []
roi = []
for i in range(len(chr)):
roi_A.append([chr[i],start[i],end[i],myList[i],".",strand[i]])
roi.append([chr[i],start[i],end[i]])
return roi_A,roi
def get_high_low_data(input,pos_cutoff,neg_cutoff):
df = pd.read_csv(input,index_col=0)
# pos = df[df['HbFBase']>=pos_cutoff].index.tolist()
pos = df[df['HbFBase']>pos_cutoff].index.tolist()
neg = df[df['HbFBase']<=neg_cutoff].index.tolist()
print ("Pos size %s. Neg size %s"%(len(pos),len(neg)))
return df.loc[pos+neg],pos,neg
def roi2fasta(roi,genome_fa,flank):
df = pd.DataFrame(roi)
df[1] = df[1]-flank
df[2] = df[2]+flank
df.to_csv("tmp.bed",sep="\t",header=False,index=False)
os.system("bedtools getfasta -fi %s -fo tmp.fa -bed tmp.bed -s -name"%(genome_fa))
seq = read_fasta("tmp.fa")
os.system("rm tmp.fa tmp.bed")
return seq
## Define parameters
# high_hbf = 50
high_hbf = 0
low_hbf = 0
input = "Editable_A_scores.combined.scores.csv"
flank = 100
refgenome="/home/yli11/Data/Human/hg19/fasta/hg19.fa"
bw_file="/home/yli11/Projects/Li_gRNA/footprint/H1_H2_GM12878_Tn5_bw/Hudep2.bw"
meme_file = "selected_motifs.meme"
top_n=5 # number of features for each motif
## read data
data,high,low = get_high_low_data(input,high_hbf,low_hbf)
roi_A,roi = get_roi(high+low)
seq = roi2fasta(roi_A,refgenome,flank)
test = pd.DataFrame.from_dict(seq,orient='index')
data['seq'] = test[0]
# 1. using janggu get DNA one-hot
## get one-hot data and ATAC feature matrix
dna_A = Bioseq.create_from_refgenome(name='dna',refgenome=refgenome,roi=roi_A,flank=flank)
Tn5 = Cover.create_from_bigwig('bigwig_coverage',bigwigfiles=bw_file,roi=roi,binsize=1,stepsize=1,flank=flank)
## ReShape
dna_A=np.reshape(dna_A,(len(high+low),flank*2+1,4))
bw_values=np.reshape(Tn5,(len(high+low),flank*2+1))
## get motif PWM, 3. read meme get motif PWMs in both strands
motifs = read_motif(meme_file)
# 4. scan motifs get score_list, max(pos_strand,neg_strand)
score_list_A = Parallel(n_jobs=-1)(delayed(DNA_motif_scan)(dna_A,motifs[m][0],motifs[m][1]) for m in motifs)
def get_footprint_score(s,l,footprint_score):
flanking=2
# print (s,l)
left_start = s-flanking
# print ("left_start:",left_start)
if left_start >= 0:
left = list(footprint_score[left_start:s])
else:
left = [np.nan]
right_end = s+l+flanking
# print ("right_end:",right_end)
# print ("len(footprint_score):",len(footprint_score))
if right_end <= len(footprint_score):
right = list(footprint_score[s+l:right_end])
else:
right = [np.nan]
flanking = np.nanmean(left+right)
# print ("left",left,"right",right)
# print ("flanking",flanking,"left+right",left+right)
occ = np.nanmean(footprint_score[s:s+l])
# print ("all:",footprint_score[s:s+l],"occ:",occ)
return flanking - occ
def get_top_n_motif_scores(score_list,top_n):
"""score_list.shape = L * 1
return
------
pos, value list
"""
return score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]
# 5. for each seq, get top N scores from (4) and their footprint score (given their positions), get adjusted score
def get_adjusted_motif_score(motif_score,footprint_score,n):
"""motif_score and footprint_score are same shape, N * L"""
out = []
# print ("motif_score",motif_score)
motif_length = footprint_score.shape[1] - motif_score.shape[1]
for i in range(motif_score.shape[0]):
pos,value = get_top_n_motif_scores(motif_score[i],n)
# print ("pos,:",pos)
# print ("value,:",value)
FOS_list = [get_footprint_score(s,motif_length,footprint_score[i]) for s in pos]
# print ("FOS_list:",FOS_list)
value = [value[i]*FOS_list[i] for i in range(len(value))]
out.append(value)
return out
adjusted_scores = Parallel(n_jobs=-1)(delayed(get_adjusted_motif_score)(motif_score,bw_values,top_n) for motif_score in score_list_A)
def set_col_names(motifs,top_n,label):
out = []
for i in motifs:
for j in range(top_n):
out.append("%s_%s_%s"%(label,i,j))
return out
## get feature table
adjusted_scores = np.array(adjusted_scores)
adjusted_scores = np.swapaxes(adjusted_scores,0,1)
adjusted_scores = adjusted_scores.reshape((len(high+low),top_n*len(motifs)))
adjusted_scores = pd.DataFrame(adjusted_scores)
adjusted_scores.columns = set_col_names(motifs,top_n,"motif_footprint_score")
adjusted_scores.index = high+low
df = pd.concat([adjusted_scores,data],axis=1)
# df.to_csv("ML_data.csv")
df.to_csv("all_A_features.csv")
| myString = "\n".join(map(lambda x:"\t".join(x.strip().split()),lines[i+2:i+2+motifLength])).replace(" "," ")
df = pd.read_csv(StringIO(myString), sep="\t",header=None)
df.columns=['A','C','G','T']
myDict[myList[1]+label] = df
i = i+2+motifLength
if df.shape[0] != motifLength or df.shape[1] !=4:
print ("something is wrong")
continue | conditional_block |
feature_extraction.py | import numpy as np
import os
import pandas as pd
from Bio.Seq import Seq
from Bio import SeqIO
try:
from StringIO import StringIO ## for Python 2
except ImportError:
from io import StringIO ## for Python 3
import uuid
from joblib import Parallel, delayed
import argparse
import matplotlib
matplotlib.use('agg')
import seaborn as sns
import matplotlib.pyplot as plt
from pkg_resources import resource_filename
from janggu.data import Bioseq
from janggu.data import ReduceDim
import numpy as np
from janggu import inputlayer
from janggu import outputconv
from janggu import DnaConv2D
from janggu.data import ReduceDim
from janggu.data import Cover
try:
from StringIO import StringIO ## for Python 2
except ImportError:
from io import StringIO ## for Python 3
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import numpy as np
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import pandas as pd
import matplotlib.pylab as plt
import numpy as np
import scipy
import seaborn as sns
import glob
from sklearn.model_selection import KFold,StratifiedKFold
import warnings
from sklearn.metrics import roc_curve,roc_auc_score,average_precision_score,accuracy_score
import warnings
warnings.filterwarnings('ignore')
# warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor,RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.scorer import make_scorer
from sklearn.model_selection import train_test_split
from sklearn.base import TransformerMixin
from sklearn.datasets import make_regression
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor,GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LinearRegression, Ridge
import scipy
import numpy as np
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import LeaveOneOut
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from sklearn import linear_model
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge,Lars,BayesianRidge
from copy import deepcopy as dp
"""
Feature extraction (Top motif scores)
1. using janggu get DNA one-hot
3. read meme get motif PWMs in both strands
4. scan motifs get score_list, max(pos_strand,neg_strand)
with tree-based methods, we don't need to do normalization here
5. for each seq, get top N scores from (4) and their footprint score (given their positions), get adjusted score
Dependency
----------
meme (to get motif revcomp)
bedtools (to get fasta sequences for gkm_svm)
python library
--------------
janggu (tensorflow + keras)
biopython
sklearn
joblib
"""
def read_fasta(f):
my_dict = {}
for r in SeqIO.parse(f, "fasta"):
my_dict[r.id] = str(r.seq).upper()
return my_dict
def read_motif(meme_file):
revcomp_file = "/tmp/"+str(uuid.uuid4())
os.system("meme-get-motif -rc -all %s > %s"%(meme_file,revcomp_file))
original_motif_label = "++original++"
revcomp_motif_label = "--revcomp--"
dict1 = parse_meme(meme_file,label=original_motif_label)
dict2 = parse_meme(revcomp_file,label=revcomp_motif_label)
myDict = {}
for k in dict1:
motif_name = k.replace(original_motif_label,"")
myDict[motif_name]=[dict1[k].T.values,dict2[k.replace(original_motif_label,revcomp_motif_label)].T.values]
return myDict
def parse_meme(file,label=""):
"""function to read meme file to pd.DataFrame"""
lines = open(file).readlines()
i = 0
myDict = {}
while i < len(lines):
myList = lines[i].strip().split()
if len(myList) < 1:
i = i + 1
continue
if myList[0] == "MOTIF":
if lines[i+1].strip() == "":
desc = lines[i+2].strip().split()
flag = True
else:
desc = lines[i+1].strip().split()
flag = False
try:
motifLength = int(desc[5])
except:
print (desc)
i = i+1
continue
if flag:
myString = "\n".join(map(lambda x:"\t".join(x.strip().split()),lines[i+3:i+3+motifLength])).replace(" "," ")
df = pd.read_csv(StringIO(myString), sep="\t",header=None)
df.columns=['A','C','G','T']
myDict[myList[1]+label] = df
if df.shape[0] != motifLength or df.shape[1] !=4:
print ("something is wrong")
i = i+3+motifLength
continue
else:
myString = "\n".join(map(lambda x:"\t".join(x.strip().split()),lines[i+2:i+2+motifLength])).replace(" "," ")
df = pd.read_csv(StringIO(myString), sep="\t",header=None)
df.columns=['A','C','G','T']
myDict[myList[1]+label] = df
i = i+2+motifLength
if df.shape[0] != motifLength or df.shape[1] !=4:
print ("something is wrong")
continue
i = i+1
return myDict
def motif_scan(s,m):
## s, m are numpy array
## s.shape = L*4
## m.shape = 4*W
L = s.shape[0]
W = m.shape[1]
score_list = []
for i in range(L-W):
sub = np.matmul(s[i:i+W,:],m)
# if i < 3:
# print ("DNA seq",s[i:i+W,:])
# print ("motif",m)
# print ("mapping score: ",np.trace(sub))
score_list.append(np.trace(sub))
return score_list
def DNA_motif_scan(DNA_array,m1,m2):
score_list = []
# print (m1)
# print (m2)
for i in range(DNA_array.shape[0]):
score_list_1 = motif_scan(DNA_array[i,:,:],m1)
# print ("score_list_1",score_list_1)
score_list_2 = motif_scan(DNA_array[i,:,:],m2)
# print ("score_list_2",score_list_2)
for j in range(len(score_list_1)):
if score_list_2[j] > score_list_1[j]:
score_list_1[j] = score_list_2[j]
score_list.append(score_list_1)
# print (score_list)
out = np.array(score_list)
print ("DNA scanning out shape",out.shape)
return out
def get_roi(myList):
## roi is region of interest, term used by janggu
# chr19:13180899-13180900+
# strand = [list(x)[-1] for x in myList]
strand = [x[-1] for x in myList]
# print (strand)
chr = [x[:-1].split(":")[0] for x in myList]
start = [int(x[:-1].split(":")[-1].split("-")[0]) for x in myList]
end = [int(x[:-1].split(":")[-1].split("-")[1]) for x in myList]
roi_A = []
roi = []
for i in range(len(chr)):
roi_A.append([chr[i],start[i],end[i],myList[i],".",strand[i]])
roi.append([chr[i],start[i],end[i]])
return roi_A,roi
def get_high_low_data(input,pos_cutoff,neg_cutoff):
df = pd.read_csv(input,index_col=0)
# pos = df[df['HbFBase']>=pos_cutoff].index.tolist()
pos = df[df['HbFBase']>pos_cutoff].index.tolist()
neg = df[df['HbFBase']<=neg_cutoff].index.tolist()
print ("Pos size %s. Neg size %s"%(len(pos),len(neg)))
return df.loc[pos+neg],pos,neg
def roi2fasta(roi,genome_fa,flank):
|
## Define parameters
# high_hbf = 50
high_hbf = 0
low_hbf = 0
input = "Editable_A_scores.combined.scores.csv"
flank = 100
refgenome="/home/yli11/Data/Human/hg19/fasta/hg19.fa"
bw_file="/home/yli11/Projects/Li_gRNA/footprint/H1_H2_GM12878_Tn5_bw/Hudep2.bw"
meme_file = "selected_motifs.meme"
top_n=5 # number of features for each motif
## read data
data,high,low = get_high_low_data(input,high_hbf,low_hbf)
roi_A,roi = get_roi(high+low)
seq = roi2fasta(roi_A,refgenome,flank)
test = pd.DataFrame.from_dict(seq,orient='index')
data['seq'] = test[0]
# 1. using janggu get DNA one-hot
## get one-hot data and ATAC feature matrix
dna_A = Bioseq.create_from_refgenome(name='dna',refgenome=refgenome,roi=roi_A,flank=flank)
Tn5 = Cover.create_from_bigwig('bigwig_coverage',bigwigfiles=bw_file,roi=roi,binsize=1,stepsize=1,flank=flank)
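# Tn5: per-base bigwig coverage over each ROI (plus flanks); reshaped below and used as the
# footprint signal when adjusting the motif scores.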
## ReShape
dna_A=np.reshape(dna_A,(len(high+low),flank*2+1,4))
bw_values=np.reshape(Tn5,(len(high+low),flank*2+1))
## get motif PWM, 3. read meme get motif PWMs in both strands
motifs = read_motif(meme_file)
# 4. scan motifs get score_list, max(pos_strand,neg_strand)
score_list_A = Parallel(n_jobs=-1)(delayed(DNA_motif_scan)(dna_A,motifs[m][0],motifs[m][1]) for m in motifs)
def get_footprint_score(s,l,footprint_score):
flanking=2
# print (s,l)
left_start = s-flanking
# print ("left_start:",left_start)
if left_start >= 0:
left = list(footprint_score[left_start:s])
else:
left = [np.nan]
right_end = s+l+flanking
# print ("right_end:",right_end)
# print ("len(footprint_score):",len(footprint_score))
if right_end <= len(footprint_score):
right = list(footprint_score[s+l:right_end])
else:
right = [np.nan]
flanking = np.nanmean(left+right)
# print ("left",left,"right",right)
# print ("flanking",flanking,"left+right",left+right)
occ = np.nanmean(footprint_score[s:s+l])
# print ("all:",footprint_score[s:s+l],"occ:",occ)
return flanking - occ
def get_top_n_motif_scores(score_list,top_n):
"""score_list.shape = L * 1
return
------
pos, value list
"""
return score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]
# 5. for each seq, get top N scores from (4) and their footprint score (given their positions), get adjusted score
def get_adjusted_motif_score(motif_score,footprint_score,n):
"""motif_score and footprint_score are same shape, N * L"""
out = []
# print ("motif_score",motif_score)
motif_length = footprint_score.shape[1] - motif_score.shape[1]
for i in range(motif_score.shape[0]):
pos,value = get_top_n_motif_scores(motif_score[i],n)
# print ("pos,:",pos)
# print ("value,:",value)
FOS_list = [get_footprint_score(s,motif_length,footprint_score[i]) for s in pos]
# print ("FOS_list:",FOS_list)
value = [value[i]*FOS_list[i] for i in range(len(value))]
out.append(value)
return out
adjusted_scores = Parallel(n_jobs=-1)(delayed(get_adjusted_motif_score)(motif_score,bw_values,top_n) for motif_score in score_list_A)
def set_col_names(motifs,top_n,label):
out = []
for i in motifs:
for j in range(top_n):
out.append("%s_%s_%s"%(label,i,j))
return out
## get feature table
adjusted_scores = np.array(adjusted_scores)
adjusted_scores = np.swapaxes(adjusted_scores,0,1)
adjusted_scores = adjusted_scores.reshape((len(high+low),top_n*len(motifs)))
adjusted_scores = pd.DataFrame(adjusted_scores)
adjusted_scores.columns = set_col_names(motifs,top_n,"motif_footprint_score")
adjusted_scores.index = high+low
df = pd.concat([adjusted_scores,data],axis=1)
# df.to_csv("ML_data.csv")
df.to_csv("all_A_features.csv")
| df = pd.DataFrame(roi)
df[1] = df[1]-flank
df[2] = df[2]+flank
df.to_csv("tmp.bed",sep="\t",header=False,index=False)
os.system("bedtools getfasta -fi %s -fo tmp.fa -bed tmp.bed -s -name"%(genome_fa))
seq = read_fasta("tmp.fa")
os.system("rm tmp.fa tmp.bed")
return seq | identifier_body |
public_key.rs | use crate::key::{KeyError, PublicKey};
use crate::ssh::decode::SshComplexTypeDecode;
use crate::ssh::encode::SshComplexTypeEncode;
use std::str::FromStr;
use std::{io, string};
use thiserror::Error;
#[derive(Debug, Error)]
pub enum SshPublicKeyError {
#[error(transparent)]
IoError(#[from] io::Error),
#[error(transparent)]
FromUtf8Error(#[from] string::FromUtf8Error),
#[error(transparent)]
RsaError(#[from] rsa::errors::Error),
#[error(transparent)]
Base64DecodeError(#[from] base64::DecodeError),
#[error("Unknown key type. We only support RSA")]
UnknownKeyType,
#[error(transparent)]
KeyError(#[from] KeyError),
}
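/// Algorithm-specific public key material carried inside an SSH public key.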
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum SshBasePublicKey {
Rsa(PublicKey),
Ec(PublicKey),
Ed(PublicKey),
}
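/// A parsed SSH public key: the inner key material plus the comment that follows it in the
/// OpenSSH one-line format.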
#[derive(Debug, Clone, PartialEq, Eq)] | pub struct SshPublicKey {
pub inner_key: SshBasePublicKey,
pub comment: String,
}
impl SshPublicKey {
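/// Serializes the key back into the OpenSSH one-line text form (e.g. `ssh-rsa AAAA... comment`).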
pub fn to_string(&self) -> Result<String, SshPublicKeyError> {
let mut buffer = Vec::with_capacity(1024);
self.encode(&mut buffer)?;
Ok(String::from_utf8(buffer)?)
}
}
impl FromStr for SshPublicKey {
type Err = SshPublicKeyError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
SshComplexTypeDecode::decode(s.as_bytes())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_files;
use num_bigint_dig::BigUint;
use rstest::rstest;
#[test]
fn decode_ssh_rsa_4096_public_key() {
// ssh-keygen -t rsa -b 4096 -C "[email protected]"
let ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDbUCK4dH1n4dOFBv/sjfMma4q5qe7SZ49j2GODGKr8DueZMWYLTck61uUMMlVBT3XyX6me6X4WsBoijzQWvgwpLCGTqlhQTntm5FphXHHkKxFvjMhPzCnHNS+L0ebzewcecsY5rtgw+6BhFwdZGhFBfif1/6s9q7y7+8Ge3hUIEqLdiMDDzxc66zIaW26jZxO4BMHuKp7Xln2JeDjsRHvz0vBNAddOfkvtp+gM72OH4tm9wS/V8bVOZ68oU0os8DuiEGnwA5RnjOjaFdHWt1mD8B+nRINxI8zYyQcqp3t4p552P0Frhvjgixi67Ryax0DUNuzN2MpQ0ORUgRkfy/xWvImUseP/BfqvNiWkFAWHNDDSsc50Wmr+g0JicG2gowHLYPxKRjLIbOq+JgxHrE4TdaA2NJoeUppJgWU4yuGl5fx1G+Bcdr0C+lsMj14Hp+aGajEOLQ7Mq3HzWEox9G1KgN4r266Mofd8T4vrjF6Ja9E+pp0pXgEv2cvtYJLP0qdrHWafb3lWsP4hJWnv/NaXP6ZAxiEeHsigrY98kmgZbHm/6AmiBJ7bKQ/S/PelYj3mTL0aYkGF79qVtAzSl7yI9yVyHsl7dt5jdmp6+IofuEtNfnAcfoaSLu0Ojotp9VBMvil6ojScbJNLBL8tGN4+urIcsNUvVjAOnwc3nothKw== [email protected]\r\n";
let public_key = SshPublicKey::from_str(ssh_public_key).unwrap();
assert_eq!("[email protected]".to_owned(), public_key.comment);
assert_eq!(
SshBasePublicKey::Rsa(PublicKey::from_rsa_components(
&BigUint::from_bytes_be(&[
219, 80, 34, 184, 116, 125, 103, 225, 211, 133, 6, 255, 236, 141, 243, 38, 107, 138, 185, 169, 238,
210, 103, 143, 99, 216, 99, 131, 24, 170, 252, 14, 231, 153, 49, 102, 11, 77, 201, 58, 214, 229,
12, 50, 85, 65, 79, 117, 242, 95, 169, 158, 233, 126, 22, 176, 26, 34, 143, 52, 22, 190, 12, 41,
44, 33, 147, 170, 88, 80, 78, 123, 102, 228, 90, 97, 92, 113, 228, 43, 17, 111, 140, 200, 79, 204,
41, 199, 53, 47, 139, 209, 230, 243, 123, 7, 30, 114, 198, 57, 174, 216, 48, 251, 160, 97, 23, 7,
89, 26, 17, 65, 126, 39, 245, 255, 171, 61, 171, 188, 187, 251, 193, 158, 222, 21, 8, 18, 162, 221,
136, 192, 195, 207, 23, 58, 235, 50, 26, 91, 110, 163, 103, 19, 184, 4, 193, 238, 42, 158, 215,
150, 125, 137, 120, 56, 236, 68, 123, 243, 210, 240, 77, 1, 215, 78, 126, 75, 237, 167, 232, 12,
239, 99, 135, 226, 217, 189, 193, 47, 213, 241, 181, 78, 103, 175, 40, 83, 74, 44, 240, 59, 162,
16, 105, 240, 3, 148, 103, 140, 232, 218, 21, 209, 214, 183, 89, 131, 240, 31, 167, 68, 131, 113,
35, 204, 216, 201, 7, 42, 167, 123, 120, 167, 158, 118, 63, 65, 107, 134, 248, 224, 139, 24, 186,
237, 28, 154, 199, 64, 212, 54, 236, 205, 216, 202, 80, 208, 228, 84, 129, 25, 31, 203, 252, 86,
188, 137, 148, 177, 227, 255, 5, 250, 175, 54, 37, 164, 20, 5, 135, 52, 48, 210, 177, 206, 116, 90,
106, 254, 131, 66, 98, 112, 109, 160, 163, 1, 203, 96, 252, 74, 70, 50, 200, 108, 234, 190, 38, 12,
71, 172, 78, 19, 117, 160, 54, 52, 154, 30, 82, 154, 73, 129, 101, 56, 202, 225, 165, 229, 252,
117, 27, 224, 92, 118, 189, 2, 250, 91, 12, 143, 94, 7, 167, 230, 134, 106, 49, 14, 45, 14, 204,
171, 113, 243, 88, 74, 49, 244, 109, 74, 128, 222, 43, 219, 174, 140, 161, 247, 124, 79, 139, 235,
140, 94, 137, 107, 209, 62, 166, 157, 41, 94, 1, 47, 217, 203, 237, 96, 146, 207, 210, 167, 107,
29, 102, 159, 111, 121, 86, 176, 254, 33, 37, 105, 239, 252, 214, 151, 63, 166, 64, 198, 33, 30,
30, 200, 160, 173, 143, 124, 146, 104, 25, 108, 121, 191, 232, 9, 162, 4, 158, 219, 41, 15, 210,
252, 247, 165, 98, 61, 230, 76, 189, 26, 98, 65, 133, 239, 218, 149, 180, 12, 210, 151, 188, 136,
247, 37, 114, 30, 201, 123, 118, 222, 99, 118, 106, 122, 248, 138, 31, 184, 75, 77, 126, 112, 28,
126, 134, 146, 46, 237, 14, 142, 139, 105, 245, 80, 76, 190, 41, 122, 162, 52, 156, 108, 147, 75,
4, 191, 45, 24, 222, 62, 186, 178, 28, 176, 213, 47, 86, 48, 14, 159, 7, 55, 158, 139, 97, 43
]),
&BigUint::from_bytes_be(&[1, 0, 1])
)),
public_key.inner_key
);
}
#[test]
fn decode_ssh_rsa_2048_public_key() {
// ssh-keygen -t rsa -b 2048 -C "[email protected]"
let ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI9ht2g2qOPgSG5huVYjFUouyaw59/6QuQqUVGwgnITlhRbM+bkvJQfcuiqcv+vD9/86Dfugk79sSfg/aVK+V/plqAAZoujz/wALDjEphSxAUcAR+t4i2F39Pa71MSc37I9L30z31tcba1X7od7hzrVMl9iurkOyBC4xcIWa1H8h0mDyoXyWPTqoTONDUe9dB1eu6GbixCfUcxvdVt0pAVJTdOmbNXKwRo5WXfMrsqKsFT2Acg4Vm4TfLShSSUW4rqM6GOBCfF6jnxFvTSDentH5hykjWL3lMCghD+1hJyOdnMHJC/5qTUGOB86MxsR4RCXqS+LZrGpMScVyDQge7r [email protected]\r\n";
let public_key: SshPublicKey = SshPublicKey::from_str(ssh_public_key).unwrap();
assert_eq!("[email protected]".to_owned(), public_key.comment);
assert_eq!(
SshBasePublicKey::Rsa(PublicKey::from_rsa_components(
&BigUint::from_bytes_be(&[
200, 246, 27, 118, 131, 106, 142, 62, 4, 134, 230, 27, 149, 98, 49, 84, 162, 236, 154, 195, 159,
127, 233, 11, 144, 169, 69, 70, 194, 9, 200, 78, 88, 81, 108, 207, 155, 146, 242, 80, 125, 203,
162, 169, 203, 254, 188, 63, 127, 243, 160, 223, 186, 9, 59, 246, 196, 159, 131, 246, 149, 43, 229,
127, 166, 90, 128, 1, 154, 46, 143, 63, 240, 0, 176, 227, 18, 152, 82, 196, 5, 28, 1, 31, 173, 226,
45, 133, 223, 211, 218, 239, 83, 18, 115, 126, 200, 244, 189, 244, 207, 125, 109, 113, 182, 181,
95, 186, 29, 238, 28, 235, 84, 201, 125, 138, 234, 228, 59, 32, 66, 227, 23, 8, 89, 173, 71, 242,
29, 38, 15, 42, 23, 201, 99, 211, 170, 132, 206, 52, 53, 30, 245, 208, 117, 122, 238, 134, 110, 44,
66, 125, 71, 49, 189, 213, 109, 210, 144, 21, 37, 55, 78, 153, 179, 87, 43, 4, 104, 229, 101, 223,
50, 187, 42, 42, 193, 83, 216, 7, 32, 225, 89, 184, 77, 242, 210, 133, 36, 148, 91, 138, 234, 51,
161, 142, 4, 39, 197, 234, 57, 241, 22, 244, 210, 13, 233, 237, 31, 152, 114, 146, 53, 139, 222,
83, 2, 130, 16, 254, 214, 18, 114, 57, 217, 204, 28, 144, 191, 230, 164, 212, 24, 224, 124, 232,
204, 108, 71, 132, 66, 94, 164, 190, 45, 154, 198, 164, 196, 156, 87, 32, 208, 129, 238, 235
]),
&BigUint::from_bytes_be(&[1, 0, 1])
)),
public_key.inner_key
);
}
#[test]
fn encode_ssh_rsa_4096_public_key() {
// ssh-keygen -t rsa -b 4096 -C "[email protected]"
let ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDbUCK4dH1n4dOFBv/sjfMma4q5qe7SZ49j2GODGKr8DueZMWYLTck61uUMMlVBT3XyX6me6X4WsBoijzQWvgwpLCGTqlhQTntm5FphXHHkKxFvjMhPzCnHNS+L0ebzewcecsY5rtgw+6BhFwdZGhFBfif1/6s9q7y7+8Ge3hUIEqLdiMDDzxc66zIaW26jZxO4BMHuKp7Xln2JeDjsRHvz0vBNAddOfkvtp+gM72OH4tm9wS/V8bVOZ68oU0os8DuiEGnwA5RnjOjaFdHWt1mD8B+nRINxI8zYyQcqp3t4p552P0Frhvjgixi67Ryax0DUNuzN2MpQ0ORUgRkfy/xWvImUseP/BfqvNiWkFAWHNDDSsc50Wmr+g0JicG2gowHLYPxKRjLIbOq+JgxHrE4TdaA2NJoeUppJgWU4yuGl5fx1G+Bcdr0C+lsMj14Hp+aGajEOLQ7Mq3HzWEox9G1KgN4r266Mofd8T4vrjF6Ja9E+pp0pXgEv2cvtYJLP0qdrHWafb3lWsP4hJWnv/NaXP6ZAxiEeHsigrY98kmgZbHm/6AmiBJ7bKQ/S/PelYj3mTL0aYkGF79qVtAzSl7yI9yVyHsl7dt5jdmp6+IofuEtNfnAcfoaSLu0Ojotp9VBMvil6ojScbJNLBL8tGN4+urIcsNUvVjAOnwc3nothKw== [email protected]\r\n";
let public_key = SshPublicKey::from_str(ssh_public_key).unwrap();
let ssh_public_key_after = public_key.to_string().unwrap();
assert_eq!(ssh_public_key, ssh_public_key_after.as_str());
}
#[test]
fn encode_ssh_rsa_2048_public_key() {
// ssh-keygen -t rsa -b 2048 -C "[email protected]"
let ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI9ht2g2qOPgSG5huVYjFUouyaw59/6QuQqUVGwgnITlhRbM+bkvJQfcuiqcv+vD9/86Dfugk79sSfg/aVK+V/plqAAZoujz/wALDjEphSxAUcAR+t4i2F39Pa71MSc37I9L30z31tcba1X7od7hzrVMl9iurkOyBC4xcIWa1H8h0mDyoXyWPTqoTONDUe9dB1eu6GbixCfUcxvdVt0pAVJTdOmbNXKwRo5WXfMrsqKsFT2Acg4Vm4TfLShSSUW4rqM6GOBCfF6jnxFvTSDentH5hykjWL3lMCghD+1hJyOdnMHJC/5qTUGOB86MxsR4RCXqS+LZrGpMScVyDQge7r [email protected]\r\n";
let public_key = SshPublicKey::from_str(ssh_public_key).unwrap();
let ssh_public_key_after = public_key.to_string().unwrap();
assert_eq!(ssh_public_key, ssh_public_key_after.as_str());
}
#[rstest]
#[case(test_files::SSH_PUBLIC_KEY_EC_P256)]
#[case(test_files::SSH_PUBLIC_KEY_EC_P384)]
#[case(test_files::SSH_PUBLIC_KEY_EC_P521)]
fn ecdsa_roundtrip(#[case] key_str: &str) {
let public_key = SshPublicKey::from_str(key_str).unwrap();
let ssh_public_key_after = public_key.to_string().unwrap();
assert_eq!(key_str, ssh_public_key_after.as_str());
}
#[test]
fn ed25519_roundtrip() {
let public_key = SshPublicKey::from_str(test_files::SSH_PUBLIC_KEY_ED25519).unwrap();
let ssh_public_key_after = public_key.to_string().unwrap();
assert_eq!(test_files::SSH_PUBLIC_KEY_ED25519, ssh_public_key_after.as_str());
}
} | random_line_split |
|
public_key.rs | use crate::key::{KeyError, PublicKey};
use crate::ssh::decode::SshComplexTypeDecode;
use crate::ssh::encode::SshComplexTypeEncode;
use std::str::FromStr;
use std::{io, string};
use thiserror::Error;
#[derive(Debug, Error)]
pub enum SshPublicKeyError {
#[error(transparent)]
IoError(#[from] io::Error),
#[error(transparent)]
FromUtf8Error(#[from] string::FromUtf8Error),
#[error(transparent)]
RsaError(#[from] rsa::errors::Error),
#[error(transparent)]
Base64DecodeError(#[from] base64::DecodeError),
#[error("Unknown key type. We only support RSA")]
UnknownKeyType,
#[error(transparent)]
KeyError(#[from] KeyError),
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum SshBasePublicKey {
Rsa(PublicKey),
Ec(PublicKey),
Ed(PublicKey),
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SshPublicKey {
pub inner_key: SshBasePublicKey,
pub comment: String,
}
impl SshPublicKey {
pub fn to_string(&self) -> Result<String, SshPublicKeyError> {
let mut buffer = Vec::with_capacity(1024);
self.encode(&mut buffer)?;
Ok(String::from_utf8(buffer)?)
}
}
impl FromStr for SshPublicKey {
type Err = SshPublicKeyError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
SshComplexTypeDecode::decode(s.as_bytes())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_files;
use num_bigint_dig::BigUint;
use rstest::rstest;
#[test]
fn decode_ssh_rsa_4096_public_key() {
// ssh-keygen -t rsa -b 4096 -C "[email protected]"
let ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDbUCK4dH1n4dOFBv/sjfMma4q5qe7SZ49j2GODGKr8DueZMWYLTck61uUMMlVBT3XyX6me6X4WsBoijzQWvgwpLCGTqlhQTntm5FphXHHkKxFvjMhPzCnHNS+L0ebzewcecsY5rtgw+6BhFwdZGhFBfif1/6s9q7y7+8Ge3hUIEqLdiMDDzxc66zIaW26jZxO4BMHuKp7Xln2JeDjsRHvz0vBNAddOfkvtp+gM72OH4tm9wS/V8bVOZ68oU0os8DuiEGnwA5RnjOjaFdHWt1mD8B+nRINxI8zYyQcqp3t4p552P0Frhvjgixi67Ryax0DUNuzN2MpQ0ORUgRkfy/xWvImUseP/BfqvNiWkFAWHNDDSsc50Wmr+g0JicG2gowHLYPxKRjLIbOq+JgxHrE4TdaA2NJoeUppJgWU4yuGl5fx1G+Bcdr0C+lsMj14Hp+aGajEOLQ7Mq3HzWEox9G1KgN4r266Mofd8T4vrjF6Ja9E+pp0pXgEv2cvtYJLP0qdrHWafb3lWsP4hJWnv/NaXP6ZAxiEeHsigrY98kmgZbHm/6AmiBJ7bKQ/S/PelYj3mTL0aYkGF79qVtAzSl7yI9yVyHsl7dt5jdmp6+IofuEtNfnAcfoaSLu0Ojotp9VBMvil6ojScbJNLBL8tGN4+urIcsNUvVjAOnwc3nothKw== [email protected]\r\n";
let public_key = SshPublicKey::from_str(ssh_public_key).unwrap();
assert_eq!("[email protected]".to_owned(), public_key.comment);
assert_eq!(
SshBasePublicKey::Rsa(PublicKey::from_rsa_components(
&BigUint::from_bytes_be(&[
219, 80, 34, 184, 116, 125, 103, 225, 211, 133, 6, 255, 236, 141, 243, 38, 107, 138, 185, 169, 238,
210, 103, 143, 99, 216, 99, 131, 24, 170, 252, 14, 231, 153, 49, 102, 11, 77, 201, 58, 214, 229,
12, 50, 85, 65, 79, 117, 242, 95, 169, 158, 233, 126, 22, 176, 26, 34, 143, 52, 22, 190, 12, 41,
44, 33, 147, 170, 88, 80, 78, 123, 102, 228, 90, 97, 92, 113, 228, 43, 17, 111, 140, 200, 79, 204,
41, 199, 53, 47, 139, 209, 230, 243, 123, 7, 30, 114, 198, 57, 174, 216, 48, 251, 160, 97, 23, 7,
89, 26, 17, 65, 126, 39, 245, 255, 171, 61, 171, 188, 187, 251, 193, 158, 222, 21, 8, 18, 162, 221,
136, 192, 195, 207, 23, 58, 235, 50, 26, 91, 110, 163, 103, 19, 184, 4, 193, 238, 42, 158, 215,
150, 125, 137, 120, 56, 236, 68, 123, 243, 210, 240, 77, 1, 215, 78, 126, 75, 237, 167, 232, 12,
239, 99, 135, 226, 217, 189, 193, 47, 213, 241, 181, 78, 103, 175, 40, 83, 74, 44, 240, 59, 162,
16, 105, 240, 3, 148, 103, 140, 232, 218, 21, 209, 214, 183, 89, 131, 240, 31, 167, 68, 131, 113,
35, 204, 216, 201, 7, 42, 167, 123, 120, 167, 158, 118, 63, 65, 107, 134, 248, 224, 139, 24, 186,
237, 28, 154, 199, 64, 212, 54, 236, 205, 216, 202, 80, 208, 228, 84, 129, 25, 31, 203, 252, 86,
188, 137, 148, 177, 227, 255, 5, 250, 175, 54, 37, 164, 20, 5, 135, 52, 48, 210, 177, 206, 116, 90,
106, 254, 131, 66, 98, 112, 109, 160, 163, 1, 203, 96, 252, 74, 70, 50, 200, 108, 234, 190, 38, 12,
71, 172, 78, 19, 117, 160, 54, 52, 154, 30, 82, 154, 73, 129, 101, 56, 202, 225, 165, 229, 252,
117, 27, 224, 92, 118, 189, 2, 250, 91, 12, 143, 94, 7, 167, 230, 134, 106, 49, 14, 45, 14, 204,
171, 113, 243, 88, 74, 49, 244, 109, 74, 128, 222, 43, 219, 174, 140, 161, 247, 124, 79, 139, 235,
140, 94, 137, 107, 209, 62, 166, 157, 41, 94, 1, 47, 217, 203, 237, 96, 146, 207, 210, 167, 107,
29, 102, 159, 111, 121, 86, 176, 254, 33, 37, 105, 239, 252, 214, 151, 63, 166, 64, 198, 33, 30,
30, 200, 160, 173, 143, 124, 146, 104, 25, 108, 121, 191, 232, 9, 162, 4, 158, 219, 41, 15, 210,
252, 247, 165, 98, 61, 230, 76, 189, 26, 98, 65, 133, 239, 218, 149, 180, 12, 210, 151, 188, 136,
247, 37, 114, 30, 201, 123, 118, 222, 99, 118, 106, 122, 248, 138, 31, 184, 75, 77, 126, 112, 28,
126, 134, 146, 46, 237, 14, 142, 139, 105, 245, 80, 76, 190, 41, 122, 162, 52, 156, 108, 147, 75,
4, 191, 45, 24, 222, 62, 186, 178, 28, 176, 213, 47, 86, 48, 14, 159, 7, 55, 158, 139, 97, 43
]),
&BigUint::from_bytes_be(&[1, 0, 1])
)),
public_key.inner_key
);
}
#[test]
fn | () {
// ssh-keygen -t rsa -b 2048 -C "[email protected]"
let ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI9ht2g2qOPgSG5huVYjFUouyaw59/6QuQqUVGwgnITlhRbM+bkvJQfcuiqcv+vD9/86Dfugk79sSfg/aVK+V/plqAAZoujz/wALDjEphSxAUcAR+t4i2F39Pa71MSc37I9L30z31tcba1X7od7hzrVMl9iurkOyBC4xcIWa1H8h0mDyoXyWPTqoTONDUe9dB1eu6GbixCfUcxvdVt0pAVJTdOmbNXKwRo5WXfMrsqKsFT2Acg4Vm4TfLShSSUW4rqM6GOBCfF6jnxFvTSDentH5hykjWL3lMCghD+1hJyOdnMHJC/5qTUGOB86MxsR4RCXqS+LZrGpMScVyDQge7r [email protected]\r\n";
let public_key: SshPublicKey = SshPublicKey::from_str(ssh_public_key).unwrap();
assert_eq!("[email protected]".to_owned(), public_key.comment);
assert_eq!(
SshBasePublicKey::Rsa(PublicKey::from_rsa_components(
&BigUint::from_bytes_be(&[
200, 246, 27, 118, 131, 106, 142, 62, 4, 134, 230, 27, 149, 98, 49, 84, 162, 236, 154, 195, 159,
127, 233, 11, 144, 169, 69, 70, 194, 9, 200, 78, 88, 81, 108, 207, 155, 146, 242, 80, 125, 203,
162, 169, 203, 254, 188, 63, 127, 243, 160, 223, 186, 9, 59, 246, 196, 159, 131, 246, 149, 43, 229,
127, 166, 90, 128, 1, 154, 46, 143, 63, 240, 0, 176, 227, 18, 152, 82, 196, 5, 28, 1, 31, 173, 226,
45, 133, 223, 211, 218, 239, 83, 18, 115, 126, 200, 244, 189, 244, 207, 125, 109, 113, 182, 181,
95, 186, 29, 238, 28, 235, 84, 201, 125, 138, 234, 228, 59, 32, 66, 227, 23, 8, 89, 173, 71, 242,
29, 38, 15, 42, 23, 201, 99, 211, 170, 132, 206, 52, 53, 30, 245, 208, 117, 122, 238, 134, 110, 44,
66, 125, 71, 49, 189, 213, 109, 210, 144, 21, 37, 55, 78, 153, 179, 87, 43, 4, 104, 229, 101, 223,
50, 187, 42, 42, 193, 83, 216, 7, 32, 225, 89, 184, 77, 242, 210, 133, 36, 148, 91, 138, 234, 51,
161, 142, 4, 39, 197, 234, 57, 241, 22, 244, 210, 13, 233, 237, 31, 152, 114, 146, 53, 139, 222,
83, 2, 130, 16, 254, 214, 18, 114, 57, 217, 204, 28, 144, 191, 230, 164, 212, 24, 224, 124, 232,
204, 108, 71, 132, 66, 94, 164, 190, 45, 154, 198, 164, 196, 156, 87, 32, 208, 129, 238, 235
]),
&BigUint::from_bytes_be(&[1, 0, 1])
)),
public_key.inner_key
);
}
#[test]
fn encode_ssh_rsa_4096_public_key() {
// ssh-keygen -t rsa -b 4096 -C "[email protected]"
let ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDbUCK4dH1n4dOFBv/sjfMma4q5qe7SZ49j2GODGKr8DueZMWYLTck61uUMMlVBT3XyX6me6X4WsBoijzQWvgwpLCGTqlhQTntm5FphXHHkKxFvjMhPzCnHNS+L0ebzewcecsY5rtgw+6BhFwdZGhFBfif1/6s9q7y7+8Ge3hUIEqLdiMDDzxc66zIaW26jZxO4BMHuKp7Xln2JeDjsRHvz0vBNAddOfkvtp+gM72OH4tm9wS/V8bVOZ68oU0os8DuiEGnwA5RnjOjaFdHWt1mD8B+nRINxI8zYyQcqp3t4p552P0Frhvjgixi67Ryax0DUNuzN2MpQ0ORUgRkfy/xWvImUseP/BfqvNiWkFAWHNDDSsc50Wmr+g0JicG2gowHLYPxKRjLIbOq+JgxHrE4TdaA2NJoeUppJgWU4yuGl5fx1G+Bcdr0C+lsMj14Hp+aGajEOLQ7Mq3HzWEox9G1KgN4r266Mofd8T4vrjF6Ja9E+pp0pXgEv2cvtYJLP0qdrHWafb3lWsP4hJWnv/NaXP6ZAxiEeHsigrY98kmgZbHm/6AmiBJ7bKQ/S/PelYj3mTL0aYkGF79qVtAzSl7yI9yVyHsl7dt5jdmp6+IofuEtNfnAcfoaSLu0Ojotp9VBMvil6ojScbJNLBL8tGN4+urIcsNUvVjAOnwc3nothKw== [email protected]\r\n";
let public_key = SshPublicKey::from_str(ssh_public_key).unwrap();
let ssh_public_key_after = public_key.to_string().unwrap();
assert_eq!(ssh_public_key, ssh_public_key_after.as_str());
}
#[test]
fn encode_ssh_rsa_2048_public_key() {
// ssh-keygen -t rsa -b 2048 -C "[email protected]"
let ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI9ht2g2qOPgSG5huVYjFUouyaw59/6QuQqUVGwgnITlhRbM+bkvJQfcuiqcv+vD9/86Dfugk79sSfg/aVK+V/plqAAZoujz/wALDjEphSxAUcAR+t4i2F39Pa71MSc37I9L30z31tcba1X7od7hzrVMl9iurkOyBC4xcIWa1H8h0mDyoXyWPTqoTONDUe9dB1eu6GbixCfUcxvdVt0pAVJTdOmbNXKwRo5WXfMrsqKsFT2Acg4Vm4TfLShSSUW4rqM6GOBCfF6jnxFvTSDentH5hykjWL3lMCghD+1hJyOdnMHJC/5qTUGOB86MxsR4RCXqS+LZrGpMScVyDQge7r [email protected]\r\n";
let public_key = SshPublicKey::from_str(ssh_public_key).unwrap();
let ssh_public_key_after = public_key.to_string().unwrap();
assert_eq!(ssh_public_key, ssh_public_key_after.as_str());
}
#[rstest]
#[case(test_files::SSH_PUBLIC_KEY_EC_P256)]
#[case(test_files::SSH_PUBLIC_KEY_EC_P384)]
#[case(test_files::SSH_PUBLIC_KEY_EC_P521)]
fn ecdsa_roundtrip(#[case] key_str: &str) {
let public_key = SshPublicKey::from_str(key_str).unwrap();
let ssh_public_key_after = public_key.to_string().unwrap();
assert_eq!(key_str, ssh_public_key_after.as_str());
}
#[test]
fn ed25519_roundtrip() {
let public_key = SshPublicKey::from_str(test_files::SSH_PUBLIC_KEY_ED25519).unwrap();
let ssh_public_key_after = public_key.to_string().unwrap();
assert_eq!(test_files::SSH_PUBLIC_KEY_ED25519, ssh_public_key_after.as_str());
}
}
| decode_ssh_rsa_2048_public_key | identifier_name |
public_key.rs | use crate::key::{KeyError, PublicKey};
use crate::ssh::decode::SshComplexTypeDecode;
use crate::ssh::encode::SshComplexTypeEncode;
use std::str::FromStr;
use std::{io, string};
use thiserror::Error;
#[derive(Debug, Error)]
pub enum SshPublicKeyError {
#[error(transparent)]
IoError(#[from] io::Error),
#[error(transparent)]
FromUtf8Error(#[from] string::FromUtf8Error),
#[error(transparent)]
RsaError(#[from] rsa::errors::Error),
#[error(transparent)]
Base64DecodeError(#[from] base64::DecodeError),
#[error("Unknown key type. We only support RSA")]
UnknownKeyType,
#[error(transparent)]
KeyError(#[from] KeyError),
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum SshBasePublicKey {
Rsa(PublicKey),
Ec(PublicKey),
Ed(PublicKey),
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SshPublicKey {
pub inner_key: SshBasePublicKey,
pub comment: String,
}
impl SshPublicKey {
pub fn to_string(&self) -> Result<String, SshPublicKeyError> {
let mut buffer = Vec::with_capacity(1024);
self.encode(&mut buffer)?;
Ok(String::from_utf8(buffer)?)
}
}
impl FromStr for SshPublicKey {
type Err = SshPublicKeyError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
SshComplexTypeDecode::decode(s.as_bytes())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_files;
use num_bigint_dig::BigUint;
use rstest::rstest;
#[test]
fn decode_ssh_rsa_4096_public_key() {
// ssh-keygen -t rsa -b 4096 -C "[email protected]"
let ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDbUCK4dH1n4dOFBv/sjfMma4q5qe7SZ49j2GODGKr8DueZMWYLTck61uUMMlVBT3XyX6me6X4WsBoijzQWvgwpLCGTqlhQTntm5FphXHHkKxFvjMhPzCnHNS+L0ebzewcecsY5rtgw+6BhFwdZGhFBfif1/6s9q7y7+8Ge3hUIEqLdiMDDzxc66zIaW26jZxO4BMHuKp7Xln2JeDjsRHvz0vBNAddOfkvtp+gM72OH4tm9wS/V8bVOZ68oU0os8DuiEGnwA5RnjOjaFdHWt1mD8B+nRINxI8zYyQcqp3t4p552P0Frhvjgixi67Ryax0DUNuzN2MpQ0ORUgRkfy/xWvImUseP/BfqvNiWkFAWHNDDSsc50Wmr+g0JicG2gowHLYPxKRjLIbOq+JgxHrE4TdaA2NJoeUppJgWU4yuGl5fx1G+Bcdr0C+lsMj14Hp+aGajEOLQ7Mq3HzWEox9G1KgN4r266Mofd8T4vrjF6Ja9E+pp0pXgEv2cvtYJLP0qdrHWafb3lWsP4hJWnv/NaXP6ZAxiEeHsigrY98kmgZbHm/6AmiBJ7bKQ/S/PelYj3mTL0aYkGF79qVtAzSl7yI9yVyHsl7dt5jdmp6+IofuEtNfnAcfoaSLu0Ojotp9VBMvil6ojScbJNLBL8tGN4+urIcsNUvVjAOnwc3nothKw== [email protected]\r\n";
let public_key = SshPublicKey::from_str(ssh_public_key).unwrap();
assert_eq!("[email protected]".to_owned(), public_key.comment);
assert_eq!(
SshBasePublicKey::Rsa(PublicKey::from_rsa_components(
&BigUint::from_bytes_be(&[
219, 80, 34, 184, 116, 125, 103, 225, 211, 133, 6, 255, 236, 141, 243, 38, 107, 138, 185, 169, 238,
210, 103, 143, 99, 216, 99, 131, 24, 170, 252, 14, 231, 153, 49, 102, 11, 77, 201, 58, 214, 229,
12, 50, 85, 65, 79, 117, 242, 95, 169, 158, 233, 126, 22, 176, 26, 34, 143, 52, 22, 190, 12, 41,
44, 33, 147, 170, 88, 80, 78, 123, 102, 228, 90, 97, 92, 113, 228, 43, 17, 111, 140, 200, 79, 204,
41, 199, 53, 47, 139, 209, 230, 243, 123, 7, 30, 114, 198, 57, 174, 216, 48, 251, 160, 97, 23, 7,
89, 26, 17, 65, 126, 39, 245, 255, 171, 61, 171, 188, 187, 251, 193, 158, 222, 21, 8, 18, 162, 221,
136, 192, 195, 207, 23, 58, 235, 50, 26, 91, 110, 163, 103, 19, 184, 4, 193, 238, 42, 158, 215,
150, 125, 137, 120, 56, 236, 68, 123, 243, 210, 240, 77, 1, 215, 78, 126, 75, 237, 167, 232, 12,
239, 99, 135, 226, 217, 189, 193, 47, 213, 241, 181, 78, 103, 175, 40, 83, 74, 44, 240, 59, 162,
16, 105, 240, 3, 148, 103, 140, 232, 218, 21, 209, 214, 183, 89, 131, 240, 31, 167, 68, 131, 113,
35, 204, 216, 201, 7, 42, 167, 123, 120, 167, 158, 118, 63, 65, 107, 134, 248, 224, 139, 24, 186,
237, 28, 154, 199, 64, 212, 54, 236, 205, 216, 202, 80, 208, 228, 84, 129, 25, 31, 203, 252, 86,
188, 137, 148, 177, 227, 255, 5, 250, 175, 54, 37, 164, 20, 5, 135, 52, 48, 210, 177, 206, 116, 90,
106, 254, 131, 66, 98, 112, 109, 160, 163, 1, 203, 96, 252, 74, 70, 50, 200, 108, 234, 190, 38, 12,
71, 172, 78, 19, 117, 160, 54, 52, 154, 30, 82, 154, 73, 129, 101, 56, 202, 225, 165, 229, 252,
117, 27, 224, 92, 118, 189, 2, 250, 91, 12, 143, 94, 7, 167, 230, 134, 106, 49, 14, 45, 14, 204,
171, 113, 243, 88, 74, 49, 244, 109, 74, 128, 222, 43, 219, 174, 140, 161, 247, 124, 79, 139, 235,
140, 94, 137, 107, 209, 62, 166, 157, 41, 94, 1, 47, 217, 203, 237, 96, 146, 207, 210, 167, 107,
29, 102, 159, 111, 121, 86, 176, 254, 33, 37, 105, 239, 252, 214, 151, 63, 166, 64, 198, 33, 30,
30, 200, 160, 173, 143, 124, 146, 104, 25, 108, 121, 191, 232, 9, 162, 4, 158, 219, 41, 15, 210,
252, 247, 165, 98, 61, 230, 76, 189, 26, 98, 65, 133, 239, 218, 149, 180, 12, 210, 151, 188, 136,
247, 37, 114, 30, 201, 123, 118, 222, 99, 118, 106, 122, 248, 138, 31, 184, 75, 77, 126, 112, 28,
126, 134, 146, 46, 237, 14, 142, 139, 105, 245, 80, 76, 190, 41, 122, 162, 52, 156, 108, 147, 75,
4, 191, 45, 24, 222, 62, 186, 178, 28, 176, 213, 47, 86, 48, 14, 159, 7, 55, 158, 139, 97, 43
]),
&BigUint::from_bytes_be(&[1, 0, 1])
)),
public_key.inner_key
);
}
#[test]
fn decode_ssh_rsa_2048_public_key() {
// ssh-keygen -t rsa -b 2048 -C "[email protected]"
let ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI9ht2g2qOPgSG5huVYjFUouyaw59/6QuQqUVGwgnITlhRbM+bkvJQfcuiqcv+vD9/86Dfugk79sSfg/aVK+V/plqAAZoujz/wALDjEphSxAUcAR+t4i2F39Pa71MSc37I9L30z31tcba1X7od7hzrVMl9iurkOyBC4xcIWa1H8h0mDyoXyWPTqoTONDUe9dB1eu6GbixCfUcxvdVt0pAVJTdOmbNXKwRo5WXfMrsqKsFT2Acg4Vm4TfLShSSUW4rqM6GOBCfF6jnxFvTSDentH5hykjWL3lMCghD+1hJyOdnMHJC/5qTUGOB86MxsR4RCXqS+LZrGpMScVyDQge7r [email protected]\r\n";
let public_key: SshPublicKey = SshPublicKey::from_str(ssh_public_key).unwrap();
assert_eq!("[email protected]".to_owned(), public_key.comment);
assert_eq!(
SshBasePublicKey::Rsa(PublicKey::from_rsa_components(
&BigUint::from_bytes_be(&[
200, 246, 27, 118, 131, 106, 142, 62, 4, 134, 230, 27, 149, 98, 49, 84, 162, 236, 154, 195, 159,
127, 233, 11, 144, 169, 69, 70, 194, 9, 200, 78, 88, 81, 108, 207, 155, 146, 242, 80, 125, 203,
162, 169, 203, 254, 188, 63, 127, 243, 160, 223, 186, 9, 59, 246, 196, 159, 131, 246, 149, 43, 229,
127, 166, 90, 128, 1, 154, 46, 143, 63, 240, 0, 176, 227, 18, 152, 82, 196, 5, 28, 1, 31, 173, 226,
45, 133, 223, 211, 218, 239, 83, 18, 115, 126, 200, 244, 189, 244, 207, 125, 109, 113, 182, 181,
95, 186, 29, 238, 28, 235, 84, 201, 125, 138, 234, 228, 59, 32, 66, 227, 23, 8, 89, 173, 71, 242,
29, 38, 15, 42, 23, 201, 99, 211, 170, 132, 206, 52, 53, 30, 245, 208, 117, 122, 238, 134, 110, 44,
66, 125, 71, 49, 189, 213, 109, 210, 144, 21, 37, 55, 78, 153, 179, 87, 43, 4, 104, 229, 101, 223,
50, 187, 42, 42, 193, 83, 216, 7, 32, 225, 89, 184, 77, 242, 210, 133, 36, 148, 91, 138, 234, 51,
161, 142, 4, 39, 197, 234, 57, 241, 22, 244, 210, 13, 233, 237, 31, 152, 114, 146, 53, 139, 222,
83, 2, 130, 16, 254, 214, 18, 114, 57, 217, 204, 28, 144, 191, 230, 164, 212, 24, 224, 124, 232,
204, 108, 71, 132, 66, 94, 164, 190, 45, 154, 198, 164, 196, 156, 87, 32, 208, 129, 238, 235
]),
&BigUint::from_bytes_be(&[1, 0, 1])
)),
public_key.inner_key
);
}
#[test]
fn encode_ssh_rsa_4096_public_key() {
// ssh-keygen -t rsa -b 4096 -C "[email protected]"
let ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDbUCK4dH1n4dOFBv/sjfMma4q5qe7SZ49j2GODGKr8DueZMWYLTck61uUMMlVBT3XyX6me6X4WsBoijzQWvgwpLCGTqlhQTntm5FphXHHkKxFvjMhPzCnHNS+L0ebzewcecsY5rtgw+6BhFwdZGhFBfif1/6s9q7y7+8Ge3hUIEqLdiMDDzxc66zIaW26jZxO4BMHuKp7Xln2JeDjsRHvz0vBNAddOfkvtp+gM72OH4tm9wS/V8bVOZ68oU0os8DuiEGnwA5RnjOjaFdHWt1mD8B+nRINxI8zYyQcqp3t4p552P0Frhvjgixi67Ryax0DUNuzN2MpQ0ORUgRkfy/xWvImUseP/BfqvNiWkFAWHNDDSsc50Wmr+g0JicG2gowHLYPxKRjLIbOq+JgxHrE4TdaA2NJoeUppJgWU4yuGl5fx1G+Bcdr0C+lsMj14Hp+aGajEOLQ7Mq3HzWEox9G1KgN4r266Mofd8T4vrjF6Ja9E+pp0pXgEv2cvtYJLP0qdrHWafb3lWsP4hJWnv/NaXP6ZAxiEeHsigrY98kmgZbHm/6AmiBJ7bKQ/S/PelYj3mTL0aYkGF79qVtAzSl7yI9yVyHsl7dt5jdmp6+IofuEtNfnAcfoaSLu0Ojotp9VBMvil6ojScbJNLBL8tGN4+urIcsNUvVjAOnwc3nothKw== [email protected]\r\n";
let public_key = SshPublicKey::from_str(ssh_public_key).unwrap();
let ssh_public_key_after = public_key.to_string().unwrap();
assert_eq!(ssh_public_key, ssh_public_key_after.as_str());
}
#[test]
fn encode_ssh_rsa_2048_public_key() {
// ssh-keygen -t rsa -b 2048 -C "[email protected]"
let ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI9ht2g2qOPgSG5huVYjFUouyaw59/6QuQqUVGwgnITlhRbM+bkvJQfcuiqcv+vD9/86Dfugk79sSfg/aVK+V/plqAAZoujz/wALDjEphSxAUcAR+t4i2F39Pa71MSc37I9L30z31tcba1X7od7hzrVMl9iurkOyBC4xcIWa1H8h0mDyoXyWPTqoTONDUe9dB1eu6GbixCfUcxvdVt0pAVJTdOmbNXKwRo5WXfMrsqKsFT2Acg4Vm4TfLShSSUW4rqM6GOBCfF6jnxFvTSDentH5hykjWL3lMCghD+1hJyOdnMHJC/5qTUGOB86MxsR4RCXqS+LZrGpMScVyDQge7r [email protected]\r\n";
let public_key = SshPublicKey::from_str(ssh_public_key).unwrap();
let ssh_public_key_after = public_key.to_string().unwrap();
assert_eq!(ssh_public_key, ssh_public_key_after.as_str());
}
#[rstest]
#[case(test_files::SSH_PUBLIC_KEY_EC_P256)]
#[case(test_files::SSH_PUBLIC_KEY_EC_P384)]
#[case(test_files::SSH_PUBLIC_KEY_EC_P521)]
fn ecdsa_roundtrip(#[case] key_str: &str) |
#[test]
fn ed25519_roundtrip() {
let public_key = SshPublicKey::from_str(test_files::SSH_PUBLIC_KEY_ED25519).unwrap();
let ssh_public_key_after = public_key.to_string().unwrap();
assert_eq!(test_files::SSH_PUBLIC_KEY_ED25519, ssh_public_key_after.as_str());
}
}
| {
let public_key = SshPublicKey::from_str(key_str).unwrap();
let ssh_public_key_after = public_key.to_string().unwrap();
assert_eq!(key_str, ssh_public_key_after.as_str());
} | identifier_body |
CallCenterHome.js | import React, { Component } from 'react';
import { injectIntl } from 'react-intl';
import { inject, observer } from 'mobx-react';
import { withRouter } from 'react-router-dom';
import { Page, Header, Content } from 'yqcloud-front-boot';
import { message, Button, Table, Modal, Tooltip, Icon } from 'yqcloud-ui';
import CallCenterStore from '../../../../stores/globalStores/callCenter/CallCenterStore';
import './CallCenterStyle.scss';
import CallCenterEdit from "../../../global/callCenter/callCenterEdit/CallCenterEdit";
const intlPrefix = 'organization.callCenter';
const { Sidebar } = Modal;
@inject('AppState')
@observer
class CallCenterHome extends Component{
state = this.getInitState();
| () {
return{
dataSource: [],
pagination: {
current: 1,
pageSize: 25,
total: '',
pageSizeOptions: ['25', '50', '100', '200'],
},
visible: false,
submitting: false,
edit: false,
isLoading: true,
Id: '',
nickname: '',
sort: 'isEnabled,desc'
}
}
componentWillMount() {
this.fetch(this.props);
}
componentDidMount() {
this.loadLanguage();
this.queryInfo();
}
fetch() {
CallCenterStore.getIsEnabled();
}
loadLanguage=() => {
const { AppState } = this.props;
CallCenterStore.queryLanguage(0, AppState.currentLanguage);
}
handleKeyUp = (e) => {
const { pagination } = this.state;
if (e.keyCode === 13) {
this.queryInfo(pagination);
}
};
// Query paginated data
queryInfo=(paginationIn) => {
const { pagination: paginationState, nickname, sort } = this.state;
const { AppState } = this.props;
const { id } = AppState.currentMenuType;
const pagination = paginationIn || paginationState;
const filters = nickname;
CallCenterStore.queryCallCenterPage(
id,
pagination,
sort,
filters,
).then((data) => {
if (data.success) {
this.setState({
pagination: {
current: (data.result.number || 0) + 1,
pageSize: data.result.size || 25,
total: data.result.totalElements || '',
pageSizeOptions: ['25', '50', '100', '200'],
},
filters,
dataSource: data.result.content,
});
}
});
}
renderSideBar() {
const {Id, edit, visible} = this.state;
return (
<CallCenterEdit
id={Id}
visible={visible}
edit={edit}
onRef={(node) => {
this.editValue = node;
}}
OnUnchangedSuccess={() => {
this.setState({
visible: false,
submitting: false,
});
}}
onSubmit={() => {
this.setState({
submitting: true,
});
}}
onSuccess={() => {
this.setState({
visible: false,
submitting: false,
});
this.queryInfo();
}}
onError={() => {
this.setState({
submitting: false,
});
}}
OnCloseModel={() => {
this.setState({
visible: false,
submitting: false,
});
}}
/>
);
}
handlePageChange(pagination, filters, {field, order}, params) {
const sorter = [];
if (field) {
sorter.push(field);
if (order === 'descend') {
sorter.push('desc');
}
}
this.queryInfo(pagination, sorter.join(','), filters, params);
}
/**
* Call center sidebar title
* @returns {*}
*/
renderSideTitle() {
if (this.state.edit) {
return CallCenterStore.languages[`${intlPrefix}.editCallCenter`];
} else {
return CallCenterStore.languages[`${intlPrefix}.createCallCenter`];
}
}
openNewPage = () => {
this.setState({
visible: true,
edit: false,
});
};
// Edit button
onEdit = (id) => {
this.setState({
visible: true,
edit: true,
Id: id,
});
};
// Enabled-status lookup code
enabledState = (values) => {
const enabled = CallCenterStore.getEnabled;
const temp = enabled.filter(v => (v.lookupValue === `${values}`));
if (temp.length > 0) {
return temp[0].lookupMeaning;
} else {
return `${values}`;
}
}
handleAble = (record) => {
const body = {
id: record.id,
enabled: !record.enabled
}
CallCenterStore.handleEdit(body).then((data) => {
if (data.success) {
this.queryInfo();
}
})
}
render() {
const { pagination, visible, dataSource, edit, submitting } =this.state;
const enabled = CallCenterStore.getEnabled;
const tableStyleName = {
overflow: 'hidden',
textOverflow: 'ellipsis',
display: '-webkit-box',
WebkitLineClamp: 1,
WebkitBoxOrient: 'vertical',
width: '100px',
};
const column = [
{
title: CallCenterStore.languages[`${intlPrefix}.tenantCode`],
dataIndex: 'code',
key: 'code',
width: 120
},
{
title: CallCenterStore.languages[`${intlPrefix}.tenantName`],
dataIndex: 'name',
key: 'name',
width: 150
},
{
title: CallCenterStore.languages[`${intlPrefix}.socket`],
dataIndex: 'websocketAddress',
key: 'websocketAddress',
width: 100,
render: (values, record) => (
<span style={tableStyleName}>
<Tooltip title={values} lines={20}>
<div style={{ textAlign: 'left' }}>{`${record.websocketAddress}` === 'null' ? '' : `${record.websocketAddress}` }</div>
</Tooltip>
</span>
),
},
{
title: CallCenterStore.languages[`${intlPrefix}.domainName`],
dataIndex: 'apiAddress',
key: 'apiAddress',
width: 100,
render: (values, record) => (
<span style={tableStyleName}>
<Tooltip title={values} lines={20}>
<div style={{ textAlign: 'left' }}>{`${record.apiAddress}` === 'null' ? '' : `${record.apiAddress}` }</div>
</Tooltip>
</span>
),
},
{
title: CallCenterStore.languages[`${intlPrefix}.key`],
dataIndex: 'accessKey',
key: 'accessKey',
width: 100,
render: (values, record) => (
<span style={tableStyleName}>
<Tooltip title={values} lines={20}>
<div style={{ textAlign: 'left' }}>{`${record.accessKey}` === 'null' ? '' : `${record.accessKey}` }</div>
</Tooltip>
</span>
),
},
{
title: CallCenterStore.languages.status,
dataIndex: 'enabled',
key: 'enabled',
width: 90,
render: (values, record) => this.enabledState(record.enabled),
},
{
title: CallCenterStore.languages['publictime'],
dataIndex: 'creationDate',
key: 'creationDate',
width: 150
},
{
title: CallCenterStore.languages['updateTime'],
dataIndex: 'lastUpdateDate',
key: 'lastUpdateDate',
width: 150
},
{
title: CallCenterStore.languages.operation,
dataIndex: 'option',
key: 'option',
width: 130,
render:(text, record) =>{
const style = {
cursor: 'pointer',
};
return (
<div>
<Tooltip
title={CallCenterStore.languages["modify"]}
placement="bottom"
>
<Button
size="small"
icon="bianji-"
shape="circle"
style={{ cursor: 'pointer', color: record.enabled ? '#2196F3' : '' }}
onClick={this.onEdit.bind(this, record.id)}
disabled={!record.enabled}
/>
</Tooltip>
{record.enabled ? (
<Tooltip placement="bottom" title={CallCenterStore.languages.disable}>
<Button
key="enable"
icon="jinyongzhuangtai"
style={style}
size="small"
shape="circle"
onClick={this.handleAble.bind(this, record)}
/>
</Tooltip>
)
: (
<Tooltip placement="bottom" title={CallCenterStore.languages.enable}>
<Button
key="disable"
size="small"
shape="circle"
icon="yijieshu"
style={{ cursor: 'pointer', color: '#2196F3' }}
onClick={this.handleAble.bind(this, record)}
/>
</Tooltip>
)
}
</div>);
}
},
]
return(
<Page>
<Header title={CallCenterStore.languages[`${intlPrefix}.callCenterManagement`]} />
<Content>
<div className="callCenter-header">
<span className="callCenter-header-searchbox">
<i className="icon icon-sousuo" />
<input
className="callCenter-header-searchbox-input"
placeholder={CallCenterStore.languages[`${intlPrefix}.search.more`]}
onChange={(e) => {
this.setState({
nickname: e.target.value,
});
}}
onKeyUp={e => this.handleKeyUp(e)}
/>
</span>
<span style={{ float: 'right' }}>
<Button
type="primary"
style={{ color: '#ffffff', background: '#2196F3', borderRadius: 4, marginRight: 15, fontSize: 14 }}
onClick={this.openNewPage}
>
{CallCenterStore.languages.create}
</Button>
</span>
</div>
<Table
dataSource={dataSource}
columns={column}
pagination={pagination}
filterBar={false}
onChange={this.handlePageChange.bind(this)}
/>
<Sidebar
title={this.renderSideTitle()}
className='sidebar-modal'
visible={visible}
okText={CallCenterStore.languages[edit ? 'save' : 'create']}
cancelText={CallCenterStore.languages["cancel"]}
onOk={e => this.editValue.handleSubmit(e)}
onCancel={(e) => {
this.editValue.handleCancel(e);
}}
confirmLoading={submitting}
>
{
this.renderSideBar()
}
</Sidebar>
</Content>
</Page>
)
}
}
export default withRouter(injectIntl(CallCenterHome));
| getInitState | identifier_name |
CallCenterHome.js | import React, { Component } from 'react';
import { injectIntl } from 'react-intl';
import { inject, observer } from 'mobx-react';
import { withRouter } from 'react-router-dom';
import { Page, Header, Content } from 'yqcloud-front-boot';
import { message, Button, Table, Modal, Tooltip, Icon } from 'yqcloud-ui';
import CallCenterStore from '../../../../stores/globalStores/callCenter/CallCenterStore';
import './CallCenterStyle.scss';
import CallCenterEdit from "../../../global/callCenter/callCenterEdit/CallCenterEdit";
const intlPrefix = 'organization.callCenter';
const { Sidebar } = Modal;
@inject('AppState')
@observer
class CallCenterHome extends Component{
state = this.getInitState();
getInitState() {
return{
dataSource: [],
pagination: {
current: 1,
pageSize: 25,
total: '',
pageSizeOptions: ['25', '50', '100', '200'],
},
visible: false,
submitting: false,
edit: false,
isLoading: true,
Id: '',
nickname: '',
sort: 'isEnabled,desc'
}
}
componentWillMount() {
this.fetch(this.props);
}
componentDidMount() {
this.loadLanguage();
this.queryInfo();
}
fetch() {
CallCenterStore.getIsEnabled();
}
loadLanguage=() => {
const { AppState } = this.props;
CallCenterStore.queryLanguage(0, AppState.currentLanguage);
}
handleKeyUp = (e) => {
const { pagination } = this.state;
if (e.keyCode === 13) {
this.queryInfo(pagination);
}
};
// Query paginated data
queryInfo=(paginationIn) => {
const { pagination: paginationState, nickname, sort } = this.state;
const { AppState } = this.props;
const { id } = AppState.currentMenuType;
const pagination = paginationIn || paginationState;
const filters = nickname;
CallCenterStore.queryCallCenterPage(
id,
pagination,
sort,
filters,
).then((data) => {
if (data.success) {
this.setState({
pagination: {
current: (data.result.number || 0) + 1,
pageSize: data.result.size || 25,
total: data.result.totalElements || '',
pageSizeOptions: ['25', '50', '100', '200'],
},
filters,
dataSource: data.result.content,
});
}
});
}
renderSideBar() {
const {Id, edit, visible} = this.state;
return (
<CallCenterEdit
id={Id}
visible={visible}
edit={edit}
onRef={(node) => {
this.editValue = node;
}}
OnUnchangedSuccess={() => {
this.setState({
visible: false,
submitting: false,
});
}}
onSubmit={() => {
this.setState({
submitting: true,
});
}}
onSuccess={() => {
this.setState({
visible: false,
submitting: false,
});
this.queryInfo();
}}
onError={() => {
this.setState({
submitting: false,
});
}}
OnCloseModel={() => {
this.setState({
visible: false,
submitting: false,
});
}}
/>
);
}
handlePageChange(pagination, filters, {field, order}, params) {
const sorter = [];
if (field) {
sorter.push(field);
if (order === 'descend') {
sorter.push('desc');
} | }
this.queryInfo(pagination, sorter.join(','), filters, params);
}
/**
* Call center sidebar title
* @returns {*}
*/
renderSideTitle() {
if (this.state.edit) {
return CallCenterStore.languages[`${intlPrefix}.editCallCenter`];
} else {
return CallCenterStore.languages[`${intlPrefix}.createCallCenter`];
}
}
openNewPage = () => {
this.setState({
visible: true,
edit: false,
});
};
// Edit button
onEdit = (id) => {
this.setState({
visible: true,
edit: true,
Id: id,
});
};
// Enabled-status lookup code
enabledState = (values) => {
const enabled = CallCenterStore.getEnabled;
const temp = enabled.filter(v => (v.lookupValue === `${values}`));
if (temp.length > 0) {
return temp[0].lookupMeaning;
} else {
return `${values}`;
}
}
handleAble = (record) => {
const body = {
id: record.id,
enabled: !record.enabled
}
CallCenterStore.handleEdit(body).then((data) => {
if (data.success) {
this.queryInfo();
}
})
}
render() {
const { pagination, visible, dataSource, edit, submitting } =this.state;
const enabled = CallCenterStore.getEnabled;
const tableStyleName = {
overflow: 'hidden',
textOverflow: 'ellipsis',
display: '-webkit-box',
WebkitLineClamp: 1,
WebkitBoxOrient: 'vertical',
width: '100px',
};
const column = [
{
title: CallCenterStore.languages[`${intlPrefix}.tenantCode`],
dataIndex: 'code',
key: 'code',
width: 120
},
{
title: CallCenterStore.languages[`${intlPrefix}.tenantName`],
dataIndex: 'name',
key: 'name',
width: 150
},
{
title: CallCenterStore.languages[`${intlPrefix}.socket`],
dataIndex: 'websocketAddress',
key: 'websocketAddress',
width: 100,
render: (values, record) => (
<span style={tableStyleName}>
<Tooltip title={values} lines={20}>
<div style={{ textAlign: 'left' }}>{`${record.websocketAddress}` === 'null' ? '' : `${record.websocketAddress}` }</div>
</Tooltip>
</span>
),
},
{
title: CallCenterStore.languages[`${intlPrefix}.domainName`],
dataIndex: 'apiAddress',
key: 'apiAddress',
width: 100,
render: (values, record) => (
<span style={tableStyleName}>
<Tooltip title={values} lines={20}>
<div style={{ textAlign: 'left' }}>{`${record.apiAddress}` === 'null' ? '' : `${record.apiAddress}` }</div>
</Tooltip>
</span>
),
},
{
title: CallCenterStore.languages[`${intlPrefix}.key`],
dataIndex: 'accessKey',
key: 'accessKey',
width: 100,
render: (values, record) => (
<span style={tableStyleName}>
<Tooltip title={values} lines={20}>
<div style={{ textAlign: 'left' }}>{`${record.accessKey}` === 'null' ? '' : `${record.accessKey}` }</div>
</Tooltip>
</span>
),
},
{
title: CallCenterStore.languages.status,
dataIndex: 'enabled',
key: 'enabled',
width: 90,
render: (values, record) => this.enabledState(record.enabled),
},
{
title: CallCenterStore.languages['publictime'],
dataIndex: 'creationDate',
key: 'creationDate',
width: 150
},
{
title: CallCenterStore.languages['updateTime'],
dataIndex: 'lastUpdateDate',
key: 'lastUpdateDate',
width: 150
},
{
title: CallCenterStore.languages.operation,
dataIndex: 'option',
key: 'option',
width: 130,
render:(text, record) =>{
const style = {
cursor: 'pointer',
};
return (
<div>
<Tooltip
title={CallCenterStore.languages["modify"]}
placement="bottom"
>
<Button
size="small"
icon="bianji-"
shape="circle"
style={{ cursor: 'pointer', color: record.enabled ? '#2196F3' : '' }}
onClick={this.onEdit.bind(this, record.id)}
disabled={!record.enabled}
/>
</Tooltip>
{record.enabled ? (
<Tooltip placement="bottom" title={CallCenterStore.languages.disable}>
<Button
key="enable"
icon="jinyongzhuangtai"
style={style}
size="small"
shape="circle"
onClick={this.handleAble.bind(this, record)}
/>
</Tooltip>
)
: (
<Tooltip placement="bottom" title={CallCenterStore.languages.enable}>
<Button
key="disable"
size="small"
shape="circle"
icon="yijieshu"
style={{ cursor: 'pointer', color: '#2196F3' }}
onClick={this.handleAble.bind(this, record)}
/>
</Tooltip>
)
}
</div>);
}
},
]
return(
<Page>
<Header title={CallCenterStore.languages[`${intlPrefix}.callCenterManagement`]} />
<Content>
<div className="callCenter-header">
<span className="callCenter-header-searchbox">
<i className="icon icon-sousuo" />
<input
className="callCenter-header-searchbox-input"
placeholder={CallCenterStore.languages[`${intlPrefix}.search.more`]}
onChange={(e) => {
this.setState({
nickname: e.target.value,
});
}}
onKeyUp={e => this.handleKeyUp(e)}
/>
</span>
<span style={{ float: 'right' }}>
<Button
type="primary"
style={{ color: '#ffffff', background: '#2196F3', borderRadius: 4, marginRight: 15, fontSize: 14 }}
onClick={this.openNewPage}
>
{CallCenterStore.languages.create}
</Button>
</span>
</div>
<Table
dataSource={dataSource}
columns={column}
pagination={pagination}
filterBar={false}
onChange={this.handlePageChange.bind(this)}
/>
<Sidebar
title={this.renderSideTitle()}
className='sidebar-modal'
visible={visible}
okText={CallCenterStore.languages[edit ? 'save' : 'create']}
cancelText={CallCenterStore.languages["cancel"]}
onOk={e => this.editValue.handleSubmit(e)}
onCancel={(e) => {
this.editValue.handleCancel(e);
}}
confirmLoading={submitting}
>
{
this.renderSideBar()
}
</Sidebar>
</Content>
</Page>
)
}
}
export default withRouter(injectIntl(CallCenterHome)); | random_line_split |
|
CallCenterHome.js | import React, { Component } from 'react';
import { injectIntl } from 'react-intl';
import { inject, observer } from 'mobx-react';
import { withRouter } from 'react-router-dom';
import { Page, Header, Content } from 'yqcloud-front-boot';
import { message, Button, Table, Modal, Tooltip, Icon } from 'yqcloud-ui';
import CallCenterStore from '../../../../stores/globalStores/callCenter/CallCenterStore';
import './CallCenterStyle.scss';
import CallCenterEdit from "../../../global/callCenter/callCenterEdit/CallCenterEdit";
const intlPrefix = 'organization.callCenter';
const { Sidebar } = Modal;
@inject('AppState')
@observer
class CallCenterHome extends Component{
state = this.getInitState();
getInitState() {
return{
dataSource: [],
pagination: {
current: 1,
pageSize: 25,
total: '',
pageSizeOptions: ['25', '50', '100', '200'],
},
visible: false,
submitting: false,
edit: false,
isLoading: true,
Id: '',
nickname: '',
sort: 'isEnabled,desc'
}
}
componentWillMount() {
this.fetch(this.props);
}
componentDidMount() {
this.loadLanguage();
this.queryInfo();
}
fetch() {
CallCenterStore.getIsEnabled();
}
loadLanguage=() => {
const { AppState } = this.props;
CallCenterStore.queryLanguage(0, AppState.currentLanguage);
}
handleKeyUp = (e) => {
const { pagination } = this.state;
if (e.keyCode === 13) {
this.queryInfo(pagination);
}
};
// Query paginated data
queryInfo=(paginationIn) => {
const { pagination: paginationState, nickname, sort } = this.state;
const { AppState } = this.props;
const { id } = AppState.currentMenuType;
const pagination = paginationIn || paginationState;
const filters = nickname;
CallCenterStore.queryCallCenterPage(
id,
pagination,
sort,
filters,
).then((data) => {
if (data.success) {
this.setState({
pagination: {
current: (data.result.number || 0) + 1,
pageSize: data.result.size || 25,
total: data.result.totalElements || '',
pageSizeOptions: ['25', '50', '100', '200'],
},
filters,
dataSource: data.result.content,
});
}
});
}
renderSideBar() {
const {Id, edit, visible} = this.state;
return (
<CallCenterEdit
id={Id}
visible={visible}
edit={edit}
onRef={(node) => {
this.editValue = node;
}}
OnUnchangedSuccess={() => {
this.setState({
visible: false,
submitting: false,
});
}}
onSubmit={() => {
this.setState({
submitting: true,
});
}}
onSuccess={() => {
this.setState({
visible: false,
submitting: false,
});
this.queryInfo();
}}
onError={() => {
this.setState({
submitting: false,
});
}}
OnCloseModel={() => {
this.setState({
visible: false,
submitting: false,
});
}}
/>
);
}
handlePageChange(pagination, filters, {field, order}, params) {
const sorter = [];
if (field) {
sorter.push(field);
if (order === 'descend') {
sorter.push('desc');
}
}
this.queryInfo(pagination, sorter.join(','), filters, params);
}
/**
* Call center sidebar title
* @returns {*}
*/
renderSideTitle() {
if (this.state.edit) {
return CallCenterStore.languages[`${intlPrefix}.editCallCenter`];
} else {
return CallCenterStore.languages[`${intlPrefix}.createCallCenter`];
}
}
openNewPage = () => {
this.setState({
visible: true,
edit: false,
});
};
// Edit button
onEdit = (id) => {
this.setState({
visible: true,
edit: true,
Id: id,
});
};
// Enabled-status lookup code
enabledState = (values) => {
const enabled = CallCenterStore.getEnabled;
const temp = enabled.filter(v => (v.lookupValue === `${values}`));
if (temp.length > 0) {
return temp[0].lookupMeaning;
| }
handleAble = (record) => {
const body = {
id: record.id,
enabled: !record.enabled
}
CallCenterStore.handleEdit(body).then((data) => {
if (data.success) {
this.queryInfo();
}
})
}
render() {
const { pagination, visible, dataSource, edit, submitting } =this.state;
const enabled = CallCenterStore.getEnabled;
const tableStyleName = {
overflow: 'hidden',
textOverflow: 'ellipsis',
display: '-webkit-box',
WebkitLineClamp: 1,
WebkitBoxOrient: 'vertical',
width: '100px',
};
const column = [
{
title: CallCenterStore.languages[`${intlPrefix}.tenantCode`],
dataIndex: 'code',
key: 'code',
width: 120
},
{
title: CallCenterStore.languages[`${intlPrefix}.tenantName`],
dataIndex: 'name',
key: 'name',
width: 150
},
{
title: CallCenterStore.languages[`${intlPrefix}.socket`],
dataIndex: 'websocketAddress',
key: 'websocketAddress',
width: 100,
render: (values, record) => (
<span style={tableStyleName}>
<Tooltip title={values} lines={20}>
<div style={{ textAlign: 'left' }}>{`${record.websocketAddress}` === 'null' ? '' : `${record.websocketAddress}` }</div>
</Tooltip>
</span>
),
},
{
title: CallCenterStore.languages[`${intlPrefix}.domainName`],
dataIndex: 'apiAddress',
key: 'apiAddress',
width: 100,
render: (values, record) => (
<span style={tableStyleName}>
<Tooltip title={values} lines={20}>
<div style={{ textAlign: 'left' }}>{`${record.apiAddress}` === 'null' ? '' : `${record.apiAddress}` }</div>
</Tooltip>
</span>
),
},
{
title: CallCenterStore.languages[`${intlPrefix}.key`],
dataIndex: 'accessKey',
key: 'accessKey',
width: 100,
render: (values, record) => (
<span style={tableStyleName}>
<Tooltip title={values} lines={20}>
<div style={{ textAlign: 'left' }}>{`${record.accessKey}` === 'null' ? '' : `${record.accessKey}` }</div>
</Tooltip>
</span>
),
},
{
title: CallCenterStore.languages.status,
dataIndex: 'enabled',
key: 'enabled',
width: 90,
render: (values, record) => this.enabledState(record.enabled),
},
{
title: CallCenterStore.languages['publictime'],
dataIndex: 'creationDate',
key: 'creationDate',
width: 150
},
{
title: CallCenterStore.languages['updateTime'],
dataIndex: 'lastUpdateDate',
key: 'lastUpdateDate',
width: 150
},
{
title: CallCenterStore.languages.operation,
dataIndex: 'option',
key: 'option',
width: 130,
render:(text, record) =>{
const style = {
cursor: 'pointer',
};
return (
<div>
<Tooltip
title={CallCenterStore.languages["modify"]}
placement="bottom"
>
<Button
size="small"
icon="bianji-"
shape="circle"
style={{ cursor: 'pointer', color: record.enabled ? '#2196F3' : '' }}
onClick={this.onEdit.bind(this, record.id)}
disabled={!record.enabled}
/>
</Tooltip>
{record.enabled ? (
<Tooltip placement="bottom" title={CallCenterStore.languages.disable}>
<Button
key="enable"
icon="jinyongzhuangtai"
style={style}
size="small"
shape="circle"
onClick={this.handleAble.bind(this, record)}
/>
</Tooltip>
)
: (
<Tooltip placement="bottom" title={CallCenterStore.languages.enable}>
<Button
key="disable"
size="small"
shape="circle"
icon="yijieshu"
style={{ cursor: 'pointer', color: '#2196F3' }}
onClick={this.handleAble.bind(this, record)}
/>
</Tooltip>
)
}
</div>);
}
},
]
return(
<Page>
<Header title={CallCenterStore.languages[`${intlPrefix}.callCenterManagement`]} />
<Content>
<div className="callCenter-header">
<span className="callCenter-header-searchbox">
<i className="icon icon-sousuo" />
<input
className="callCenter-header-searchbox-input"
placeholder={CallCenterStore.languages[`${intlPrefix}.search.more`]}
onChange={(e) => {
this.setState({
nickname: e.target.value,
});
}}
onKeyUp={e => this.handleKeyUp(e)}
/>
</span>
<span style={{ float: 'right' }}>
<Button
type="primary"
style={{ color: '#ffffff', background: '#2196F3', borderRadius: 4, marginRight: 15, fontSize: 14 }}
onClick={this.openNewPage}
>
{CallCenterStore.languages.create}
</Button>
</span>
</div>
<Table
dataSource={dataSource}
columns={column}
pagination={pagination}
filterBar={false}
onChange={this.handlePageChange.bind(this)}
/>
<Sidebar
title={this.renderSideTitle()}
className='sidebar-modal'
visible={visible}
okText={CallCenterStore.languages[edit ? 'save' : 'create']}
cancelText={CallCenterStore.languages["cancel"]}
onOk={e => this.editValue.handleSubmit(e)}
onCancel={(e) => {
this.editValue.handleCancel(e);
}}
confirmLoading={submitting}
>
{
this.renderSideBar()
}
</Sidebar>
</Content>
</Page>
)
}
}
export default withRouter(injectIntl(CallCenterHome));
| } else {
return `${values}`;
}
| conditional_block |
CallCenterHome.js | import React, { Component } from 'react';
import { injectIntl } from 'react-intl';
import { inject, observer } from 'mobx-react';
import { withRouter } from 'react-router-dom';
import { Page, Header, Content } from 'yqcloud-front-boot';
import { message, Button, Table, Modal, Tooltip, Icon } from 'yqcloud-ui';
import CallCenterStore from '../../../../stores/globalStores/callCenter/CallCenterStore';
import './CallCenterStyle.scss';
import CallCenterEdit from "../../../global/callCenter/callCenterEdit/CallCenterEdit";
const intlPrefix = 'organization.callCenter';
const { Sidebar } = Modal;
@inject('AppState')
@observer
class CallCenterHome extends Component{
state = this.getInitState();
getInitState() |
componentWillMount() {
this.fetch(this.props);
}
componentDidMount() {
this.loadLanguage();
this.queryInfo();
}
fetch() {
CallCenterStore.getIsEnabled();
}
loadLanguage=() => {
const { AppState } = this.props;
CallCenterStore.queryLanguage(0, AppState.currentLanguage);
}
handleKeyUp = (e) => {
const { pagination } = this.state;
if (e.keyCode === 13) {
this.queryInfo(pagination);
}
};
// Query paginated data
queryInfo=(paginationIn) => {
const { pagination: paginationState, nickname, sort } = this.state;
const { AppState } = this.props;
const { id } = AppState.currentMenuType;
const pagination = paginationIn || paginationState;
const filters = nickname;
CallCenterStore.queryCallCenterPage(
id,
pagination,
sort,
filters,
).then((data) => {
if (data.success) {
this.setState({
pagination: {
current: (data.result.number || 0) + 1,
pageSize: data.result.size || 25,
total: data.result.totalElements || '',
pageSizeOptions: ['25', '50', '100', '200'],
},
filters,
dataSource: data.result.content,
});
}
});
}
renderSideBar() {
const {Id, edit, visible} = this.state;
return (
<CallCenterEdit
id={Id}
visible={visible}
edit={edit}
onRef={(node) => {
this.editValue = node;
}}
OnUnchangedSuccess={() => {
this.setState({
visible: false,
submitting: false,
});
}}
onSubmit={() => {
this.setState({
submitting: true,
});
}}
onSuccess={() => {
this.setState({
visible: false,
submitting: false,
});
this.queryInfo();
}}
onError={() => {
this.setState({
submitting: false,
});
}}
OnCloseModel={() => {
this.setState({
visible: false,
submitting: false,
});
}}
/>
);
}
handlePageChange(pagination, filters, {field, order}, params) {
const sorter = [];
if (field) {
sorter.push(field);
if (order === 'descend') {
sorter.push('desc');
}
}
this.queryInfo(pagination, sorter.join(','), filters, params);
}
/**
* Call center sidebar title
* @returns {*}
*/
renderSideTitle() {
if (this.state.edit) {
return CallCenterStore.languages[`${intlPrefix}.editCallCenter`];
} else {
return CallCenterStore.languages[`${intlPrefix}.createCallCenter`];
}
}
openNewPage = () => {
this.setState({
visible: true,
edit: false,
});
};
// Edit button
onEdit = (id) => {
this.setState({
visible: true,
edit: true,
Id: id,
});
};
// Enabled-status lookup code
enabledState = (values) => {
const enabled = CallCenterStore.getEnabled;
const temp = enabled.filter(v => (v.lookupValue === `${values}`));
if (temp.length > 0) {
return temp[0].lookupMeaning;
} else {
return `${values}`;
}
}
handleAble = (record) => {
const body = {
id: record.id,
enabled: !record.enabled
}
CallCenterStore.handleEdit(body).then((data) => {
if (data.success) {
this.queryInfo();
}
})
}
render() {
const { pagination, visible, dataSource, edit, submitting } =this.state;
const enabled = CallCenterStore.getEnabled;
const tableStyleName = {
overflow: 'hidden',
textOverflow: 'ellipsis',
display: '-webkit-box',
WebkitLineClamp: 1,
WebkitBoxOrient: 'vertical',
width: '100px',
};
const column = [
{
title: CallCenterStore.languages[`${intlPrefix}.tenantCode`],
dataIndex: 'code',
key: 'code',
width: 120
},
{
title: CallCenterStore.languages[`${intlPrefix}.tenantName`],
dataIndex: 'name',
key: 'name',
width: 150
},
{
title: CallCenterStore.languages[`${intlPrefix}.socket`],
dataIndex: 'websocketAddress',
key: 'websocketAddress',
width: 100,
render: (values, record) => (
<span style={tableStyleName}>
<Tooltip title={values} lines={20}>
<div style={{ textAlign: 'left' }}>{`${record.websocketAddress}` === 'null' ? '' : `${record.websocketAddress}` }</div>
</Tooltip>
</span>
),
},
{
title: CallCenterStore.languages[`${intlPrefix}.domainName`],
dataIndex: 'apiAddress',
key: 'apiAddress',
width: 100,
render: (values, record) => (
<span style={tableStyleName}>
<Tooltip title={values} lines={20}>
<div style={{ textAlign: 'left' }}>{`${record.apiAddress}` === 'null' ? '' : `${record.apiAddress}` }</div>
</Tooltip>
</span>
),
},
{
title: CallCenterStore.languages[`${intlPrefix}.key`],
dataIndex: 'accessKey',
key: 'accessKey',
width: 100,
render: (values, record) => (
<span style={tableStyleName}>
<Tooltip title={values} lines={20}>
<div style={{ textAlign: 'left' }}>{`${record.accessKey}` === 'null' ? '' : `${record.accessKey}` }</div>
</Tooltip>
</span>
),
},
{
title: CallCenterStore.languages.status,
dataIndex: 'enabled',
key: 'enabled',
width: 90,
render: (values, record) => this.enabledState(record.enabled),
},
{
title: CallCenterStore.languages['publictime'],
dataIndex: 'creationDate',
key: 'creationDate',
width: 150
},
{
title: CallCenterStore.languages['updateTime'],
dataIndex: 'lastUpdateDate',
key: 'lastUpdateDate',
width: 150
},
{
title: CallCenterStore.languages.operation,
dataIndex: 'option',
key: 'option',
width: 130,
render:(text, record) =>{
const style = {
cursor: 'pointer',
};
return (
<div>
<Tooltip
title={CallCenterStore.languages["modify"]}
placement="bottom"
>
<Button
size="small"
icon="bianji-"
shape="circle"
style={{ cursor: 'pointer', color: record.enabled ? '#2196F3' : '' }}
onClick={this.onEdit.bind(this, record.id)}
disabled={!record.enabled}
/>
</Tooltip>
{record.enabled ? (
<Tooltip placement="bottom" title={CallCenterStore.languages.disable}>
<Button
key="enable"
icon="jinyongzhuangtai"
style={style}
size="small"
shape="circle"
onClick={this.handleAble.bind(this, record)}
/>
</Tooltip>
)
: (
<Tooltip placement="bottom" title={CallCenterStore.languages.enable}>
<Button
key="disable"
size="small"
shape="circle"
icon="yijieshu"
style={{ cursor: 'pointer', color: '#2196F3' }}
onClick={this.handleAble.bind(this, record)}
/>
</Tooltip>
)
}
</div>);
}
},
]
return(
<Page>
<Header title={CallCenterStore.languages[`${intlPrefix}.callCenterManagement`]} />
<Content>
<div className="callCenter-header">
<span className="callCenter-header-searchbox">
<i className="icon icon-sousuo" />
<input
className="callCenter-header-searchbox-input"
placeholder={CallCenterStore.languages[`${intlPrefix}.search.more`]}
onChange={(e) => {
this.setState({
nickname: e.target.value,
});
}}
onKeyUp={e => this.handleKeyUp(e)}
/>
</span>
<span style={{ float: 'right' }}>
<Button
type="primary"
style={{ color: '#ffffff', background: '#2196F3', borderRadius: 4, marginRight: 15, fontSize: 14 }}
onClick={this.openNewPage}
>
{CallCenterStore.languages.create}
</Button>
</span>
</div>
<Table
dataSource={dataSource}
columns={column}
pagination={pagination}
filterBar={false}
onChange={this.handlePageChange.bind(this)}
/>
<Sidebar
title={this.renderSideTitle()}
className='sidebar-modal'
visible={visible}
okText={CallCenterStore.languages[edit ? 'save' : 'create']}
cancelText={CallCenterStore.languages["cancel"]}
onOk={e => this.editValue.handleSubmit(e)}
onCancel={(e) => {
this.editValue.handleCancel(e);
}}
confirmLoading={submitting}
>
{
this.renderSideBar()
}
</Sidebar>
</Content>
</Page>
)
}
}
export default withRouter(injectIntl(CallCenterHome));
| {
return{
dataSource: [],
pagination: {
current: 1,
pageSize: 25,
total: '',
pageSizeOptions: ['25', '50', '100', '200'],
},
visible: false,
submitting: false,
edit: false,
isLoading: true,
Id: '',
nickname: '',
sort: 'isEnabled,desc'
}
} | identifier_body |