file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
admin_script_builder.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use diem_framework::compile_script;
use diem_types::{
account_address::AccountAddress,
account_config::diem_root_address,
transaction::{Script, WriteSetPayload},
};
use handlebars::Handlebars;
use serde::Serialize;
use std::{collections::HashMap, io::Write, path::PathBuf};
use tempfile::NamedTempFile;
/// The relative path to the scripts templates
pub const SCRIPTS_DIR_PATH: &str = "templates";
fn compile_admin_script(input: &str) -> Result<Script> {
let mut temp_file = NamedTempFile::new()?;
temp_file.write_all(input.as_bytes())?;
let cur_path = temp_file.path().to_str().unwrap().to_owned();
Ok(Script::new(compile_script(cur_path), vec![], vec![]))
}
pub fn template_path() -> PathBuf {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push(SCRIPTS_DIR_PATH.to_string());
path
}
pub fn
|
(validators: Vec<AccountAddress>) -> WriteSetPayload {
assert!(!validators.is_empty(), "Unexpected validator set length");
let mut script = template_path();
script.push("remove_validators.move");
let script = {
let mut hb = Handlebars::new();
hb.set_strict_mode(true);
hb.register_template_file("script", script).unwrap();
let mut data = HashMap::new();
data.insert("addresses", validators);
let output = hb.render("script", &data).unwrap();
compile_admin_script(output.as_str()).unwrap()
};
WriteSetPayload::Script {
script,
execute_as: diem_root_address(),
}
}
pub fn encode_custom_script<T: Serialize>(
script_name_in_templates: &str,
args: &T,
execute_as: Option<AccountAddress>,
) -> WriteSetPayload {
let mut script = template_path();
script.push(script_name_in_templates);
let script = {
let mut hb = Handlebars::new();
hb.register_template_file("script", script).unwrap();
hb.set_strict_mode(true);
let output = hb.render("script", args).unwrap();
compile_admin_script(output.as_str()).unwrap()
};
WriteSetPayload::Script {
script,
execute_as: execute_as.unwrap_or_else(diem_root_address),
}
}
pub fn encode_halt_network_payload() -> WriteSetPayload {
let mut script = template_path();
script.push("halt_transactions.move");
WriteSetPayload::Script {
script: Script::new(
compile_script(script.to_str().unwrap().to_owned()),
vec![],
vec![],
),
execute_as: diem_root_address(),
}
}
|
encode_remove_validators_payload
|
identifier_name
|
admin_script_builder.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use diem_framework::compile_script;
use diem_types::{
account_address::AccountAddress,
account_config::diem_root_address,
transaction::{Script, WriteSetPayload},
};
use handlebars::Handlebars;
use serde::Serialize;
use std::{collections::HashMap, io::Write, path::PathBuf};
use tempfile::NamedTempFile;
/// The relative path to the scripts templates
pub const SCRIPTS_DIR_PATH: &str = "templates";
fn compile_admin_script(input: &str) -> Result<Script>
|
pub fn template_path() -> PathBuf {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push(SCRIPTS_DIR_PATH.to_string());
path
}
pub fn encode_remove_validators_payload(validators: Vec<AccountAddress>) -> WriteSetPayload {
assert!(!validators.is_empty(), "Unexpected validator set length");
let mut script = template_path();
script.push("remove_validators.move");
let script = {
let mut hb = Handlebars::new();
hb.set_strict_mode(true);
hb.register_template_file("script", script).unwrap();
let mut data = HashMap::new();
data.insert("addresses", validators);
let output = hb.render("script", &data).unwrap();
compile_admin_script(output.as_str()).unwrap()
};
WriteSetPayload::Script {
script,
execute_as: diem_root_address(),
}
}
pub fn encode_custom_script<T: Serialize>(
script_name_in_templates: &str,
args: &T,
execute_as: Option<AccountAddress>,
) -> WriteSetPayload {
let mut script = template_path();
script.push(script_name_in_templates);
let script = {
let mut hb = Handlebars::new();
hb.register_template_file("script", script).unwrap();
hb.set_strict_mode(true);
let output = hb.render("script", args).unwrap();
compile_admin_script(output.as_str()).unwrap()
};
WriteSetPayload::Script {
script,
execute_as: execute_as.unwrap_or_else(diem_root_address),
}
}
pub fn encode_halt_network_payload() -> WriteSetPayload {
let mut script = template_path();
script.push("halt_transactions.move");
WriteSetPayload::Script {
script: Script::new(
compile_script(script.to_str().unwrap().to_owned()),
vec![],
vec![],
),
execute_as: diem_root_address(),
}
}
|
{
let mut temp_file = NamedTempFile::new()?;
temp_file.write_all(input.as_bytes())?;
let cur_path = temp_file.path().to_str().unwrap().to_owned();
Ok(Script::new(compile_script(cur_path), vec![], vec![]))
}
|
identifier_body
|
admin_script_builder.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use diem_framework::compile_script;
use diem_types::{
account_address::AccountAddress,
account_config::diem_root_address,
transaction::{Script, WriteSetPayload},
};
use handlebars::Handlebars;
use serde::Serialize;
use std::{collections::HashMap, io::Write, path::PathBuf};
use tempfile::NamedTempFile;
/// The relative path to the scripts templates
pub const SCRIPTS_DIR_PATH: &str = "templates";
fn compile_admin_script(input: &str) -> Result<Script> {
let mut temp_file = NamedTempFile::new()?;
temp_file.write_all(input.as_bytes())?;
let cur_path = temp_file.path().to_str().unwrap().to_owned();
Ok(Script::new(compile_script(cur_path), vec![], vec![]))
}
pub fn template_path() -> PathBuf {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push(SCRIPTS_DIR_PATH.to_string());
path
}
pub fn encode_remove_validators_payload(validators: Vec<AccountAddress>) -> WriteSetPayload {
assert!(!validators.is_empty(), "Unexpected validator set length");
let mut script = template_path();
script.push("remove_validators.move");
let script = {
let mut hb = Handlebars::new();
hb.set_strict_mode(true);
hb.register_template_file("script", script).unwrap();
let mut data = HashMap::new();
data.insert("addresses", validators);
let output = hb.render("script", &data).unwrap();
compile_admin_script(output.as_str()).unwrap()
};
WriteSetPayload::Script {
script,
execute_as: diem_root_address(),
}
}
pub fn encode_custom_script<T: Serialize>(
script_name_in_templates: &str,
args: &T,
|
) -> WriteSetPayload {
let mut script = template_path();
script.push(script_name_in_templates);
let script = {
let mut hb = Handlebars::new();
hb.register_template_file("script", script).unwrap();
hb.set_strict_mode(true);
let output = hb.render("script", args).unwrap();
compile_admin_script(output.as_str()).unwrap()
};
WriteSetPayload::Script {
script,
execute_as: execute_as.unwrap_or_else(diem_root_address),
}
}
pub fn encode_halt_network_payload() -> WriteSetPayload {
let mut script = template_path();
script.push("halt_transactions.move");
WriteSetPayload::Script {
script: Script::new(
compile_script(script.to_str().unwrap().to_owned()),
vec![],
vec![],
),
execute_as: diem_root_address(),
}
}
|
execute_as: Option<AccountAddress>,
|
random_line_split
|
ring_slices.rs
|
use core::ptr::{self};
/// Returns the two slices that cover the `VecDeque`'s valid range
pub trait RingSlices: Sized {
fn slice(self, from: usize, to: usize) -> Self;
fn split_at(self, i: usize) -> (Self, Self);
fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
let contiguous = tail <= head;
if contiguous {
let (empty, buf) = buf.split_at(0);
(buf.slice(tail, head), empty)
} else
|
}
}
impl<T> RingSlices for &[T] {
fn slice(self, from: usize, to: usize) -> Self {
&self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at(i)
}
}
impl<T> RingSlices for &mut [T] {
fn slice(self, from: usize, to: usize) -> Self {
&mut self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at_mut(i)
}
}
impl<T> RingSlices for *mut [T] {
fn slice(self, from: usize, to: usize) -> Self {
assert!(from <= to && to < self.len());
// Not using `get_unchecked_mut` to keep this a safe operation.
let len = to - from;
ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len)
}
fn split_at(self, mid: usize) -> (Self, Self) {
let len = self.len();
let ptr = self.as_mut_ptr();
assert!(mid <= len);
(
ptr::slice_from_raw_parts_mut(ptr, mid),
ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid),
)
}
}
|
{
let (mid, right) = buf.split_at(tail);
let (left, _) = mid.split_at(head);
(right, left)
}
|
conditional_block
|
ring_slices.rs
|
use core::ptr::{self};
/// Returns the two slices that cover the `VecDeque`'s valid range
pub trait RingSlices: Sized {
fn slice(self, from: usize, to: usize) -> Self;
fn split_at(self, i: usize) -> (Self, Self);
fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
let contiguous = tail <= head;
if contiguous {
let (empty, buf) = buf.split_at(0);
(buf.slice(tail, head), empty)
} else {
let (mid, right) = buf.split_at(tail);
let (left, _) = mid.split_at(head);
(right, left)
}
}
}
impl<T> RingSlices for &[T] {
fn slice(self, from: usize, to: usize) -> Self {
&self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at(i)
}
}
impl<T> RingSlices for &mut [T] {
fn slice(self, from: usize, to: usize) -> Self {
&mut self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at_mut(i)
}
}
impl<T> RingSlices for *mut [T] {
fn slice(self, from: usize, to: usize) -> Self {
assert!(from <= to && to < self.len());
// Not using `get_unchecked_mut` to keep this a safe operation.
let len = to - from;
ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len)
}
fn
|
(self, mid: usize) -> (Self, Self) {
let len = self.len();
let ptr = self.as_mut_ptr();
assert!(mid <= len);
(
ptr::slice_from_raw_parts_mut(ptr, mid),
ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid),
)
}
}
|
split_at
|
identifier_name
|
ring_slices.rs
|
use core::ptr::{self};
/// Returns the two slices that cover the `VecDeque`'s valid range
pub trait RingSlices: Sized {
fn slice(self, from: usize, to: usize) -> Self;
fn split_at(self, i: usize) -> (Self, Self);
fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
let contiguous = tail <= head;
if contiguous {
let (empty, buf) = buf.split_at(0);
(buf.slice(tail, head), empty)
} else {
let (mid, right) = buf.split_at(tail);
let (left, _) = mid.split_at(head);
(right, left)
}
}
}
impl<T> RingSlices for &[T] {
fn slice(self, from: usize, to: usize) -> Self {
&self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at(i)
}
}
impl<T> RingSlices for &mut [T] {
fn slice(self, from: usize, to: usize) -> Self {
&mut self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at_mut(i)
}
|
}
impl<T> RingSlices for *mut [T] {
fn slice(self, from: usize, to: usize) -> Self {
assert!(from <= to && to < self.len());
// Not using `get_unchecked_mut` to keep this a safe operation.
let len = to - from;
ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len)
}
fn split_at(self, mid: usize) -> (Self, Self) {
let len = self.len();
let ptr = self.as_mut_ptr();
assert!(mid <= len);
(
ptr::slice_from_raw_parts_mut(ptr, mid),
ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid),
)
}
}
|
random_line_split
|
|
ring_slices.rs
|
use core::ptr::{self};
/// Returns the two slices that cover the `VecDeque`'s valid range
pub trait RingSlices: Sized {
fn slice(self, from: usize, to: usize) -> Self;
fn split_at(self, i: usize) -> (Self, Self);
fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
let contiguous = tail <= head;
if contiguous {
let (empty, buf) = buf.split_at(0);
(buf.slice(tail, head), empty)
} else {
let (mid, right) = buf.split_at(tail);
let (left, _) = mid.split_at(head);
(right, left)
}
}
}
impl<T> RingSlices for &[T] {
fn slice(self, from: usize, to: usize) -> Self {
&self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at(i)
}
}
impl<T> RingSlices for &mut [T] {
fn slice(self, from: usize, to: usize) -> Self {
&mut self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at_mut(i)
}
}
impl<T> RingSlices for *mut [T] {
fn slice(self, from: usize, to: usize) -> Self
|
fn split_at(self, mid: usize) -> (Self, Self) {
let len = self.len();
let ptr = self.as_mut_ptr();
assert!(mid <= len);
(
ptr::slice_from_raw_parts_mut(ptr, mid),
ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid),
)
}
}
|
{
assert!(from <= to && to < self.len());
// Not using `get_unchecked_mut` to keep this a safe operation.
let len = to - from;
ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len)
}
|
identifier_body
|
write_ogr.rs
|
extern crate gdal;
use std::path::Path;
use std::fs;
use gdal::errors::Error;
use gdal::vector::{Defn, Driver, Feature, FieldDefn, FieldValue, Geometry, OGRFieldType};
/// Example 1, the detailed way:
fn example_1() -> Result<(), Error> {
let _ = fs::remove_file("/tmp/output1.geojson");
let drv = Driver::get("GeoJSON")?;
let mut ds = drv.create(Path::new("/tmp/output1.geojson"))?;
let lyr = ds.create_layer()?;
let field_defn = FieldDefn::new("Name", OGRFieldType::OFTString)?;
field_defn.set_width(80);
field_defn.add_to_layer(lyr)?;
let field_defn = FieldDefn::new("Value", OGRFieldType::OFTReal)?;
field_defn.add_to_layer(lyr)?;
let defn = Defn::from_layer(lyr);
// 1st feature:
let mut ft = Feature::new(&defn)?;
ft.set_geometry(Geometry::from_wkt("POINT (45.21 21.76)")?)?;
ft.set_field_string("Name", "Feature 1")?;
ft.set_field_double("Value", 45.78)?;
ft.create(lyr)?;
// 2nd feature:
let mut ft = Feature::new(&defn)?;
ft.set_field_double("Value", 0.789)?;
ft.set_geometry(Geometry::from_wkt("POINT (46.50 22.50)")?)?;
ft.set_field_string("Name", "Feature 2")?;
ft.create(lyr)?;
// Feature triggering an error due to a wrong field name:
let mut ft = Feature::new(&defn)?;
ft.set_geometry(Geometry::from_wkt("POINT (46.50 22.50)")?)?;
ft.set_field_string("Name", "Feature 2")?;
match ft.set_field_double("Values", 0.789) {
Ok(v) => v,
Err(err) => println!("{}", err),
};
ft.create(lyr)?;
Ok(())
}
/// Example 2, same output, shortened way:
fn
|
() -> Result<(), Error> {
let _ = fs::remove_file("/tmp/output2.geojson");
let driver = Driver::get("GeoJSON")?;
let mut ds = driver.create(Path::new("/tmp/output2.geojson"))?;
let layer = ds.create_layer()?;
layer.create_defn_fields(&[
("Name", OGRFieldType::OFTString),
("Value", OGRFieldType::OFTReal),
])?;
layer.create_feature_fields(
Geometry::from_wkt("POINT (45.21 21.76)")?,
&["Name", "Value"],
&[
FieldValue::StringValue("Feature 1".to_string()),
FieldValue::RealValue(45.78),
],
)?;
layer.create_feature_fields(
Geometry::from_wkt("POINT (46.50 22.50)")?,
&["Name", "Value"],
&[
FieldValue::StringValue("Feature 2".to_string()),
FieldValue::RealValue(0.789),
],
)?;
// Feature creation triggering an error due to a wrong field name:
match layer.create_feature_fields(
Geometry::from_wkt("POINT (46.50 22.50)")?,
&["Abcd", "Value"],
&[
FieldValue::StringValue("Feature 2".to_string()),
FieldValue::RealValue(0.789),
],
) {
Ok(v) => v,
Err(err) => println!("{}", err),
};
Ok(())
}
fn main() {
example_1().unwrap();
example_2().unwrap();
}
|
example_2
|
identifier_name
|
write_ogr.rs
|
extern crate gdal;
use std::path::Path;
use std::fs;
use gdal::errors::Error;
use gdal::vector::{Defn, Driver, Feature, FieldDefn, FieldValue, Geometry, OGRFieldType};
/// Example 1, the detailed way:
fn example_1() -> Result<(), Error> {
let _ = fs::remove_file("/tmp/output1.geojson");
let drv = Driver::get("GeoJSON")?;
let mut ds = drv.create(Path::new("/tmp/output1.geojson"))?;
let lyr = ds.create_layer()?;
let field_defn = FieldDefn::new("Name", OGRFieldType::OFTString)?;
field_defn.set_width(80);
field_defn.add_to_layer(lyr)?;
let field_defn = FieldDefn::new("Value", OGRFieldType::OFTReal)?;
field_defn.add_to_layer(lyr)?;
let defn = Defn::from_layer(lyr);
// 1st feature:
let mut ft = Feature::new(&defn)?;
ft.set_geometry(Geometry::from_wkt("POINT (45.21 21.76)")?)?;
ft.set_field_string("Name", "Feature 1")?;
ft.set_field_double("Value", 45.78)?;
ft.create(lyr)?;
// 2nd feature:
let mut ft = Feature::new(&defn)?;
ft.set_field_double("Value", 0.789)?;
ft.set_geometry(Geometry::from_wkt("POINT (46.50 22.50)")?)?;
ft.set_field_string("Name", "Feature 2")?;
ft.create(lyr)?;
// Feature triggering an error due to a wrong field name:
let mut ft = Feature::new(&defn)?;
ft.set_geometry(Geometry::from_wkt("POINT (46.50 22.50)")?)?;
ft.set_field_string("Name", "Feature 2")?;
match ft.set_field_double("Values", 0.789) {
Ok(v) => v,
Err(err) => println!("{}", err),
};
ft.create(lyr)?;
Ok(())
}
/// Example 2, same output, shortened way:
fn example_2() -> Result<(), Error> {
let _ = fs::remove_file("/tmp/output2.geojson");
let driver = Driver::get("GeoJSON")?;
let mut ds = driver.create(Path::new("/tmp/output2.geojson"))?;
let layer = ds.create_layer()?;
layer.create_defn_fields(&[
("Name", OGRFieldType::OFTString),
("Value", OGRFieldType::OFTReal),
])?;
layer.create_feature_fields(
Geometry::from_wkt("POINT (45.21 21.76)")?,
&["Name", "Value"],
&[
FieldValue::StringValue("Feature 1".to_string()),
FieldValue::RealValue(45.78),
],
)?;
layer.create_feature_fields(
Geometry::from_wkt("POINT (46.50 22.50)")?,
&["Name", "Value"],
&[
FieldValue::StringValue("Feature 2".to_string()),
FieldValue::RealValue(0.789),
],
)?;
// Feature creation triggering an error due to a wrong field name:
match layer.create_feature_fields(
Geometry::from_wkt("POINT (46.50 22.50)")?,
&["Abcd", "Value"],
&[
FieldValue::StringValue("Feature 2".to_string()),
FieldValue::RealValue(0.789),
],
) {
Ok(v) => v,
Err(err) => println!("{}", err),
};
Ok(())
}
fn main()
|
{
example_1().unwrap();
example_2().unwrap();
}
|
identifier_body
|
|
write_ogr.rs
|
extern crate gdal;
use std::path::Path;
use std::fs;
use gdal::errors::Error;
use gdal::vector::{Defn, Driver, Feature, FieldDefn, FieldValue, Geometry, OGRFieldType};
/// Example 1, the detailed way:
fn example_1() -> Result<(), Error> {
let _ = fs::remove_file("/tmp/output1.geojson");
let drv = Driver::get("GeoJSON")?;
let mut ds = drv.create(Path::new("/tmp/output1.geojson"))?;
let lyr = ds.create_layer()?;
let field_defn = FieldDefn::new("Name", OGRFieldType::OFTString)?;
field_defn.set_width(80);
field_defn.add_to_layer(lyr)?;
let field_defn = FieldDefn::new("Value", OGRFieldType::OFTReal)?;
field_defn.add_to_layer(lyr)?;
let defn = Defn::from_layer(lyr);
// 1st feature:
let mut ft = Feature::new(&defn)?;
ft.set_geometry(Geometry::from_wkt("POINT (45.21 21.76)")?)?;
ft.set_field_string("Name", "Feature 1")?;
ft.set_field_double("Value", 45.78)?;
ft.create(lyr)?;
// 2nd feature:
let mut ft = Feature::new(&defn)?;
ft.set_field_double("Value", 0.789)?;
ft.set_geometry(Geometry::from_wkt("POINT (46.50 22.50)")?)?;
ft.set_field_string("Name", "Feature 2")?;
ft.create(lyr)?;
// Feature triggering an error due to a wrong field name:
|
let mut ft = Feature::new(&defn)?;
ft.set_geometry(Geometry::from_wkt("POINT (46.50 22.50)")?)?;
ft.set_field_string("Name", "Feature 2")?;
match ft.set_field_double("Values", 0.789) {
Ok(v) => v,
Err(err) => println!("{}", err),
};
ft.create(lyr)?;
Ok(())
}
/// Example 2, same output, shortened way:
fn example_2() -> Result<(), Error> {
let _ = fs::remove_file("/tmp/output2.geojson");
let driver = Driver::get("GeoJSON")?;
let mut ds = driver.create(Path::new("/tmp/output2.geojson"))?;
let layer = ds.create_layer()?;
layer.create_defn_fields(&[
("Name", OGRFieldType::OFTString),
("Value", OGRFieldType::OFTReal),
])?;
layer.create_feature_fields(
Geometry::from_wkt("POINT (45.21 21.76)")?,
&["Name", "Value"],
&[
FieldValue::StringValue("Feature 1".to_string()),
FieldValue::RealValue(45.78),
],
)?;
layer.create_feature_fields(
Geometry::from_wkt("POINT (46.50 22.50)")?,
&["Name", "Value"],
&[
FieldValue::StringValue("Feature 2".to_string()),
FieldValue::RealValue(0.789),
],
)?;
// Feature creation triggering an error due to a wrong field name:
match layer.create_feature_fields(
Geometry::from_wkt("POINT (46.50 22.50)")?,
&["Abcd", "Value"],
&[
FieldValue::StringValue("Feature 2".to_string()),
FieldValue::RealValue(0.789),
],
) {
Ok(v) => v,
Err(err) => println!("{}", err),
};
Ok(())
}
fn main() {
example_1().unwrap();
example_2().unwrap();
}
|
random_line_split
|
|
lib.rs
|
// Copyright © 2018 Cormac O'Brien
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
// and associated documentation files (the "Software"), to deal in the Software without
|
// restriction, including without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#![deny(unused_must_use)]
#![feature(drain_filter)]
#[macro_use]
extern crate bitflags;
extern crate byteorder;
extern crate cgmath;
extern crate chrono;
extern crate env_logger;
#[macro_use]
extern crate failure;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate num;
#[macro_use]
extern crate num_derive;
extern crate rand;
extern crate regex;
extern crate rodio;
extern crate winit;
pub mod client;
pub mod common;
pub mod server;
|
random_line_split
|
|
comments.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap::{BytePos, CharPos, CodeMap, Pos};
use diagnostic;
use parse::lexer::{is_whitespace, with_str_from, reader};
use parse::lexer::{StringReader, bump, is_eof, nextch, TokenAndSpan};
use parse::lexer::{is_line_non_doc_comment, is_block_non_doc_comment};
use parse::lexer;
use parse::token;
use parse::token::{get_ident_interner};
use std::io;
use std::str;
use std::uint;
#[deriving(Clone, Eq)]
pub enum cmnt_style {
isolated, // No code on either side of each line of the comment
trailing, // Code exists to the left of the comment
mixed, // Code before /* foo */ and after the comment
blank_line, // Just a manual blank line "\n\n", for layout
}
#[deriving(Clone)]
pub struct cmnt {
style: cmnt_style,
lines: ~[~str],
pos: BytePos
}
pub fn is_doc_comment(s: &str) -> bool
|
pub fn doc_comment_style(comment: &str) -> ast::AttrStyle {
assert!(is_doc_comment(comment));
if comment.starts_with("//!") || comment.starts_with("/*!") {
ast::AttrInner
} else {
ast::AttrOuter
}
}
pub fn strip_doc_comment_decoration(comment: &str) -> ~str {
/// remove whitespace-only lines from the start/end of lines
fn vertical_trim(lines: ~[~str]) -> ~[~str] {
let mut i = 0u;
let mut j = lines.len();
// first line of all-stars should be omitted
if lines.len() > 0 && lines[0].iter().all(|c| c == '*') {
i += 1;
}
while i < j && lines[i].trim().is_empty() {
i += 1;
}
// like the first, a last line of all stars should be omitted
if j > i && lines[j - 1].iter().skip(1).all(|c| c == '*') {
j -= 1;
}
while j > i && lines[j - 1].trim().is_empty() {
j -= 1;
}
return lines.slice(i, j).to_owned();
}
/// remove a "[ \t]*\*" block from each line, if possible
fn horizontal_trim(lines: ~[~str]) -> ~[~str] {
let mut i = uint::max_value;
let mut can_trim = true;
let mut first = true;
for line in lines.iter() {
for (j, c) in line.iter().enumerate() {
if j > i ||!"* \t".contains_char(c) {
can_trim = false;
break;
}
if c == '*' {
if first {
i = j;
first = false;
} else if i!= j {
can_trim = false;
}
break;
}
}
if i > line.len() {
can_trim = false;
}
if!can_trim {
break;
}
}
if can_trim {
do lines.map |line| {
line.slice(i + 1, line.len()).to_owned()
}
} else {
lines
}
}
// one-line comments lose their prefix
static ONLINERS: &'static [&'static str] = &["///!", "///", "//!", "//"];
for prefix in ONLINERS.iter() {
if comment.starts_with(*prefix) {
return comment.slice_from(prefix.len()).to_owned();
}
}
if comment.starts_with("/*") {
let lines = comment.slice(3u, comment.len() - 2u)
.any_line_iter()
.map(|s| s.to_owned())
.collect::<~[~str]>();
let lines = vertical_trim(lines);
let lines = horizontal_trim(lines);
return lines.connect("\n");
}
fail2!("not a doc-comment: {}", comment);
}
fn read_to_eol(rdr: @mut StringReader) -> ~str {
let mut val = ~"";
while rdr.curr!= '\n' &&!is_eof(rdr) {
val.push_char(rdr.curr);
bump(rdr);
}
if rdr.curr == '\n' { bump(rdr); }
return val;
}
fn read_one_line_comment(rdr: @mut StringReader) -> ~str {
let val = read_to_eol(rdr);
assert!((val[0] == '/' as u8 && val[1] == '/' as u8) ||
(val[0] == '#' as u8 && val[1] == '!' as u8));
return val;
}
fn consume_non_eol_whitespace(rdr: @mut StringReader) {
while is_whitespace(rdr.curr) && rdr.curr!= '\n' &&!is_eof(rdr) {
bump(rdr);
}
}
fn push_blank_line_comment(rdr: @mut StringReader, comments: &mut ~[cmnt]) {
debug2!(">>> blank-line comment");
let v: ~[~str] = ~[];
comments.push(cmnt {style: blank_line, lines: v, pos: rdr.last_pos});
}
fn consume_whitespace_counting_blank_lines(rdr: @mut StringReader,
comments: &mut ~[cmnt]) {
while is_whitespace(rdr.curr) &&!is_eof(rdr) {
if rdr.col == CharPos(0u) && rdr.curr == '\n' {
push_blank_line_comment(rdr, &mut *comments);
}
bump(rdr);
}
}
fn read_shebang_comment(rdr: @mut StringReader, code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> shebang comment");
let p = rdr.last_pos;
debug2!("<<< shebang comment");
comments.push(cmnt {
style: if code_to_the_left { trailing } else { isolated },
lines: ~[read_one_line_comment(rdr)],
pos: p
});
}
fn read_line_comments(rdr: @mut StringReader, code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> line comments");
let p = rdr.last_pos;
let mut lines: ~[~str] = ~[];
while rdr.curr == '/' && nextch(rdr) == '/' {
let line = read_one_line_comment(rdr);
debug2!("{}", line);
if is_doc_comment(line) { // doc-comments are not put in comments
break;
}
lines.push(line);
consume_non_eol_whitespace(rdr);
}
debug2!("<<< line comments");
if!lines.is_empty() {
comments.push(cmnt {
style: if code_to_the_left { trailing } else { isolated },
lines: lines,
pos: p
});
}
}
// Returns None if the first col chars of s contain a non-whitespace char.
// Otherwise returns Some(k) where k is first char offset after that leading
// whitespace. Note k may be outside bounds of s.
fn all_whitespace(s: &str, col: CharPos) -> Option<uint> {
let len = s.len();
let mut col = col.to_uint();
let mut cursor: uint = 0;
while col > 0 && cursor < len {
let r: str::CharRange = s.char_range_at(cursor);
if!r.ch.is_whitespace() {
return None;
}
cursor = r.next;
col -= 1;
}
return Some(cursor);
}
fn trim_whitespace_prefix_and_push_line(lines: &mut ~[~str],
s: ~str, col: CharPos) {
let len = s.len();
let s1 = match all_whitespace(s, col) {
Some(col) => {
if col < len {
s.slice(col, len).to_owned()
} else { ~"" }
}
None => s,
};
debug2!("pushing line: {}", s1);
lines.push(s1);
}
fn read_block_comment(rdr: @mut StringReader,
code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> block comment");
let p = rdr.last_pos;
let mut lines: ~[~str] = ~[];
let col: CharPos = rdr.col;
bump(rdr);
bump(rdr);
let mut curr_line = ~"/*";
// doc-comments are not really comments, they are attributes
if rdr.curr == '*' || rdr.curr == '!' {
while!(rdr.curr == '*' && nextch(rdr) == '/') &&!is_eof(rdr) {
curr_line.push_char(rdr.curr);
bump(rdr);
}
if!is_eof(rdr) {
curr_line.push_str("*/");
bump(rdr);
bump(rdr);
}
if!is_block_non_doc_comment(curr_line) { return; }
assert!(!curr_line.contains_char('\n'));
lines.push(curr_line);
} else {
let mut level: int = 1;
while level > 0 {
debug2!("=== block comment level {}", level);
if is_eof(rdr) {
(rdr as @mut reader).fatal(~"unterminated block comment");
}
if rdr.curr == '\n' {
trim_whitespace_prefix_and_push_line(&mut lines, curr_line,
col);
curr_line = ~"";
bump(rdr);
} else {
curr_line.push_char(rdr.curr);
if rdr.curr == '/' && nextch(rdr) == '*' {
bump(rdr);
bump(rdr);
curr_line.push_char('*');
level += 1;
} else {
if rdr.curr == '*' && nextch(rdr) == '/' {
bump(rdr);
bump(rdr);
curr_line.push_char('/');
level -= 1;
} else { bump(rdr); }
}
}
}
if curr_line.len()!= 0 {
trim_whitespace_prefix_and_push_line(&mut lines, curr_line, col);
}
}
let mut style = if code_to_the_left { trailing } else { isolated };
consume_non_eol_whitespace(rdr);
if!is_eof(rdr) && rdr.curr!= '\n' && lines.len() == 1u {
style = mixed;
}
debug2!("<<< block comment");
comments.push(cmnt {style: style, lines: lines, pos: p});
}
fn peeking_at_comment(rdr: @mut StringReader) -> bool {
return ((rdr.curr == '/' && nextch(rdr) == '/') ||
(rdr.curr == '/' && nextch(rdr) == '*')) ||
(rdr.curr == '#' && nextch(rdr) == '!');
}
fn consume_comment(rdr: @mut StringReader,
code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> consume comment");
if rdr.curr == '/' && nextch(rdr) == '/' {
read_line_comments(rdr, code_to_the_left, comments);
} else if rdr.curr == '/' && nextch(rdr) == '*' {
read_block_comment(rdr, code_to_the_left, comments);
} else if rdr.curr == '#' && nextch(rdr) == '!' {
read_shebang_comment(rdr, code_to_the_left, comments);
} else { fail2!(); }
debug2!("<<< consume comment");
}
#[deriving(Clone)]
pub struct lit {
lit: ~str,
pos: BytePos
}
// it appears this function is called only from pprust... that's
// probably not a good thing.
pub fn gather_comments_and_literals(span_diagnostic:
@mut diagnostic::span_handler,
path: @str,
srdr: @io::Reader)
-> (~[cmnt], ~[lit]) {
let src = str::from_utf8(srdr.read_whole_stream()).to_managed();
let cm = CodeMap::new();
let filemap = cm.new_filemap(path, src);
let rdr = lexer::new_low_level_string_reader(span_diagnostic, filemap);
let mut comments: ~[cmnt] = ~[];
let mut literals: ~[lit] = ~[];
let mut first_read: bool = true;
while!is_eof(rdr) {
loop {
let mut code_to_the_left =!first_read;
consume_non_eol_whitespace(rdr);
if rdr.curr == '\n' {
code_to_the_left = false;
consume_whitespace_counting_blank_lines(rdr, &mut comments);
}
while peeking_at_comment(rdr) {
consume_comment(rdr, code_to_the_left, &mut comments);
consume_whitespace_counting_blank_lines(rdr, &mut comments);
}
break;
}
let bstart = rdr.last_pos;
rdr.next_token();
//discard, and look ahead; we're working with internal state
let TokenAndSpan {tok: tok, sp: sp} = rdr.peek();
if token::is_lit(&tok) {
do with_str_from(rdr, bstart) |s| {
debug2!("tok lit: {}", s);
literals.push(lit {lit: s.to_owned(), pos: sp.lo});
}
} else {
debug2!("tok: {}", token::to_str(get_ident_interner(), &tok));
}
first_read = false;
}
(comments, literals)
}
#[cfg(test)]
mod test {
use super::*;
#[test] fn test_block_doc_comment_1() {
let comment = "/**\n * Test \n ** Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" Test \n* Test\n Test");
}
#[test] fn test_block_doc_comment_2() {
let comment = "/**\n * Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" Test\n Test");
}
#[test] fn test_block_doc_comment_3() {
let comment = "/**\n let a: *int;\n *a = 5;\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" let a: *int;\n *a = 5;");
}
#[test] fn test_block_doc_comment_4() {
let comment = "/*******************\n test\n *********************/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" test");
}
#[test] fn test_line_doc_comment() {
let stripped = strip_doc_comment_decoration("/// test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("///! test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("///test");
assert_eq!(stripped, ~"test");
let stripped = strip_doc_comment_decoration("///!test");
assert_eq!(stripped, ~"test");
let stripped = strip_doc_comment_decoration("//test");
assert_eq!(stripped, ~"test");
}
}
|
{
(s.starts_with("///") && !is_line_non_doc_comment(s)) ||
s.starts_with("//!") ||
(s.starts_with("/**") && !is_block_non_doc_comment(s)) ||
s.starts_with("/*!")
}
|
identifier_body
|
comments.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap::{BytePos, CharPos, CodeMap, Pos};
use diagnostic;
use parse::lexer::{is_whitespace, with_str_from, reader};
use parse::lexer::{StringReader, bump, is_eof, nextch, TokenAndSpan};
use parse::lexer::{is_line_non_doc_comment, is_block_non_doc_comment};
use parse::lexer;
use parse::token;
use parse::token::{get_ident_interner};
use std::io;
use std::str;
use std::uint;
#[deriving(Clone, Eq)]
pub enum cmnt_style {
isolated, // No code on either side of each line of the comment
trailing, // Code exists to the left of the comment
mixed, // Code before /* foo */ and after the comment
blank_line, // Just a manual blank line "\n\n", for layout
}
#[deriving(Clone)]
pub struct cmnt {
style: cmnt_style,
lines: ~[~str],
pos: BytePos
}
pub fn is_doc_comment(s: &str) -> bool {
(s.starts_with("///") &&!is_line_non_doc_comment(s)) ||
s.starts_with("//!") ||
(s.starts_with("/**") &&!is_block_non_doc_comment(s)) ||
s.starts_with("/*!")
}
pub fn doc_comment_style(comment: &str) -> ast::AttrStyle {
assert!(is_doc_comment(comment));
if comment.starts_with("//!") || comment.starts_with("/*!") {
ast::AttrInner
} else {
ast::AttrOuter
}
}
pub fn strip_doc_comment_decoration(comment: &str) -> ~str {
/// remove whitespace-only lines from the start/end of lines
fn vertical_trim(lines: ~[~str]) -> ~[~str] {
let mut i = 0u;
let mut j = lines.len();
// first line of all-stars should be omitted
if lines.len() > 0 && lines[0].iter().all(|c| c == '*') {
i += 1;
}
while i < j && lines[i].trim().is_empty() {
i += 1;
}
// like the first, a last line of all stars should be omitted
if j > i && lines[j - 1].iter().skip(1).all(|c| c == '*') {
j -= 1;
}
while j > i && lines[j - 1].trim().is_empty() {
j -= 1;
}
return lines.slice(i, j).to_owned();
}
/// remove a "[ \t]*\*" block from each line, if possible
fn horizontal_trim(lines: ~[~str]) -> ~[~str] {
let mut i = uint::max_value;
let mut can_trim = true;
let mut first = true;
for line in lines.iter() {
for (j, c) in line.iter().enumerate() {
if j > i ||!"* \t".contains_char(c) {
can_trim = false;
break;
}
if c == '*' {
if first {
i = j;
first = false;
} else if i!= j {
can_trim = false;
}
break;
}
}
if i > line.len() {
can_trim = false;
}
if!can_trim {
break;
}
}
if can_trim {
do lines.map |line| {
line.slice(i + 1, line.len()).to_owned()
}
} else {
lines
}
}
// one-line comments lose their prefix
static ONLINERS: &'static [&'static str] = &["///!", "///", "//!", "//"];
for prefix in ONLINERS.iter() {
if comment.starts_with(*prefix) {
return comment.slice_from(prefix.len()).to_owned();
}
}
if comment.starts_with("/*") {
let lines = comment.slice(3u, comment.len() - 2u)
.any_line_iter()
.map(|s| s.to_owned())
.collect::<~[~str]>();
let lines = vertical_trim(lines);
let lines = horizontal_trim(lines);
return lines.connect("\n");
}
fail2!("not a doc-comment: {}", comment);
}
fn read_to_eol(rdr: @mut StringReader) -> ~str {
let mut val = ~"";
while rdr.curr!= '\n' &&!is_eof(rdr) {
val.push_char(rdr.curr);
bump(rdr);
}
if rdr.curr == '\n' { bump(rdr); }
return val;
}
fn read_one_line_comment(rdr: @mut StringReader) -> ~str {
let val = read_to_eol(rdr);
assert!((val[0] == '/' as u8 && val[1] == '/' as u8) ||
(val[0] == '#' as u8 && val[1] == '!' as u8));
return val;
}
fn consume_non_eol_whitespace(rdr: @mut StringReader) {
while is_whitespace(rdr.curr) && rdr.curr!= '\n' &&!is_eof(rdr) {
bump(rdr);
}
}
fn push_blank_line_comment(rdr: @mut StringReader, comments: &mut ~[cmnt]) {
debug2!(">>> blank-line comment");
let v: ~[~str] = ~[];
comments.push(cmnt {style: blank_line, lines: v, pos: rdr.last_pos});
}
fn consume_whitespace_counting_blank_lines(rdr: @mut StringReader,
comments: &mut ~[cmnt]) {
while is_whitespace(rdr.curr) &&!is_eof(rdr) {
if rdr.col == CharPos(0u) && rdr.curr == '\n' {
push_blank_line_comment(rdr, &mut *comments);
}
bump(rdr);
}
}
fn read_shebang_comment(rdr: @mut StringReader, code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> shebang comment");
let p = rdr.last_pos;
debug2!("<<< shebang comment");
comments.push(cmnt {
style: if code_to_the_left { trailing } else
|
,
lines: ~[read_one_line_comment(rdr)],
pos: p
});
}
fn read_line_comments(rdr: @mut StringReader, code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> line comments");
let p = rdr.last_pos;
let mut lines: ~[~str] = ~[];
while rdr.curr == '/' && nextch(rdr) == '/' {
let line = read_one_line_comment(rdr);
debug2!("{}", line);
if is_doc_comment(line) { // doc-comments are not put in comments
break;
}
lines.push(line);
consume_non_eol_whitespace(rdr);
}
debug2!("<<< line comments");
if!lines.is_empty() {
comments.push(cmnt {
style: if code_to_the_left { trailing } else { isolated },
lines: lines,
pos: p
});
}
}
// Returns None if the first col chars of s contain a non-whitespace char.
// Otherwise returns Some(k) where k is first char offset after that leading
// whitespace. Note k may be outside bounds of s.
fn all_whitespace(s: &str, col: CharPos) -> Option<uint> {
let len = s.len();
let mut col = col.to_uint();
let mut cursor: uint = 0;
while col > 0 && cursor < len {
let r: str::CharRange = s.char_range_at(cursor);
if!r.ch.is_whitespace() {
return None;
}
cursor = r.next;
col -= 1;
}
return Some(cursor);
}
fn trim_whitespace_prefix_and_push_line(lines: &mut ~[~str],
s: ~str, col: CharPos) {
let len = s.len();
let s1 = match all_whitespace(s, col) {
Some(col) => {
if col < len {
s.slice(col, len).to_owned()
} else { ~"" }
}
None => s,
};
debug2!("pushing line: {}", s1);
lines.push(s1);
}
fn read_block_comment(rdr: @mut StringReader,
code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> block comment");
let p = rdr.last_pos;
let mut lines: ~[~str] = ~[];
let col: CharPos = rdr.col;
bump(rdr);
bump(rdr);
let mut curr_line = ~"/*";
// doc-comments are not really comments, they are attributes
if rdr.curr == '*' || rdr.curr == '!' {
while!(rdr.curr == '*' && nextch(rdr) == '/') &&!is_eof(rdr) {
curr_line.push_char(rdr.curr);
bump(rdr);
}
if!is_eof(rdr) {
curr_line.push_str("*/");
bump(rdr);
bump(rdr);
}
if!is_block_non_doc_comment(curr_line) { return; }
assert!(!curr_line.contains_char('\n'));
lines.push(curr_line);
} else {
let mut level: int = 1;
while level > 0 {
debug2!("=== block comment level {}", level);
if is_eof(rdr) {
(rdr as @mut reader).fatal(~"unterminated block comment");
}
if rdr.curr == '\n' {
trim_whitespace_prefix_and_push_line(&mut lines, curr_line,
col);
curr_line = ~"";
bump(rdr);
} else {
curr_line.push_char(rdr.curr);
if rdr.curr == '/' && nextch(rdr) == '*' {
bump(rdr);
bump(rdr);
curr_line.push_char('*');
level += 1;
} else {
if rdr.curr == '*' && nextch(rdr) == '/' {
bump(rdr);
bump(rdr);
curr_line.push_char('/');
level -= 1;
} else { bump(rdr); }
}
}
}
if curr_line.len()!= 0 {
trim_whitespace_prefix_and_push_line(&mut lines, curr_line, col);
}
}
let mut style = if code_to_the_left { trailing } else { isolated };
consume_non_eol_whitespace(rdr);
if!is_eof(rdr) && rdr.curr!= '\n' && lines.len() == 1u {
style = mixed;
}
debug2!("<<< block comment");
comments.push(cmnt {style: style, lines: lines, pos: p});
}
fn peeking_at_comment(rdr: @mut StringReader) -> bool {
return ((rdr.curr == '/' && nextch(rdr) == '/') ||
(rdr.curr == '/' && nextch(rdr) == '*')) ||
(rdr.curr == '#' && nextch(rdr) == '!');
}
fn consume_comment(rdr: @mut StringReader,
code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> consume comment");
if rdr.curr == '/' && nextch(rdr) == '/' {
read_line_comments(rdr, code_to_the_left, comments);
} else if rdr.curr == '/' && nextch(rdr) == '*' {
read_block_comment(rdr, code_to_the_left, comments);
} else if rdr.curr == '#' && nextch(rdr) == '!' {
read_shebang_comment(rdr, code_to_the_left, comments);
} else { fail2!(); }
debug2!("<<< consume comment");
}
#[deriving(Clone)]
pub struct lit {
lit: ~str,
pos: BytePos
}
// it appears this function is called only from pprust... that's
// probably not a good thing.
pub fn gather_comments_and_literals(span_diagnostic:
@mut diagnostic::span_handler,
path: @str,
srdr: @io::Reader)
-> (~[cmnt], ~[lit]) {
let src = str::from_utf8(srdr.read_whole_stream()).to_managed();
let cm = CodeMap::new();
let filemap = cm.new_filemap(path, src);
let rdr = lexer::new_low_level_string_reader(span_diagnostic, filemap);
let mut comments: ~[cmnt] = ~[];
let mut literals: ~[lit] = ~[];
let mut first_read: bool = true;
while!is_eof(rdr) {
loop {
let mut code_to_the_left =!first_read;
consume_non_eol_whitespace(rdr);
if rdr.curr == '\n' {
code_to_the_left = false;
consume_whitespace_counting_blank_lines(rdr, &mut comments);
}
while peeking_at_comment(rdr) {
consume_comment(rdr, code_to_the_left, &mut comments);
consume_whitespace_counting_blank_lines(rdr, &mut comments);
}
break;
}
let bstart = rdr.last_pos;
rdr.next_token();
//discard, and look ahead; we're working with internal state
let TokenAndSpan {tok: tok, sp: sp} = rdr.peek();
if token::is_lit(&tok) {
do with_str_from(rdr, bstart) |s| {
debug2!("tok lit: {}", s);
literals.push(lit {lit: s.to_owned(), pos: sp.lo});
}
} else {
debug2!("tok: {}", token::to_str(get_ident_interner(), &tok));
}
first_read = false;
}
(comments, literals)
}
#[cfg(test)]
mod test {
use super::*;
#[test] fn test_block_doc_comment_1() {
let comment = "/**\n * Test \n ** Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" Test \n* Test\n Test");
}
#[test] fn test_block_doc_comment_2() {
let comment = "/**\n * Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" Test\n Test");
}
#[test] fn test_block_doc_comment_3() {
let comment = "/**\n let a: *int;\n *a = 5;\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" let a: *int;\n *a = 5;");
}
#[test] fn test_block_doc_comment_4() {
let comment = "/*******************\n test\n *********************/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" test");
}
#[test] fn test_line_doc_comment() {
let stripped = strip_doc_comment_decoration("/// test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("///! test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("///test");
assert_eq!(stripped, ~"test");
let stripped = strip_doc_comment_decoration("///!test");
assert_eq!(stripped, ~"test");
let stripped = strip_doc_comment_decoration("//test");
assert_eq!(stripped, ~"test");
}
}
|
{ isolated }
|
conditional_block
|
comments.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap::{BytePos, CharPos, CodeMap, Pos};
use diagnostic;
use parse::lexer::{is_whitespace, with_str_from, reader};
use parse::lexer::{StringReader, bump, is_eof, nextch, TokenAndSpan};
use parse::lexer::{is_line_non_doc_comment, is_block_non_doc_comment};
use parse::lexer;
use parse::token;
use parse::token::{get_ident_interner};
use std::io;
use std::str;
use std::uint;
#[deriving(Clone, Eq)]
pub enum cmnt_style {
isolated, // No code on either side of each line of the comment
trailing, // Code exists to the left of the comment
mixed, // Code before /* foo */ and after the comment
blank_line, // Just a manual blank line "\n\n", for layout
}
#[deriving(Clone)]
pub struct cmnt {
style: cmnt_style,
lines: ~[~str],
pos: BytePos
}
pub fn is_doc_comment(s: &str) -> bool {
(s.starts_with("///") &&!is_line_non_doc_comment(s)) ||
s.starts_with("//!") ||
(s.starts_with("/**") &&!is_block_non_doc_comment(s)) ||
s.starts_with("/*!")
}
pub fn
|
(comment: &str) -> ast::AttrStyle {
assert!(is_doc_comment(comment));
if comment.starts_with("//!") || comment.starts_with("/*!") {
ast::AttrInner
} else {
ast::AttrOuter
}
}
pub fn strip_doc_comment_decoration(comment: &str) -> ~str {
/// remove whitespace-only lines from the start/end of lines
fn vertical_trim(lines: ~[~str]) -> ~[~str] {
let mut i = 0u;
let mut j = lines.len();
// first line of all-stars should be omitted
if lines.len() > 0 && lines[0].iter().all(|c| c == '*') {
i += 1;
}
while i < j && lines[i].trim().is_empty() {
i += 1;
}
// like the first, a last line of all stars should be omitted
if j > i && lines[j - 1].iter().skip(1).all(|c| c == '*') {
j -= 1;
}
while j > i && lines[j - 1].trim().is_empty() {
j -= 1;
}
return lines.slice(i, j).to_owned();
}
/// remove a "[ \t]*\*" block from each line, if possible
fn horizontal_trim(lines: ~[~str]) -> ~[~str] {
let mut i = uint::max_value;
let mut can_trim = true;
let mut first = true;
for line in lines.iter() {
for (j, c) in line.iter().enumerate() {
if j > i ||!"* \t".contains_char(c) {
can_trim = false;
break;
}
if c == '*' {
if first {
i = j;
first = false;
} else if i!= j {
can_trim = false;
}
break;
}
}
if i > line.len() {
can_trim = false;
}
if!can_trim {
break;
}
}
if can_trim {
do lines.map |line| {
line.slice(i + 1, line.len()).to_owned()
}
} else {
lines
}
}
// one-line comments lose their prefix
static ONLINERS: &'static [&'static str] = &["///!", "///", "//!", "//"];
for prefix in ONLINERS.iter() {
if comment.starts_with(*prefix) {
return comment.slice_from(prefix.len()).to_owned();
}
}
if comment.starts_with("/*") {
let lines = comment.slice(3u, comment.len() - 2u)
.any_line_iter()
.map(|s| s.to_owned())
.collect::<~[~str]>();
let lines = vertical_trim(lines);
let lines = horizontal_trim(lines);
return lines.connect("\n");
}
fail2!("not a doc-comment: {}", comment);
}
fn read_to_eol(rdr: @mut StringReader) -> ~str {
let mut val = ~"";
while rdr.curr!= '\n' &&!is_eof(rdr) {
val.push_char(rdr.curr);
bump(rdr);
}
if rdr.curr == '\n' { bump(rdr); }
return val;
}
fn read_one_line_comment(rdr: @mut StringReader) -> ~str {
let val = read_to_eol(rdr);
assert!((val[0] == '/' as u8 && val[1] == '/' as u8) ||
(val[0] == '#' as u8 && val[1] == '!' as u8));
return val;
}
fn consume_non_eol_whitespace(rdr: @mut StringReader) {
while is_whitespace(rdr.curr) && rdr.curr!= '\n' &&!is_eof(rdr) {
bump(rdr);
}
}
fn push_blank_line_comment(rdr: @mut StringReader, comments: &mut ~[cmnt]) {
debug2!(">>> blank-line comment");
let v: ~[~str] = ~[];
comments.push(cmnt {style: blank_line, lines: v, pos: rdr.last_pos});
}
fn consume_whitespace_counting_blank_lines(rdr: @mut StringReader,
comments: &mut ~[cmnt]) {
while is_whitespace(rdr.curr) &&!is_eof(rdr) {
if rdr.col == CharPos(0u) && rdr.curr == '\n' {
push_blank_line_comment(rdr, &mut *comments);
}
bump(rdr);
}
}
fn read_shebang_comment(rdr: @mut StringReader, code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> shebang comment");
let p = rdr.last_pos;
debug2!("<<< shebang comment");
comments.push(cmnt {
style: if code_to_the_left { trailing } else { isolated },
lines: ~[read_one_line_comment(rdr)],
pos: p
});
}
fn read_line_comments(rdr: @mut StringReader, code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> line comments");
let p = rdr.last_pos;
let mut lines: ~[~str] = ~[];
while rdr.curr == '/' && nextch(rdr) == '/' {
let line = read_one_line_comment(rdr);
debug2!("{}", line);
if is_doc_comment(line) { // doc-comments are not put in comments
break;
}
lines.push(line);
consume_non_eol_whitespace(rdr);
}
debug2!("<<< line comments");
if!lines.is_empty() {
comments.push(cmnt {
style: if code_to_the_left { trailing } else { isolated },
lines: lines,
pos: p
});
}
}
// Returns None if the first col chars of s contain a non-whitespace char.
// Otherwise returns Some(k) where k is first char offset after that leading
// whitespace. Note k may be outside bounds of s.
fn all_whitespace(s: &str, col: CharPos) -> Option<uint> {
let len = s.len();
let mut col = col.to_uint();
let mut cursor: uint = 0;
while col > 0 && cursor < len {
let r: str::CharRange = s.char_range_at(cursor);
if!r.ch.is_whitespace() {
return None;
}
cursor = r.next;
col -= 1;
}
return Some(cursor);
}
fn trim_whitespace_prefix_and_push_line(lines: &mut ~[~str],
s: ~str, col: CharPos) {
let len = s.len();
let s1 = match all_whitespace(s, col) {
Some(col) => {
if col < len {
s.slice(col, len).to_owned()
} else { ~"" }
}
None => s,
};
debug2!("pushing line: {}", s1);
lines.push(s1);
}
fn read_block_comment(rdr: @mut StringReader,
code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> block comment");
let p = rdr.last_pos;
let mut lines: ~[~str] = ~[];
let col: CharPos = rdr.col;
bump(rdr);
bump(rdr);
let mut curr_line = ~"/*";
// doc-comments are not really comments, they are attributes
if rdr.curr == '*' || rdr.curr == '!' {
while!(rdr.curr == '*' && nextch(rdr) == '/') &&!is_eof(rdr) {
curr_line.push_char(rdr.curr);
bump(rdr);
}
if!is_eof(rdr) {
curr_line.push_str("*/");
bump(rdr);
bump(rdr);
}
if!is_block_non_doc_comment(curr_line) { return; }
assert!(!curr_line.contains_char('\n'));
lines.push(curr_line);
} else {
let mut level: int = 1;
while level > 0 {
debug2!("=== block comment level {}", level);
if is_eof(rdr) {
(rdr as @mut reader).fatal(~"unterminated block comment");
}
if rdr.curr == '\n' {
trim_whitespace_prefix_and_push_line(&mut lines, curr_line,
col);
curr_line = ~"";
bump(rdr);
} else {
curr_line.push_char(rdr.curr);
if rdr.curr == '/' && nextch(rdr) == '*' {
bump(rdr);
bump(rdr);
curr_line.push_char('*');
level += 1;
} else {
if rdr.curr == '*' && nextch(rdr) == '/' {
bump(rdr);
bump(rdr);
curr_line.push_char('/');
level -= 1;
} else { bump(rdr); }
}
}
}
if curr_line.len()!= 0 {
trim_whitespace_prefix_and_push_line(&mut lines, curr_line, col);
}
}
let mut style = if code_to_the_left { trailing } else { isolated };
consume_non_eol_whitespace(rdr);
if!is_eof(rdr) && rdr.curr!= '\n' && lines.len() == 1u {
style = mixed;
}
debug2!("<<< block comment");
comments.push(cmnt {style: style, lines: lines, pos: p});
}
fn peeking_at_comment(rdr: @mut StringReader) -> bool {
return ((rdr.curr == '/' && nextch(rdr) == '/') ||
(rdr.curr == '/' && nextch(rdr) == '*')) ||
(rdr.curr == '#' && nextch(rdr) == '!');
}
fn consume_comment(rdr: @mut StringReader,
code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> consume comment");
if rdr.curr == '/' && nextch(rdr) == '/' {
read_line_comments(rdr, code_to_the_left, comments);
} else if rdr.curr == '/' && nextch(rdr) == '*' {
read_block_comment(rdr, code_to_the_left, comments);
} else if rdr.curr == '#' && nextch(rdr) == '!' {
read_shebang_comment(rdr, code_to_the_left, comments);
} else { fail2!(); }
debug2!("<<< consume comment");
}
#[deriving(Clone)]
pub struct lit {
lit: ~str,
pos: BytePos
}
// it appears this function is called only from pprust... that's
// probably not a good thing.
pub fn gather_comments_and_literals(span_diagnostic:
@mut diagnostic::span_handler,
path: @str,
srdr: @io::Reader)
-> (~[cmnt], ~[lit]) {
let src = str::from_utf8(srdr.read_whole_stream()).to_managed();
let cm = CodeMap::new();
let filemap = cm.new_filemap(path, src);
let rdr = lexer::new_low_level_string_reader(span_diagnostic, filemap);
let mut comments: ~[cmnt] = ~[];
let mut literals: ~[lit] = ~[];
let mut first_read: bool = true;
while!is_eof(rdr) {
loop {
let mut code_to_the_left =!first_read;
consume_non_eol_whitespace(rdr);
if rdr.curr == '\n' {
code_to_the_left = false;
consume_whitespace_counting_blank_lines(rdr, &mut comments);
}
while peeking_at_comment(rdr) {
consume_comment(rdr, code_to_the_left, &mut comments);
consume_whitespace_counting_blank_lines(rdr, &mut comments);
}
break;
}
let bstart = rdr.last_pos;
rdr.next_token();
//discard, and look ahead; we're working with internal state
let TokenAndSpan {tok: tok, sp: sp} = rdr.peek();
if token::is_lit(&tok) {
do with_str_from(rdr, bstart) |s| {
debug2!("tok lit: {}", s);
literals.push(lit {lit: s.to_owned(), pos: sp.lo});
}
} else {
debug2!("tok: {}", token::to_str(get_ident_interner(), &tok));
}
first_read = false;
}
(comments, literals)
}
#[cfg(test)]
mod test {
use super::*;
#[test] fn test_block_doc_comment_1() {
let comment = "/**\n * Test \n ** Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" Test \n* Test\n Test");
}
#[test] fn test_block_doc_comment_2() {
let comment = "/**\n * Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" Test\n Test");
}
#[test] fn test_block_doc_comment_3() {
let comment = "/**\n let a: *int;\n *a = 5;\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" let a: *int;\n *a = 5;");
}
#[test] fn test_block_doc_comment_4() {
let comment = "/*******************\n test\n *********************/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" test");
}
#[test] fn test_line_doc_comment() {
let stripped = strip_doc_comment_decoration("/// test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("///! test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("///test");
assert_eq!(stripped, ~"test");
let stripped = strip_doc_comment_decoration("///!test");
assert_eq!(stripped, ~"test");
let stripped = strip_doc_comment_decoration("//test");
assert_eq!(stripped, ~"test");
}
}
|
doc_comment_style
|
identifier_name
|
comments.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast;
use codemap::{BytePos, CharPos, CodeMap, Pos};
use diagnostic;
use parse::lexer::{is_whitespace, with_str_from, reader};
use parse::lexer::{StringReader, bump, is_eof, nextch, TokenAndSpan};
use parse::lexer::{is_line_non_doc_comment, is_block_non_doc_comment};
use parse::lexer;
use parse::token;
use parse::token::{get_ident_interner};
use std::io;
use std::str;
use std::uint;
#[deriving(Clone, Eq)]
pub enum cmnt_style {
isolated, // No code on either side of each line of the comment
trailing, // Code exists to the left of the comment
mixed, // Code before /* foo */ and after the comment
blank_line, // Just a manual blank line "\n\n", for layout
}
#[deriving(Clone)]
pub struct cmnt {
style: cmnt_style,
lines: ~[~str],
pos: BytePos
}
pub fn is_doc_comment(s: &str) -> bool {
(s.starts_with("///") &&!is_line_non_doc_comment(s)) ||
s.starts_with("//!") ||
(s.starts_with("/**") &&!is_block_non_doc_comment(s)) ||
s.starts_with("/*!")
}
pub fn doc_comment_style(comment: &str) -> ast::AttrStyle {
assert!(is_doc_comment(comment));
if comment.starts_with("//!") || comment.starts_with("/*!") {
ast::AttrInner
} else {
ast::AttrOuter
}
}
pub fn strip_doc_comment_decoration(comment: &str) -> ~str {
/// remove whitespace-only lines from the start/end of lines
fn vertical_trim(lines: ~[~str]) -> ~[~str] {
let mut i = 0u;
let mut j = lines.len();
// first line of all-stars should be omitted
if lines.len() > 0 && lines[0].iter().all(|c| c == '*') {
i += 1;
}
while i < j && lines[i].trim().is_empty() {
i += 1;
}
// like the first, a last line of all stars should be omitted
if j > i && lines[j - 1].iter().skip(1).all(|c| c == '*') {
j -= 1;
}
while j > i && lines[j - 1].trim().is_empty() {
j -= 1;
}
return lines.slice(i, j).to_owned();
}
/// remove a "[ \t]*\*" block from each line, if possible
fn horizontal_trim(lines: ~[~str]) -> ~[~str] {
let mut i = uint::max_value;
let mut can_trim = true;
let mut first = true;
for line in lines.iter() {
for (j, c) in line.iter().enumerate() {
if j > i ||!"* \t".contains_char(c) {
can_trim = false;
break;
}
if c == '*' {
if first {
i = j;
first = false;
} else if i!= j {
can_trim = false;
}
break;
}
}
if i > line.len() {
can_trim = false;
}
if!can_trim {
break;
}
}
if can_trim {
do lines.map |line| {
line.slice(i + 1, line.len()).to_owned()
}
} else {
lines
}
}
// one-line comments lose their prefix
static ONLINERS: &'static [&'static str] = &["///!", "///", "//!", "//"];
for prefix in ONLINERS.iter() {
if comment.starts_with(*prefix) {
return comment.slice_from(prefix.len()).to_owned();
}
}
if comment.starts_with("/*") {
let lines = comment.slice(3u, comment.len() - 2u)
.any_line_iter()
.map(|s| s.to_owned())
.collect::<~[~str]>();
let lines = vertical_trim(lines);
let lines = horizontal_trim(lines);
return lines.connect("\n");
}
fail2!("not a doc-comment: {}", comment);
}
fn read_to_eol(rdr: @mut StringReader) -> ~str {
let mut val = ~"";
while rdr.curr!= '\n' &&!is_eof(rdr) {
val.push_char(rdr.curr);
bump(rdr);
}
if rdr.curr == '\n' { bump(rdr); }
return val;
}
fn read_one_line_comment(rdr: @mut StringReader) -> ~str {
let val = read_to_eol(rdr);
assert!((val[0] == '/' as u8 && val[1] == '/' as u8) ||
(val[0] == '#' as u8 && val[1] == '!' as u8));
return val;
}
fn consume_non_eol_whitespace(rdr: @mut StringReader) {
while is_whitespace(rdr.curr) && rdr.curr!= '\n' &&!is_eof(rdr) {
bump(rdr);
}
}
fn push_blank_line_comment(rdr: @mut StringReader, comments: &mut ~[cmnt]) {
debug2!(">>> blank-line comment");
let v: ~[~str] = ~[];
comments.push(cmnt {style: blank_line, lines: v, pos: rdr.last_pos});
}
fn consume_whitespace_counting_blank_lines(rdr: @mut StringReader,
comments: &mut ~[cmnt]) {
while is_whitespace(rdr.curr) &&!is_eof(rdr) {
if rdr.col == CharPos(0u) && rdr.curr == '\n' {
push_blank_line_comment(rdr, &mut *comments);
}
bump(rdr);
}
}
fn read_shebang_comment(rdr: @mut StringReader, code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> shebang comment");
let p = rdr.last_pos;
debug2!("<<< shebang comment");
comments.push(cmnt {
style: if code_to_the_left { trailing } else { isolated },
lines: ~[read_one_line_comment(rdr)],
pos: p
});
}
fn read_line_comments(rdr: @mut StringReader, code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> line comments");
let p = rdr.last_pos;
let mut lines: ~[~str] = ~[];
while rdr.curr == '/' && nextch(rdr) == '/' {
let line = read_one_line_comment(rdr);
debug2!("{}", line);
if is_doc_comment(line) { // doc-comments are not put in comments
break;
}
lines.push(line);
consume_non_eol_whitespace(rdr);
}
debug2!("<<< line comments");
if!lines.is_empty() {
comments.push(cmnt {
style: if code_to_the_left { trailing } else { isolated },
lines: lines,
pos: p
});
}
}
// Returns None if the first col chars of s contain a non-whitespace char.
// Otherwise returns Some(k) where k is first char offset after that leading
// whitespace. Note k may be outside bounds of s.
fn all_whitespace(s: &str, col: CharPos) -> Option<uint> {
let len = s.len();
let mut col = col.to_uint();
let mut cursor: uint = 0;
while col > 0 && cursor < len {
let r: str::CharRange = s.char_range_at(cursor);
if!r.ch.is_whitespace() {
return None;
}
cursor = r.next;
col -= 1;
}
return Some(cursor);
}
fn trim_whitespace_prefix_and_push_line(lines: &mut ~[~str],
s: ~str, col: CharPos) {
let len = s.len();
let s1 = match all_whitespace(s, col) {
Some(col) => {
if col < len {
s.slice(col, len).to_owned()
} else { ~"" }
}
None => s,
};
debug2!("pushing line: {}", s1);
lines.push(s1);
}
fn read_block_comment(rdr: @mut StringReader,
code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> block comment");
let p = rdr.last_pos;
let mut lines: ~[~str] = ~[];
let col: CharPos = rdr.col;
bump(rdr);
bump(rdr);
let mut curr_line = ~"/*";
// doc-comments are not really comments, they are attributes
if rdr.curr == '*' || rdr.curr == '!' {
while!(rdr.curr == '*' && nextch(rdr) == '/') &&!is_eof(rdr) {
curr_line.push_char(rdr.curr);
bump(rdr);
}
if!is_eof(rdr) {
curr_line.push_str("*/");
bump(rdr);
bump(rdr);
}
if!is_block_non_doc_comment(curr_line) { return; }
assert!(!curr_line.contains_char('\n'));
lines.push(curr_line);
} else {
let mut level: int = 1;
while level > 0 {
debug2!("=== block comment level {}", level);
if is_eof(rdr) {
(rdr as @mut reader).fatal(~"unterminated block comment");
}
if rdr.curr == '\n' {
trim_whitespace_prefix_and_push_line(&mut lines, curr_line,
col);
curr_line = ~"";
bump(rdr);
} else {
curr_line.push_char(rdr.curr);
if rdr.curr == '/' && nextch(rdr) == '*' {
bump(rdr);
bump(rdr);
curr_line.push_char('*');
level += 1;
} else {
if rdr.curr == '*' && nextch(rdr) == '/' {
bump(rdr);
bump(rdr);
curr_line.push_char('/');
level -= 1;
} else { bump(rdr); }
}
}
}
if curr_line.len()!= 0 {
trim_whitespace_prefix_and_push_line(&mut lines, curr_line, col);
}
}
let mut style = if code_to_the_left { trailing } else { isolated };
consume_non_eol_whitespace(rdr);
if!is_eof(rdr) && rdr.curr!= '\n' && lines.len() == 1u {
style = mixed;
}
debug2!("<<< block comment");
comments.push(cmnt {style: style, lines: lines, pos: p});
}
fn peeking_at_comment(rdr: @mut StringReader) -> bool {
return ((rdr.curr == '/' && nextch(rdr) == '/') ||
(rdr.curr == '/' && nextch(rdr) == '*')) ||
(rdr.curr == '#' && nextch(rdr) == '!');
}
fn consume_comment(rdr: @mut StringReader,
code_to_the_left: bool,
comments: &mut ~[cmnt]) {
debug2!(">>> consume comment");
if rdr.curr == '/' && nextch(rdr) == '/' {
read_line_comments(rdr, code_to_the_left, comments);
} else if rdr.curr == '/' && nextch(rdr) == '*' {
read_block_comment(rdr, code_to_the_left, comments);
} else if rdr.curr == '#' && nextch(rdr) == '!' {
read_shebang_comment(rdr, code_to_the_left, comments);
} else { fail2!(); }
debug2!("<<< consume comment");
}
#[deriving(Clone)]
pub struct lit {
lit: ~str,
pos: BytePos
}
// it appears this function is called only from pprust... that's
// probably not a good thing.
pub fn gather_comments_and_literals(span_diagnostic:
@mut diagnostic::span_handler,
path: @str,
srdr: @io::Reader)
-> (~[cmnt], ~[lit]) {
let src = str::from_utf8(srdr.read_whole_stream()).to_managed();
let cm = CodeMap::new();
let filemap = cm.new_filemap(path, src);
let rdr = lexer::new_low_level_string_reader(span_diagnostic, filemap);
let mut comments: ~[cmnt] = ~[];
let mut literals: ~[lit] = ~[];
let mut first_read: bool = true;
while!is_eof(rdr) {
loop {
let mut code_to_the_left =!first_read;
consume_non_eol_whitespace(rdr);
if rdr.curr == '\n' {
code_to_the_left = false;
consume_whitespace_counting_blank_lines(rdr, &mut comments);
}
while peeking_at_comment(rdr) {
consume_comment(rdr, code_to_the_left, &mut comments);
consume_whitespace_counting_blank_lines(rdr, &mut comments);
}
break;
}
let bstart = rdr.last_pos;
rdr.next_token();
//discard, and look ahead; we're working with internal state
let TokenAndSpan {tok: tok, sp: sp} = rdr.peek();
if token::is_lit(&tok) {
do with_str_from(rdr, bstart) |s| {
debug2!("tok lit: {}", s);
literals.push(lit {lit: s.to_owned(), pos: sp.lo});
}
} else {
debug2!("tok: {}", token::to_str(get_ident_interner(), &tok));
}
first_read = false;
}
(comments, literals)
}
#[cfg(test)]
mod test {
use super::*;
#[test] fn test_block_doc_comment_1() {
let comment = "/**\n * Test \n ** Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" Test \n* Test\n Test");
}
|
let comment = "/**\n * Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" Test\n Test");
}
#[test] fn test_block_doc_comment_3() {
let comment = "/**\n let a: *int;\n *a = 5;\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" let a: *int;\n *a = 5;");
}
#[test] fn test_block_doc_comment_4() {
let comment = "/*******************\n test\n *********************/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, ~" test");
}
#[test] fn test_line_doc_comment() {
let stripped = strip_doc_comment_decoration("/// test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("///! test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, ~" test");
let stripped = strip_doc_comment_decoration("///test");
assert_eq!(stripped, ~"test");
let stripped = strip_doc_comment_decoration("///!test");
assert_eq!(stripped, ~"test");
let stripped = strip_doc_comment_decoration("//test");
assert_eq!(stripped, ~"test");
}
}
|
#[test] fn test_block_doc_comment_2() {
|
random_line_split
|
lib.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Ethereum virtual machine.
extern crate bit_set;
extern crate ethcore_util as util;
extern crate ethcore_bigint as bigint;
extern crate parking_lot;
extern crate heapsize;
extern crate vm;
extern crate keccak_hash as hash;
extern crate memory_cache;
#[macro_use]
extern crate lazy_static;
extern crate log;
#[cfg(feature = "jit")]
extern crate evmjit;
#[cfg(test)]
extern crate rustc_hex;
|
pub mod evm;
pub mod interpreter;
#[macro_use]
pub mod factory;
mod vmtype;
mod instructions;
#[cfg(feature = "jit" )]
mod jit;
#[cfg(test)]
mod tests;
#[cfg(all(feature="benches", test))]
mod benches;
pub use vm::{
Schedule, CleanDustMode, EnvInfo, CallType, ActionParams, Ext,
ContractCreateResult, MessageCallResult, CreateContractAddress,
GasLeft, ReturnData
};
pub use self::evm::{Finalize, FinalizationResult, CostType};
pub use self::instructions::{InstructionInfo, INSTRUCTIONS, push_bytes};
pub use self::vmtype::VMType;
pub use self::factory::Factory;
|
random_line_split
|
|
num_format.rs
|
//! handles creating printed output for numeric substitutions
use std::env;
use std::vec::Vec;
use cli;
use super::format_field::{FormatField, FieldType};
use super::formatter::{Formatter, FormatPrimitive, InPrefix, Base};
use super::formatters::intf::Intf;
use super::formatters::floatf::Floatf;
use super::formatters::cninetyninehexfloatf::CninetyNineHexFloatf;
use super::formatters::scif::Scif;
use super::formatters::decf::Decf;
pub fn warn_expected_numeric(pf_arg: &String) {
// important: keep println here not print
cli::err_msg(&format!("{}: expected a numeric value", pf_arg));
}
// when character costant arguments have excess characters
// issue a warning when POSIXLY_CORRECT is not set
fn warn_char_constant_ign(remaining_bytes: Vec<u8>) {
match env::var("POSIXLY_CORRECT") {
Ok(_) => {}
Err(e) => {
match e {
env::VarError::NotPresent => {
cli::err_msg(&format!("warning: {:?}: character(s) following character \
constant have been ignored",
&*remaining_bytes));
}
_ => {}
}
}
}
}
// this function looks at the first few
// characters of an argument and returns a value if we can learn
// a value from that (e.g. no argument? return 0, char constant? ret value)
fn get_provided(str_in_opt: Option<&String>) -> Option<u8> {
const C_S_QUOTE: u8 = 39;
const C_D_QUOTE: u8 = 34;
match str_in_opt {
Some(str_in) => {
let mut byte_it = str_in.bytes();
if let Some(qchar) = byte_it.next() {
match qchar {
C_S_QUOTE | C_D_QUOTE => {
return Some(match byte_it.next() {
Some(second_byte) => {
let mut ignored: Vec<u8> = Vec::new();
while let Some(cont) = byte_it.next() {
ignored.push(cont);
}
if ignored.len() > 0 {
warn_char_constant_ign(ignored);
}
second_byte as u8
}
// no byte after quote
None => {
let so_far = (qchar as u8 as char).to_string();
warn_expected_numeric(&so_far);
0 as u8
}
});
}
// first byte is not quote
_ => {
return None;
}
// no first byte
}
} else {
Some(0 as u8)
}
}
None => Some(0),
}
}
// takes a string and returns
// a sign,
// a base,
// and an offset for index after all
// initial spacing, sign, base prefix, and leading zeroes
fn get_inprefix(str_in: &String, field_type: &FieldType) -> InPrefix {
let mut str_it = str_in.chars();
let mut ret = InPrefix {
radix_in: Base::Ten,
sign: 1,
offset: 0,
};
let mut topchar = str_it.next().clone();
// skip spaces and ensure topchar is the first non-space char
// (or None if none exists)
loop {
match topchar {
Some(' ') => {
ret.offset += 1;
topchar = str_it.next();
}
_ => {
break;
}
}
}
// parse sign
match topchar {
Some('+') => {
ret.offset += 1;
topchar = str_it.next();
}
Some('-') => {
ret.sign = -1;
ret.offset += 1;
topchar = str_it.next();
}
_ => {}
}
// we want to exit with offset being
// the index of the first non-zero
// digit before the decimal point or
// if there is none, the zero before the
// decimal point, or, if there is none,
// the decimal point.
// while we are determining the offset
// we will ensure as a convention
// the offset is always on the first character
// that we are yet unsure if it is the
// final offset. If the zero could be before
// a decimal point we don't move past the zero.
let mut is_hex = false;
if Some('0') == topchar {
if let Some(base) = str_it.next() {
// lead zeroes can only exist in
// octal and hex base
let mut do_clean_lead_zeroes = false;
match base {
'x' | 'X' => {
is_hex = true;
ret.offset += 2;
ret.radix_in = Base::Hex;
do_clean_lead_zeroes = true;
}
e @ '0'...'9' => {
ret.offset += 1;
match *field_type {
FieldType::Intf => {
ret.radix_in = Base::Octal;
}
_ => {}
}
if e == '0' {
do_clean_lead_zeroes = true;
}
}
_ => {}
}
if do_clean_lead_zeroes {
let mut first = true;
while let Some(ch_zero) = str_it.next() {
// see notes on offset above:
// this is why the offset for octals and decimals
// that reach this branch is 1 even though
// they have already eaten the characters '00'
// this is also why when hex encounters its
// first zero it does not move its offset
// forward because it does not know for sure
// that it's current offset (of that zero)
// is not the final offset,
// whereas at that point octal knows its
// current offset is not the final offset.
match ch_zero {
'0' => {
if!(is_hex && first)
|
}
// if decimal, keep last zero if one exists
// (it's possible for last zero to
// not exist at this branch if we're in hex input)
'.' => break,
// other digit, etc.
_ => {
if!(is_hex && first) {
ret.offset += 1;
}
break;
}
}
if first {
first = false;
}
}
}
}
}
ret
}
// this is the function a Sub's print will delegate to
// if it is a numeric field, passing the field details
// and an iterator to the argument
pub fn num_format(field: &FormatField, in_str_opt: Option<&String>) -> Option<String> {
let fchar = field.field_char.clone();
// num format mainly operates by further delegating to one of
// several Formatter structs depending on the field
// see formatter.rs for more details
// to do switch to static dispatch
let fmtr: Box<Formatter> = match *field.field_type {
FieldType::Intf => Box::new(Intf::new()),
FieldType::Floatf => Box::new(Floatf::new()),
FieldType::CninetyNineHexFloatf => Box::new(CninetyNineHexFloatf::new()),
FieldType::Scif => Box::new(Scif::new()),
FieldType::Decf => Box::new(Decf::new()),
_ => {
panic!("asked to do num format with non-num fieldtype");
}
};
let prim_opt=
// if we can get an assumed value from looking at the first
// few characters, use that value to create the FormatPrimitive
if let Some(provided_num) = get_provided(in_str_opt) {
let mut tmp : FormatPrimitive = Default::default();
match fchar {
'u' | 'i' | 'd' => {
tmp.pre_decimal = Some(
format!("{}", provided_num));
},
'x' | 'X' => {
tmp.pre_decimal = Some(
format!("{:x}", provided_num));
},
'o' => {
tmp.pre_decimal = Some(
format!("{:o}", provided_num));
},
'e' | 'E' | 'g' | 'G' => {
let as_str = format!("{}", provided_num);
let inprefix = get_inprefix(
&as_str,
&field.field_type
);
tmp=fmtr.get_primitive(field, &inprefix, &as_str)
.expect("err during default provided num");
},
_ => {
tmp.pre_decimal = Some(
format!("{}", provided_num));
tmp.post_decimal = Some(String::from("0"));
}
}
Some(tmp)
} else {
// otherwise we'll interpret the argument as a number
// using the appropriate Formatter
let in_str = in_str_opt.expect(
"please send the devs this message:
\n get_provided is failing to ret as Some(0) on no str ");
// first get information about the beginning of the
// numeric argument that would be useful for
// any formatter (int or float)
let inprefix = get_inprefix(
in_str,
&field.field_type
);
// then get the FormatPrimitive from the Formatter
fmtr.get_primitive(field, &inprefix, in_str)
};
// if we have a formatPrimitive, print its results
// according to the field-char appropriate Formatter
if let Some(prim) = prim_opt {
Some(fmtr.primitive_to_str(&prim, field.clone()))
} else {
None
}
}
|
{
ret.offset += 1;
}
|
conditional_block
|
num_format.rs
|
//! handles creating printed output for numeric substitutions
use std::env;
use std::vec::Vec;
use cli;
use super::format_field::{FormatField, FieldType};
use super::formatter::{Formatter, FormatPrimitive, InPrefix, Base};
use super::formatters::intf::Intf;
use super::formatters::floatf::Floatf;
use super::formatters::cninetyninehexfloatf::CninetyNineHexFloatf;
use super::formatters::scif::Scif;
use super::formatters::decf::Decf;
pub fn warn_expected_numeric(pf_arg: &String) {
// important: keep println here not print
cli::err_msg(&format!("{}: expected a numeric value", pf_arg));
}
// when character costant arguments have excess characters
// issue a warning when POSIXLY_CORRECT is not set
fn warn_char_constant_ign(remaining_bytes: Vec<u8>) {
match env::var("POSIXLY_CORRECT") {
Ok(_) => {}
Err(e) => {
match e {
env::VarError::NotPresent => {
cli::err_msg(&format!("warning: {:?}: character(s) following character \
constant have been ignored",
&*remaining_bytes));
}
_ => {}
}
}
}
}
// this function looks at the first few
// characters of an argument and returns a value if we can learn
// a value from that (e.g. no argument? return 0, char constant? ret value)
fn
|
(str_in_opt: Option<&String>) -> Option<u8> {
const C_S_QUOTE: u8 = 39;
const C_D_QUOTE: u8 = 34;
match str_in_opt {
Some(str_in) => {
let mut byte_it = str_in.bytes();
if let Some(qchar) = byte_it.next() {
match qchar {
C_S_QUOTE | C_D_QUOTE => {
return Some(match byte_it.next() {
Some(second_byte) => {
let mut ignored: Vec<u8> = Vec::new();
while let Some(cont) = byte_it.next() {
ignored.push(cont);
}
if ignored.len() > 0 {
warn_char_constant_ign(ignored);
}
second_byte as u8
}
// no byte after quote
None => {
let so_far = (qchar as u8 as char).to_string();
warn_expected_numeric(&so_far);
0 as u8
}
});
}
// first byte is not quote
_ => {
return None;
}
// no first byte
}
} else {
Some(0 as u8)
}
}
None => Some(0),
}
}
// takes a string and returns
// a sign,
// a base,
// and an offset for index after all
// initial spacing, sign, base prefix, and leading zeroes
fn get_inprefix(str_in: &String, field_type: &FieldType) -> InPrefix {
let mut str_it = str_in.chars();
let mut ret = InPrefix {
radix_in: Base::Ten,
sign: 1,
offset: 0,
};
let mut topchar = str_it.next().clone();
// skip spaces and ensure topchar is the first non-space char
// (or None if none exists)
loop {
match topchar {
Some(' ') => {
ret.offset += 1;
topchar = str_it.next();
}
_ => {
break;
}
}
}
// parse sign
match topchar {
Some('+') => {
ret.offset += 1;
topchar = str_it.next();
}
Some('-') => {
ret.sign = -1;
ret.offset += 1;
topchar = str_it.next();
}
_ => {}
}
// we want to exit with offset being
// the index of the first non-zero
// digit before the decimal point or
// if there is none, the zero before the
// decimal point, or, if there is none,
// the decimal point.
// while we are determining the offset
// we will ensure as a convention
// the offset is always on the first character
// that we are yet unsure if it is the
// final offset. If the zero could be before
// a decimal point we don't move past the zero.
let mut is_hex = false;
if Some('0') == topchar {
if let Some(base) = str_it.next() {
// lead zeroes can only exist in
// octal and hex base
let mut do_clean_lead_zeroes = false;
match base {
'x' | 'X' => {
is_hex = true;
ret.offset += 2;
ret.radix_in = Base::Hex;
do_clean_lead_zeroes = true;
}
e @ '0'...'9' => {
ret.offset += 1;
match *field_type {
FieldType::Intf => {
ret.radix_in = Base::Octal;
}
_ => {}
}
if e == '0' {
do_clean_lead_zeroes = true;
}
}
_ => {}
}
if do_clean_lead_zeroes {
let mut first = true;
while let Some(ch_zero) = str_it.next() {
// see notes on offset above:
// this is why the offset for octals and decimals
// that reach this branch is 1 even though
// they have already eaten the characters '00'
// this is also why when hex encounters its
// first zero it does not move its offset
// forward because it does not know for sure
// that it's current offset (of that zero)
// is not the final offset,
// whereas at that point octal knows its
// current offset is not the final offset.
match ch_zero {
'0' => {
if!(is_hex && first) {
ret.offset += 1;
}
}
// if decimal, keep last zero if one exists
// (it's possible for last zero to
// not exist at this branch if we're in hex input)
'.' => break,
// other digit, etc.
_ => {
if!(is_hex && first) {
ret.offset += 1;
}
break;
}
}
if first {
first = false;
}
}
}
}
}
ret
}
// this is the function a Sub's print will delegate to
// if it is a numeric field, passing the field details
// and an iterator to the argument
pub fn num_format(field: &FormatField, in_str_opt: Option<&String>) -> Option<String> {
let fchar = field.field_char.clone();
// num format mainly operates by further delegating to one of
// several Formatter structs depending on the field
// see formatter.rs for more details
// to do switch to static dispatch
let fmtr: Box<Formatter> = match *field.field_type {
FieldType::Intf => Box::new(Intf::new()),
FieldType::Floatf => Box::new(Floatf::new()),
FieldType::CninetyNineHexFloatf => Box::new(CninetyNineHexFloatf::new()),
FieldType::Scif => Box::new(Scif::new()),
FieldType::Decf => Box::new(Decf::new()),
_ => {
panic!("asked to do num format with non-num fieldtype");
}
};
let prim_opt=
// if we can get an assumed value from looking at the first
// few characters, use that value to create the FormatPrimitive
if let Some(provided_num) = get_provided(in_str_opt) {
let mut tmp : FormatPrimitive = Default::default();
match fchar {
'u' | 'i' | 'd' => {
tmp.pre_decimal = Some(
format!("{}", provided_num));
},
'x' | 'X' => {
tmp.pre_decimal = Some(
format!("{:x}", provided_num));
},
'o' => {
tmp.pre_decimal = Some(
format!("{:o}", provided_num));
},
'e' | 'E' | 'g' | 'G' => {
let as_str = format!("{}", provided_num);
let inprefix = get_inprefix(
&as_str,
&field.field_type
);
tmp=fmtr.get_primitive(field, &inprefix, &as_str)
.expect("err during default provided num");
},
_ => {
tmp.pre_decimal = Some(
format!("{}", provided_num));
tmp.post_decimal = Some(String::from("0"));
}
}
Some(tmp)
} else {
// otherwise we'll interpret the argument as a number
// using the appropriate Formatter
let in_str = in_str_opt.expect(
"please send the devs this message:
\n get_provided is failing to ret as Some(0) on no str ");
// first get information about the beginning of the
// numeric argument that would be useful for
// any formatter (int or float)
let inprefix = get_inprefix(
in_str,
&field.field_type
);
// then get the FormatPrimitive from the Formatter
fmtr.get_primitive(field, &inprefix, in_str)
};
// if we have a formatPrimitive, print its results
// according to the field-char appropriate Formatter
if let Some(prim) = prim_opt {
Some(fmtr.primitive_to_str(&prim, field.clone()))
} else {
None
}
}
|
get_provided
|
identifier_name
|
num_format.rs
|
//! handles creating printed output for numeric substitutions
use std::env;
use std::vec::Vec;
use cli;
use super::format_field::{FormatField, FieldType};
use super::formatter::{Formatter, FormatPrimitive, InPrefix, Base};
use super::formatters::intf::Intf;
use super::formatters::floatf::Floatf;
use super::formatters::cninetyninehexfloatf::CninetyNineHexFloatf;
use super::formatters::scif::Scif;
use super::formatters::decf::Decf;
pub fn warn_expected_numeric(pf_arg: &String) {
// important: keep println here not print
cli::err_msg(&format!("{}: expected a numeric value", pf_arg));
}
// when character costant arguments have excess characters
// issue a warning when POSIXLY_CORRECT is not set
fn warn_char_constant_ign(remaining_bytes: Vec<u8>) {
match env::var("POSIXLY_CORRECT") {
Ok(_) => {}
Err(e) => {
match e {
env::VarError::NotPresent => {
cli::err_msg(&format!("warning: {:?}: character(s) following character \
constant have been ignored",
&*remaining_bytes));
}
_ => {}
}
}
}
}
// this function looks at the first few
// characters of an argument and returns a value if we can learn
// a value from that (e.g. no argument? return 0, char constant? ret value)
fn get_provided(str_in_opt: Option<&String>) -> Option<u8> {
const C_S_QUOTE: u8 = 39;
const C_D_QUOTE: u8 = 34;
match str_in_opt {
Some(str_in) => {
let mut byte_it = str_in.bytes();
if let Some(qchar) = byte_it.next() {
match qchar {
C_S_QUOTE | C_D_QUOTE => {
return Some(match byte_it.next() {
Some(second_byte) => {
let mut ignored: Vec<u8> = Vec::new();
while let Some(cont) = byte_it.next() {
ignored.push(cont);
}
if ignored.len() > 0 {
warn_char_constant_ign(ignored);
}
second_byte as u8
}
// no byte after quote
None => {
let so_far = (qchar as u8 as char).to_string();
warn_expected_numeric(&so_far);
0 as u8
}
});
}
// first byte is not quote
_ => {
return None;
}
// no first byte
}
} else {
Some(0 as u8)
}
}
None => Some(0),
}
}
// takes a string and returns
// a sign,
// a base,
// and an offset for index after all
// initial spacing, sign, base prefix, and leading zeroes
fn get_inprefix(str_in: &String, field_type: &FieldType) -> InPrefix {
let mut str_it = str_in.chars();
let mut ret = InPrefix {
radix_in: Base::Ten,
sign: 1,
offset: 0,
};
let mut topchar = str_it.next().clone();
// skip spaces and ensure topchar is the first non-space char
// (or None if none exists)
loop {
match topchar {
Some(' ') => {
ret.offset += 1;
topchar = str_it.next();
}
_ => {
break;
}
}
}
// parse sign
match topchar {
Some('+') => {
ret.offset += 1;
topchar = str_it.next();
}
Some('-') => {
ret.sign = -1;
ret.offset += 1;
topchar = str_it.next();
}
_ => {}
}
// we want to exit with offset being
// the index of the first non-zero
// digit before the decimal point or
// if there is none, the zero before the
// decimal point, or, if there is none,
// the decimal point.
// while we are determining the offset
// we will ensure as a convention
// the offset is always on the first character
// that we are yet unsure if it is the
// final offset. If the zero could be before
// a decimal point we don't move past the zero.
let mut is_hex = false;
if Some('0') == topchar {
if let Some(base) = str_it.next() {
// lead zeroes can only exist in
// octal and hex base
let mut do_clean_lead_zeroes = false;
match base {
'x' | 'X' => {
is_hex = true;
ret.offset += 2;
ret.radix_in = Base::Hex;
do_clean_lead_zeroes = true;
}
e @ '0'...'9' => {
ret.offset += 1;
match *field_type {
FieldType::Intf => {
ret.radix_in = Base::Octal;
}
_ => {}
}
if e == '0' {
do_clean_lead_zeroes = true;
}
}
_ => {}
}
if do_clean_lead_zeroes {
let mut first = true;
while let Some(ch_zero) = str_it.next() {
// see notes on offset above:
// this is why the offset for octals and decimals
// that reach this branch is 1 even though
// they have already eaten the characters '00'
// this is also why when hex encounters its
|
// whereas at that point octal knows its
// current offset is not the final offset.
match ch_zero {
'0' => {
if!(is_hex && first) {
ret.offset += 1;
}
}
// if decimal, keep last zero if one exists
// (it's possible for last zero to
// not exist at this branch if we're in hex input)
'.' => break,
// other digit, etc.
_ => {
if!(is_hex && first) {
ret.offset += 1;
}
break;
}
}
if first {
first = false;
}
}
}
}
}
ret
}
// this is the function a Sub's print will delegate to
// if it is a numeric field, passing the field details
// and an iterator to the argument
pub fn num_format(field: &FormatField, in_str_opt: Option<&String>) -> Option<String> {
let fchar = field.field_char.clone();
// num format mainly operates by further delegating to one of
// several Formatter structs depending on the field
// see formatter.rs for more details
// to do switch to static dispatch
let fmtr: Box<Formatter> = match *field.field_type {
FieldType::Intf => Box::new(Intf::new()),
FieldType::Floatf => Box::new(Floatf::new()),
FieldType::CninetyNineHexFloatf => Box::new(CninetyNineHexFloatf::new()),
FieldType::Scif => Box::new(Scif::new()),
FieldType::Decf => Box::new(Decf::new()),
_ => {
panic!("asked to do num format with non-num fieldtype");
}
};
let prim_opt=
// if we can get an assumed value from looking at the first
// few characters, use that value to create the FormatPrimitive
if let Some(provided_num) = get_provided(in_str_opt) {
let mut tmp : FormatPrimitive = Default::default();
match fchar {
'u' | 'i' | 'd' => {
tmp.pre_decimal = Some(
format!("{}", provided_num));
},
'x' | 'X' => {
tmp.pre_decimal = Some(
format!("{:x}", provided_num));
},
'o' => {
tmp.pre_decimal = Some(
format!("{:o}", provided_num));
},
'e' | 'E' | 'g' | 'G' => {
let as_str = format!("{}", provided_num);
let inprefix = get_inprefix(
&as_str,
&field.field_type
);
tmp=fmtr.get_primitive(field, &inprefix, &as_str)
.expect("err during default provided num");
},
_ => {
tmp.pre_decimal = Some(
format!("{}", provided_num));
tmp.post_decimal = Some(String::from("0"));
}
}
Some(tmp)
} else {
// otherwise we'll interpret the argument as a number
// using the appropriate Formatter
let in_str = in_str_opt.expect(
"please send the devs this message:
\n get_provided is failing to ret as Some(0) on no str ");
// first get information about the beginning of the
// numeric argument that would be useful for
// any formatter (int or float)
let inprefix = get_inprefix(
in_str,
&field.field_type
);
// then get the FormatPrimitive from the Formatter
fmtr.get_primitive(field, &inprefix, in_str)
};
// if we have a formatPrimitive, print its results
// according to the field-char appropriate Formatter
if let Some(prim) = prim_opt {
Some(fmtr.primitive_to_str(&prim, field.clone()))
} else {
None
}
}
|
// first zero it does not move its offset
// forward because it does not know for sure
// that it's current offset (of that zero)
// is not the final offset,
|
random_line_split
|
num_format.rs
|
//! handles creating printed output for numeric substitutions
use std::env;
use std::vec::Vec;
use cli;
use super::format_field::{FormatField, FieldType};
use super::formatter::{Formatter, FormatPrimitive, InPrefix, Base};
use super::formatters::intf::Intf;
use super::formatters::floatf::Floatf;
use super::formatters::cninetyninehexfloatf::CninetyNineHexFloatf;
use super::formatters::scif::Scif;
use super::formatters::decf::Decf;
pub fn warn_expected_numeric(pf_arg: &String) {
// important: keep println here not print
cli::err_msg(&format!("{}: expected a numeric value", pf_arg));
}
// when character costant arguments have excess characters
// issue a warning when POSIXLY_CORRECT is not set
fn warn_char_constant_ign(remaining_bytes: Vec<u8>)
|
// this function looks at the first few
// characters of an argument and returns a value if we can learn
// a value from that (e.g. no argument? return 0, char constant? ret value)
fn get_provided(str_in_opt: Option<&String>) -> Option<u8> {
const C_S_QUOTE: u8 = 39;
const C_D_QUOTE: u8 = 34;
match str_in_opt {
Some(str_in) => {
let mut byte_it = str_in.bytes();
if let Some(qchar) = byte_it.next() {
match qchar {
C_S_QUOTE | C_D_QUOTE => {
return Some(match byte_it.next() {
Some(second_byte) => {
let mut ignored: Vec<u8> = Vec::new();
while let Some(cont) = byte_it.next() {
ignored.push(cont);
}
if ignored.len() > 0 {
warn_char_constant_ign(ignored);
}
second_byte as u8
}
// no byte after quote
None => {
let so_far = (qchar as u8 as char).to_string();
warn_expected_numeric(&so_far);
0 as u8
}
});
}
// first byte is not quote
_ => {
return None;
}
// no first byte
}
} else {
Some(0 as u8)
}
}
None => Some(0),
}
}
// takes a string and returns
// a sign,
// a base,
// and an offset for index after all
// initial spacing, sign, base prefix, and leading zeroes
fn get_inprefix(str_in: &String, field_type: &FieldType) -> InPrefix {
let mut str_it = str_in.chars();
let mut ret = InPrefix {
radix_in: Base::Ten,
sign: 1,
offset: 0,
};
let mut topchar = str_it.next().clone();
// skip spaces and ensure topchar is the first non-space char
// (or None if none exists)
loop {
match topchar {
Some(' ') => {
ret.offset += 1;
topchar = str_it.next();
}
_ => {
break;
}
}
}
// parse sign
match topchar {
Some('+') => {
ret.offset += 1;
topchar = str_it.next();
}
Some('-') => {
ret.sign = -1;
ret.offset += 1;
topchar = str_it.next();
}
_ => {}
}
// we want to exit with offset being
// the index of the first non-zero
// digit before the decimal point or
// if there is none, the zero before the
// decimal point, or, if there is none,
// the decimal point.
// while we are determining the offset
// we will ensure as a convention
// the offset is always on the first character
// that we are yet unsure if it is the
// final offset. If the zero could be before
// a decimal point we don't move past the zero.
let mut is_hex = false;
if Some('0') == topchar {
if let Some(base) = str_it.next() {
// lead zeroes can only exist in
// octal and hex base
let mut do_clean_lead_zeroes = false;
match base {
'x' | 'X' => {
is_hex = true;
ret.offset += 2;
ret.radix_in = Base::Hex;
do_clean_lead_zeroes = true;
}
e @ '0'...'9' => {
ret.offset += 1;
match *field_type {
FieldType::Intf => {
ret.radix_in = Base::Octal;
}
_ => {}
}
if e == '0' {
do_clean_lead_zeroes = true;
}
}
_ => {}
}
if do_clean_lead_zeroes {
let mut first = true;
while let Some(ch_zero) = str_it.next() {
// see notes on offset above:
// this is why the offset for octals and decimals
// that reach this branch is 1 even though
// they have already eaten the characters '00'
// this is also why when hex encounters its
// first zero it does not move its offset
// forward because it does not know for sure
// that it's current offset (of that zero)
// is not the final offset,
// whereas at that point octal knows its
// current offset is not the final offset.
match ch_zero {
'0' => {
if!(is_hex && first) {
ret.offset += 1;
}
}
// if decimal, keep last zero if one exists
// (it's possible for last zero to
// not exist at this branch if we're in hex input)
'.' => break,
// other digit, etc.
_ => {
if!(is_hex && first) {
ret.offset += 1;
}
break;
}
}
if first {
first = false;
}
}
}
}
}
ret
}
// this is the function a Sub's print will delegate to
// if it is a numeric field, passing the field details
// and an iterator to the argument
pub fn num_format(field: &FormatField, in_str_opt: Option<&String>) -> Option<String> {
let fchar = field.field_char.clone();
// num format mainly operates by further delegating to one of
// several Formatter structs depending on the field
// see formatter.rs for more details
// to do switch to static dispatch
let fmtr: Box<Formatter> = match *field.field_type {
FieldType::Intf => Box::new(Intf::new()),
FieldType::Floatf => Box::new(Floatf::new()),
FieldType::CninetyNineHexFloatf => Box::new(CninetyNineHexFloatf::new()),
FieldType::Scif => Box::new(Scif::new()),
FieldType::Decf => Box::new(Decf::new()),
_ => {
panic!("asked to do num format with non-num fieldtype");
}
};
let prim_opt=
// if we can get an assumed value from looking at the first
// few characters, use that value to create the FormatPrimitive
if let Some(provided_num) = get_provided(in_str_opt) {
let mut tmp : FormatPrimitive = Default::default();
match fchar {
'u' | 'i' | 'd' => {
tmp.pre_decimal = Some(
format!("{}", provided_num));
},
'x' | 'X' => {
tmp.pre_decimal = Some(
format!("{:x}", provided_num));
},
'o' => {
tmp.pre_decimal = Some(
format!("{:o}", provided_num));
},
'e' | 'E' | 'g' | 'G' => {
let as_str = format!("{}", provided_num);
let inprefix = get_inprefix(
&as_str,
&field.field_type
);
tmp=fmtr.get_primitive(field, &inprefix, &as_str)
.expect("err during default provided num");
},
_ => {
tmp.pre_decimal = Some(
format!("{}", provided_num));
tmp.post_decimal = Some(String::from("0"));
}
}
Some(tmp)
} else {
// otherwise we'll interpret the argument as a number
// using the appropriate Formatter
let in_str = in_str_opt.expect(
"please send the devs this message:
\n get_provided is failing to ret as Some(0) on no str ");
// first get information about the beginning of the
// numeric argument that would be useful for
// any formatter (int or float)
let inprefix = get_inprefix(
in_str,
&field.field_type
);
// then get the FormatPrimitive from the Formatter
fmtr.get_primitive(field, &inprefix, in_str)
};
// if we have a formatPrimitive, print its results
// according to the field-char appropriate Formatter
if let Some(prim) = prim_opt {
Some(fmtr.primitive_to_str(&prim, field.clone()))
} else {
None
}
}
|
{
match env::var("POSIXLY_CORRECT") {
Ok(_) => {}
Err(e) => {
match e {
env::VarError::NotPresent => {
cli::err_msg(&format!("warning: {:?}: character(s) following character \
constant have been ignored",
&*remaining_bytes));
}
_ => {}
}
}
}
}
|
identifier_body
|
main.rs
|
use std::env;
// Rust doesn't make it easy to do things like: 'a' + x to get the letter matching a digit.
static DIGITS: [char; 26] = [
'a', 'b', 'c', 'd',
'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p',
'q', 'r','s', 't',
'u', 'v', 'w', 'x',
'y', 'z'];
fn main() {
let input = match env::args().nth(1) {
Some(i) => i,
None => {
println!("usage: day11 <input value>");
return
}
};
let next = find_next_password(&input);
println!("found next valid password: {}", next);
let next_next = find_next_password(&input);
println!("found next next valid password: {}", next_next);
}
fn find_next_password(input: &str) -> String {
let mut candidate = increment_string(&input);
while!is_acceptable_password(&candidate) {
candidate = increment_string(&candidate);
}
candidate
}
fn is_acceptable_password(candidate: &str) -> bool
|
fn has_straight_and_pairs(candidate: &Vec<char>) -> bool {
let mut idx = 0;
let mut pairs = 0;
let mut straight = false;
while idx < candidate.len() {
straight |= is_straight(candidate, idx);
if idx > 0 && candidate[idx] == candidate[idx - 1] {
pairs += 1;
idx += 1;
}
idx += 1;
}
straight && pairs > 1
}
fn is_straight(candidate: &Vec<char>, idx: usize) -> bool {
idx > 1 && {
let c = candidate[idx];
candidate[idx - 1] == std::char::from_u32(c as u32 - 1).unwrap() &&
candidate[idx - 2] == std::char::from_u32(c as u32 - 2).unwrap()
}
}
fn increment_string(input: &str) -> String {
fn increment_char(c: char) -> char {
std::char::from_u32((c as u32) + 1).unwrap()
}
let mut carry = true;
let out : Vec<_> = input.chars().rev().scan(&mut carry, |carry, item| {
use std::mem::replace;
match (**carry, item) {
(true, 'z') => Some('a'),
(true, c) => { **carry = false; Some(increment_char(c)) },
(false, c) => Some(c)
}
}).collect();
let mut result : String = out.iter().rev().cloned().collect();
if carry {
result.insert(0, 'a');
}
result
}
//fn to_base26_string(mut input: usize) -> String {
//// This isn't pure base-26. For example, the count goes from 'z' to 'aa' rather than 'ba'
//// (because 'a' should represent 0). However, it fits the needs of the puzzle.
//let mut result = String::new();
//while input > 25 {
//let digit = input / 26;
//let new_input = input % 26;
//println!("cycle: {} => {},{}", input, digit, new_input);
//result.push(DIGITS[digit]);
//input = new_input
//}
//result.push(DIGITS[input + 1]);
//result
//}
//fn from_base26_string(input: &str) -> usize {
//let chars : Vec<_> = input.chars().collect();
//let mut value = 0;
//for (idx, c) in chars.iter().enumerate() {
//let mut digit = (*c as u32 - 'a' as u32) as usize;
//if idx < (chars.len() - 1) {
//digit += 1
//}
//value = (value * 26) + digit;
//}
//value
//}
|
{
let chars : Vec<_> = candidate.chars().collect();
!candidate.contains('i') &&
!candidate.contains('o') &&
!candidate.contains('l') &&
has_straight_and_pairs(&chars)
}
|
identifier_body
|
main.rs
|
use std::env;
// Rust doesn't make it easy to do things like: 'a' + x to get the letter matching a digit.
static DIGITS: [char; 26] = [
'a', 'b', 'c', 'd',
'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p',
'q', 'r','s', 't',
'u', 'v', 'w', 'x',
'y', 'z'];
fn main() {
let input = match env::args().nth(1) {
Some(i) => i,
None =>
|
};
let next = find_next_password(&input);
println!("found next valid password: {}", next);
let next_next = find_next_password(&input);
println!("found next next valid password: {}", next_next);
}
fn find_next_password(input: &str) -> String {
let mut candidate = increment_string(&input);
while!is_acceptable_password(&candidate) {
candidate = increment_string(&candidate);
}
candidate
}
fn is_acceptable_password(candidate: &str) -> bool {
let chars : Vec<_> = candidate.chars().collect();
!candidate.contains('i') &&
!candidate.contains('o') &&
!candidate.contains('l') &&
has_straight_and_pairs(&chars)
}
fn has_straight_and_pairs(candidate: &Vec<char>) -> bool {
let mut idx = 0;
let mut pairs = 0;
let mut straight = false;
while idx < candidate.len() {
straight |= is_straight(candidate, idx);
if idx > 0 && candidate[idx] == candidate[idx - 1] {
pairs += 1;
idx += 1;
}
idx += 1;
}
straight && pairs > 1
}
fn is_straight(candidate: &Vec<char>, idx: usize) -> bool {
idx > 1 && {
let c = candidate[idx];
candidate[idx - 1] == std::char::from_u32(c as u32 - 1).unwrap() &&
candidate[idx - 2] == std::char::from_u32(c as u32 - 2).unwrap()
}
}
fn increment_string(input: &str) -> String {
fn increment_char(c: char) -> char {
std::char::from_u32((c as u32) + 1).unwrap()
}
let mut carry = true;
let out : Vec<_> = input.chars().rev().scan(&mut carry, |carry, item| {
use std::mem::replace;
match (**carry, item) {
(true, 'z') => Some('a'),
(true, c) => { **carry = false; Some(increment_char(c)) },
(false, c) => Some(c)
}
}).collect();
let mut result : String = out.iter().rev().cloned().collect();
if carry {
result.insert(0, 'a');
}
result
}
//fn to_base26_string(mut input: usize) -> String {
//// This isn't pure base-26. For example, the count goes from 'z' to 'aa' rather than 'ba'
//// (because 'a' should represent 0). However, it fits the needs of the puzzle.
//let mut result = String::new();
//while input > 25 {
//let digit = input / 26;
//let new_input = input % 26;
//println!("cycle: {} => {},{}", input, digit, new_input);
//result.push(DIGITS[digit]);
//input = new_input
//}
//result.push(DIGITS[input + 1]);
//result
//}
//fn from_base26_string(input: &str) -> usize {
//let chars : Vec<_> = input.chars().collect();
//let mut value = 0;
//for (idx, c) in chars.iter().enumerate() {
//let mut digit = (*c as u32 - 'a' as u32) as usize;
//if idx < (chars.len() - 1) {
//digit += 1
//}
//value = (value * 26) + digit;
//}
//value
//}
|
{
println!("usage: day11 <input value>");
return
}
|
conditional_block
|
main.rs
|
use std::env;
// Rust doesn't make it easy to do things like: 'a' + x to get the letter matching a digit.
static DIGITS: [char; 26] = [
'a', 'b', 'c', 'd',
'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p',
'q', 'r','s', 't',
'u', 'v', 'w', 'x',
'y', 'z'];
fn main() {
let input = match env::args().nth(1) {
Some(i) => i,
None => {
println!("usage: day11 <input value>");
return
}
};
let next = find_next_password(&input);
println!("found next valid password: {}", next);
let next_next = find_next_password(&input);
println!("found next next valid password: {}", next_next);
}
fn find_next_password(input: &str) -> String {
let mut candidate = increment_string(&input);
while!is_acceptable_password(&candidate) {
candidate = increment_string(&candidate);
}
candidate
}
fn is_acceptable_password(candidate: &str) -> bool {
let chars : Vec<_> = candidate.chars().collect();
!candidate.contains('i') &&
!candidate.contains('o') &&
!candidate.contains('l') &&
has_straight_and_pairs(&chars)
}
fn has_straight_and_pairs(candidate: &Vec<char>) -> bool {
let mut idx = 0;
let mut pairs = 0;
|
pairs += 1;
idx += 1;
}
idx += 1;
}
straight && pairs > 1
}
fn is_straight(candidate: &Vec<char>, idx: usize) -> bool {
idx > 1 && {
let c = candidate[idx];
candidate[idx - 1] == std::char::from_u32(c as u32 - 1).unwrap() &&
candidate[idx - 2] == std::char::from_u32(c as u32 - 2).unwrap()
}
}
fn increment_string(input: &str) -> String {
fn increment_char(c: char) -> char {
std::char::from_u32((c as u32) + 1).unwrap()
}
let mut carry = true;
let out : Vec<_> = input.chars().rev().scan(&mut carry, |carry, item| {
use std::mem::replace;
match (**carry, item) {
(true, 'z') => Some('a'),
(true, c) => { **carry = false; Some(increment_char(c)) },
(false, c) => Some(c)
}
}).collect();
let mut result : String = out.iter().rev().cloned().collect();
if carry {
result.insert(0, 'a');
}
result
}
//fn to_base26_string(mut input: usize) -> String {
//// This isn't pure base-26. For example, the count goes from 'z' to 'aa' rather than 'ba'
//// (because 'a' should represent 0). However, it fits the needs of the puzzle.
//let mut result = String::new();
//while input > 25 {
//let digit = input / 26;
//let new_input = input % 26;
//println!("cycle: {} => {},{}", input, digit, new_input);
//result.push(DIGITS[digit]);
//input = new_input
//}
//result.push(DIGITS[input + 1]);
//result
//}
//fn from_base26_string(input: &str) -> usize {
//let chars : Vec<_> = input.chars().collect();
//let mut value = 0;
//for (idx, c) in chars.iter().enumerate() {
//let mut digit = (*c as u32 - 'a' as u32) as usize;
//if idx < (chars.len() - 1) {
//digit += 1
//}
//value = (value * 26) + digit;
//}
//value
//}
|
let mut straight = false;
while idx < candidate.len() {
straight |= is_straight(candidate, idx);
if idx > 0 && candidate[idx] == candidate[idx - 1] {
|
random_line_split
|
main.rs
|
use std::env;
// Rust doesn't make it easy to do things like: 'a' + x to get the letter matching a digit.
static DIGITS: [char; 26] = [
'a', 'b', 'c', 'd',
'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p',
'q', 'r','s', 't',
'u', 'v', 'w', 'x',
'y', 'z'];
fn main() {
let input = match env::args().nth(1) {
Some(i) => i,
None => {
println!("usage: day11 <input value>");
return
}
};
let next = find_next_password(&input);
println!("found next valid password: {}", next);
let next_next = find_next_password(&input);
println!("found next next valid password: {}", next_next);
}
fn find_next_password(input: &str) -> String {
let mut candidate = increment_string(&input);
while!is_acceptable_password(&candidate) {
candidate = increment_string(&candidate);
}
candidate
}
fn
|
(candidate: &str) -> bool {
let chars : Vec<_> = candidate.chars().collect();
!candidate.contains('i') &&
!candidate.contains('o') &&
!candidate.contains('l') &&
has_straight_and_pairs(&chars)
}
fn has_straight_and_pairs(candidate: &Vec<char>) -> bool {
let mut idx = 0;
let mut pairs = 0;
let mut straight = false;
while idx < candidate.len() {
straight |= is_straight(candidate, idx);
if idx > 0 && candidate[idx] == candidate[idx - 1] {
pairs += 1;
idx += 1;
}
idx += 1;
}
straight && pairs > 1
}
fn is_straight(candidate: &Vec<char>, idx: usize) -> bool {
idx > 1 && {
let c = candidate[idx];
candidate[idx - 1] == std::char::from_u32(c as u32 - 1).unwrap() &&
candidate[idx - 2] == std::char::from_u32(c as u32 - 2).unwrap()
}
}
fn increment_string(input: &str) -> String {
fn increment_char(c: char) -> char {
std::char::from_u32((c as u32) + 1).unwrap()
}
let mut carry = true;
let out : Vec<_> = input.chars().rev().scan(&mut carry, |carry, item| {
use std::mem::replace;
match (**carry, item) {
(true, 'z') => Some('a'),
(true, c) => { **carry = false; Some(increment_char(c)) },
(false, c) => Some(c)
}
}).collect();
let mut result : String = out.iter().rev().cloned().collect();
if carry {
result.insert(0, 'a');
}
result
}
//fn to_base26_string(mut input: usize) -> String {
//// This isn't pure base-26. For example, the count goes from 'z' to 'aa' rather than 'ba'
//// (because 'a' should represent 0). However, it fits the needs of the puzzle.
//let mut result = String::new();
//while input > 25 {
//let digit = input / 26;
//let new_input = input % 26;
//println!("cycle: {} => {},{}", input, digit, new_input);
//result.push(DIGITS[digit]);
//input = new_input
//}
//result.push(DIGITS[input + 1]);
//result
//}
//fn from_base26_string(input: &str) -> usize {
//let chars : Vec<_> = input.chars().collect();
//let mut value = 0;
//for (idx, c) in chars.iter().enumerate() {
//let mut digit = (*c as u32 - 'a' as u32) as usize;
//if idx < (chars.len() - 1) {
//digit += 1
//}
//value = (value * 26) + digit;
//}
//value
//}
|
is_acceptable_password
|
identifier_name
|
event.rs
|
//! Main interface for a stream of events the Supervisor can send out
//! in the course of its operations.
//!
//! Currently, the Supervisor is able to send events to a [NATS][1]
//! server. The `init_stream` function must be called
//! before sending events to initialize the publishing thread in the
//! background. Thereafter, you can pass "event" structs to the
//! `event` function, which will publish the event to the stream.
//!
//! All events are published under the "habitat" subject.
//!
//! [1]:https://github.com/nats-io/nats-server
mod error;
mod nats_message_stream;
mod types;
pub(crate) use self::types::ServiceMetadata;
use self::types::{EventMessage,
EventMetadata,
HealthCheckEvent,
ServiceStartedEvent,
ServiceStoppedEvent,
ServiceUpdateStartedEvent};
use crate::manager::{service::{HealthCheckHookStatus,
HealthCheckResult,
ProcessOutput,
Service,
StandardStreams},
sys::Sys};
pub use error::{Error,
Result};
use habitat_common::types::{EventStreamConnectMethod,
EventStreamMetadata,
EventStreamServerCertificate,
EventStreamToken};
use habitat_core::{package::ident::PackageIdent,
service::HealthCheckInterval};
use nats_message_stream::{NatsMessage,
NatsMessageStream};
use prost_types::Duration as ProstDuration;
use rants::{Address,
Subject};
use state::Storage;
use std::{net::SocketAddr,
time::Duration};
lazy_static! {
// TODO (CM): When const fn support lands in stable, we can ditch
// this lazy_static call.
// NATS subject names
static ref SERVICE_STARTED_SUBJECT: Subject =
"habitat.event.service_started".parse().expect("valid NATS subject");
static ref SERVICE_STOPPED_SUBJECT: Subject =
"habitat.event.service_stopped".parse().expect("valid NATS subject");
static ref SERVICE_UPDATE_STARTED_SUBJECT: Subject =
"habitat.event.service_update_started".parse().expect("valid NATS subject");
static ref HEALTHCHECK_SUBJECT: Subject =
"habitat.event.healthcheck".parse().expect("valid NATS subject");
/// Reference to the event stream.
static ref NATS_MESSAGE_STREAM: Storage<NatsMessageStream> = Storage::new();
/// Core information that is shared between all events.
static ref EVENT_CORE: Storage<EventCore> = Storage::new();
}
/// Starts a new task for sending events to a NATS Streaming
/// server. Stashes the handle to the stream, as well as the core
/// event information that will be a part of all events, in a global
/// static reference for access later.
pub async fn init(sys: &Sys, fqdn: String, config: EventStreamConfig) -> Result<()> {
// Only initialize once
if!initialized() {
let supervisor_id = sys.member_id.clone();
let ip_address = sys.gossip_listen();
let event_core = EventCore::new(&supervisor_id, ip_address, &fqdn, &config);
let stream = NatsMessageStream::new(&supervisor_id, config).await?;
NATS_MESSAGE_STREAM.set(stream);
EVENT_CORE.set(event_core);
}
Ok(())
}
/// Captures all event stream-related configuration options that would
/// be passed in by a user
// TODO (DM): The fields of this struct are only public for testing. We should refactor the crate
// layout so this can be avoided.
#[derive(Clone, Debug, PartialEq)]
pub struct EventStreamConfig {
pub environment: String,
pub application: String,
pub site: Option<String>,
pub meta: EventStreamMetadata,
pub token: EventStreamToken,
pub url: Address,
pub connect_method: EventStreamConnectMethod,
pub server_certificate: Option<EventStreamServerCertificate>,
}
/// Send an event for the start of a Service.
pub fn service_started(service: &Service) {
if initialized() {
publish(&SERVICE_STARTED_SUBJECT,
ServiceStartedEvent { service_metadata: Some(service.to_service_metadata()),
event_metadata: None, });
}
}
/// Send an event for the stop of a Service.
pub fn service_stopped(service: &Service) {
if initialized() {
publish(&SERVICE_STOPPED_SUBJECT,
ServiceStoppedEvent { service_metadata: Some(service.to_service_metadata()),
event_metadata: None, });
}
}
/// Send an event at the start of a Service update.
pub fn service_update_started(service: &Service, update: &PackageIdent) {
if initialized() {
publish(&SERVICE_UPDATE_STARTED_SUBJECT,
ServiceUpdateStartedEvent { event_metadata: None,
service_metadata:
Some(service.to_service_metadata()),
update_package_ident: update.clone().to_string(), });
}
}
// Takes metadata directly, rather than a `&Service` like other event
// functions, because of how the asynchronous health checking
// currently works. Revisit when async/await + Pin is all stabilized.
pub fn health_check(metadata: ServiceMetadata,
health_check_result: HealthCheckResult,
health_check_hook_status: HealthCheckHookStatus,
health_check_interval: HealthCheckInterval) {
if initialized() {
let health_check_result: types::HealthCheckResult = health_check_result.into();
let maybe_duration = health_check_hook_status.maybe_duration();
let maybe_process_output = health_check_hook_status.maybe_process_output();
let exit_status = maybe_process_output.as_ref()
.and_then(|o| o.exit_status().code());
let StandardStreams { stdout, stderr } =
maybe_process_output.map(ProcessOutput::standard_streams)
.unwrap_or_default();
let prost_interval = ProstDuration::from(Duration::from(health_check_interval));
publish(&HEALTHCHECK_SUBJECT,
HealthCheckEvent { service_metadata: Some(metadata),
event_metadata: None,
result: i32::from(health_check_result),
execution: maybe_duration.map(Duration::into),
exit_status,
stdout,
stderr,
interval: Some(prost_interval) });
}
}
////////////////////////////////////////////////////////////////////////
/// A collection of data that will be present in all events. Rather
/// than baking this into the structure of each event, we represent it
/// once and merge the information into the final rendered form of the
/// event.
///
/// This prevents us from having to thread information throughout the
/// system, just to get it to the places where the events are
/// generated (e.g., not all code has direct access to the
/// Supervisor's ID).
#[derive(Clone, Debug)]
struct EventCore {
/// The unique identifier of the Supervisor sending the event.
supervisor_id: String,
ip_address: SocketAddr,
fqdn: String,
application: String,
environment: String,
site: Option<String>,
meta: EventStreamMetadata,
}
impl EventCore {
fn new(supervisor_id: &str,
ip_address: SocketAddr,
fqdn: &str,
config: &EventStreamConfig)
-> Self {
EventCore { supervisor_id: String::from(supervisor_id),
ip_address,
fqdn: String::from(fqdn),
environment: config.environment.clone(),
application: config.application.clone(),
site: config.site.clone(),
meta: config.meta.clone() }
}
}
/// Internal helper function to know whether or not to go to the trouble of
/// creating event structures. If the event stream hasn't been
/// initialized, then we shouldn't need to do anything.
fn initialized() -> bool { NATS_MESSAGE_STREAM.try_get().is_some() }
/// Publish an event. This is the main interface that client code will
/// use.
///
/// If `init_stream` has not been called already, this function will
/// be a no-op.
fn publish(subject: &'static Subject, mut event: impl EventMessage) {
if let Some(stream) = NATS_MESSAGE_STREAM.try_get() {
// TODO (CM): Yeah... this is looking pretty gross. The
// intention is to be able to timestamp the events right as
// they go out.
//
// We *could* set the time when we convert the EventCore to a
// EventMetadata struct, but that seems odd.
//
// It probably means that this structure just isn't the right
// one.
//
// The ugliness is at least contained, though.
debug!("Publishing to event stream: event {:?} ", event);
event.event_metadata(EventMetadata { occurred_at:
Some(std::time::SystemTime::now().into()),
..EVENT_CORE.get().to_event_metadata() });
let packet = NatsMessage::new(subject, event.to_bytes());
stream.send(packet);
}
}
#[cfg(test)]
mod tests {
use super::{nats_message_stream::NatsMessageStream,
*};
use crate::prost::Message;
use futures::{channel::mpsc as futures_mpsc,
stream::StreamExt};
#[cfg(windows)]
use habitat_core::os::process::windows_child::ExitStatus;
use habitat_core::service::HealthCheckInterval;
#[cfg(unix)]
use std::{os::unix::process::ExitStatusExt,
process::ExitStatus};
#[tokio::test]
#[cfg(any(unix, windows))]
async fn
|
() {
let (tx, rx) = futures_mpsc::unbounded();
NATS_MESSAGE_STREAM.set(NatsMessageStream(tx));
EVENT_CORE.set(EventCore { supervisor_id: String::from("supervisor_id"),
ip_address: "127.0.0.1:8080".parse().unwrap(),
fqdn: String::from("fqdn"),
application: String::from("application"),
environment: String::from("environment"),
site: None,
meta: EventStreamMetadata::default(), });
health_check(ServiceMetadata::default(),
HealthCheckResult::Ok,
HealthCheckHookStatus::NoHook,
HealthCheckInterval::default());
health_check(ServiceMetadata::default(),
HealthCheckResult::Warning,
HealthCheckHookStatus::FailedToRun(Duration::from_secs(5)),
HealthCheckInterval::default());
#[cfg(windows)]
let exit_status = ExitStatus::from(2);
#[cfg(unix)]
let exit_status = ExitStatus::from_raw(2);
let process_output =
ProcessOutput::from_raw(StandardStreams { stdout: Some(String::from("stdout")),
stderr: Some(String::from("stderr")), },
exit_status);
health_check(ServiceMetadata::default(),
HealthCheckResult::Critical,
HealthCheckHookStatus::Ran(process_output, Duration::from_secs(10)),
HealthCheckInterval::default());
#[cfg(windows)]
let exit_status = ExitStatus::from(3);
#[cfg(unix)]
let exit_status = ExitStatus::from_raw(3);
let process_output =
ProcessOutput::from_raw(StandardStreams { stdout: None,
stderr: Some(String::from("stderr")), },
exit_status);
health_check(ServiceMetadata::default(),
HealthCheckResult::Unknown,
HealthCheckHookStatus::Ran(process_output, Duration::from_secs(15)),
HealthCheckInterval::default());
let events = rx.take(4).collect::<Vec<_>>().await;
let event = HealthCheckEvent::decode(events[0].payload()).unwrap();
assert_eq!(event.result, 0);
assert_eq!(event.execution, None);
assert_eq!(event.exit_status, None);
assert_eq!(event.stdout, None);
assert_eq!(event.stderr, None);
let default_interval = HealthCheckInterval::default();
let prost_interval = ProstDuration::from(Duration::from(default_interval));
let prost_interval_option = Some(prost_interval);
assert_eq!(event.interval, prost_interval_option);
let event = HealthCheckEvent::decode(events[1].payload()).unwrap();
assert_eq!(event.result, 1);
assert_eq!(event.execution.unwrap().seconds, 5);
assert_eq!(event.exit_status, None);
assert_eq!(event.stdout, None);
assert_eq!(event.stderr, None);
let event = HealthCheckEvent::decode(events[2].payload()).unwrap();
assert_eq!(event.result, 2);
assert_eq!(event.execution.unwrap().seconds, 10);
#[cfg(windows)]
assert_eq!(event.exit_status, Some(2));
// `ExitStatus::from_raw` sets the signal not the code
#[cfg(unix)]
assert_eq!(event.exit_status, None);
assert_eq!(event.stdout, Some(String::from("stdout")));
assert_eq!(event.stderr, Some(String::from("stderr")));
let event = HealthCheckEvent::decode(events[3].payload()).unwrap();
assert_eq!(event.result, 3);
assert_eq!(event.execution.unwrap().seconds, 15);
#[cfg(windows)]
assert_eq!(event.exit_status, Some(3));
// `ExitStatus::from_raw` sets the signal not the code
#[cfg(unix)]
assert_eq!(event.exit_status, None);
assert_eq!(event.stdout, None);
assert_eq!(event.stderr, Some(String::from("stderr")));
}
}
|
health_check_event
|
identifier_name
|
event.rs
|
//! Main interface for a stream of events the Supervisor can send out
//! in the course of its operations.
//!
//! Currently, the Supervisor is able to send events to a [NATS][1]
//! server. The `init_stream` function must be called
//! before sending events to initialize the publishing thread in the
//! background. Thereafter, you can pass "event" structs to the
//! `event` function, which will publish the event to the stream.
//!
//! All events are published under the "habitat" subject.
//!
//! [1]:https://github.com/nats-io/nats-server
mod error;
mod nats_message_stream;
mod types;
pub(crate) use self::types::ServiceMetadata;
use self::types::{EventMessage,
EventMetadata,
HealthCheckEvent,
ServiceStartedEvent,
ServiceStoppedEvent,
ServiceUpdateStartedEvent};
use crate::manager::{service::{HealthCheckHookStatus,
HealthCheckResult,
ProcessOutput,
Service,
StandardStreams},
sys::Sys};
pub use error::{Error,
Result};
use habitat_common::types::{EventStreamConnectMethod,
EventStreamMetadata,
EventStreamServerCertificate,
EventStreamToken};
use habitat_core::{package::ident::PackageIdent,
service::HealthCheckInterval};
use nats_message_stream::{NatsMessage,
NatsMessageStream};
use prost_types::Duration as ProstDuration;
use rants::{Address,
Subject};
use state::Storage;
use std::{net::SocketAddr,
time::Duration};
lazy_static! {
// TODO (CM): When const fn support lands in stable, we can ditch
// this lazy_static call.
// NATS subject names
static ref SERVICE_STARTED_SUBJECT: Subject =
"habitat.event.service_started".parse().expect("valid NATS subject");
static ref SERVICE_STOPPED_SUBJECT: Subject =
"habitat.event.service_stopped".parse().expect("valid NATS subject");
static ref SERVICE_UPDATE_STARTED_SUBJECT: Subject =
"habitat.event.service_update_started".parse().expect("valid NATS subject");
static ref HEALTHCHECK_SUBJECT: Subject =
"habitat.event.healthcheck".parse().expect("valid NATS subject");
/// Reference to the event stream.
static ref NATS_MESSAGE_STREAM: Storage<NatsMessageStream> = Storage::new();
/// Core information that is shared between all events.
static ref EVENT_CORE: Storage<EventCore> = Storage::new();
}
/// Starts a new task for sending events to a NATS Streaming
/// server. Stashes the handle to the stream, as well as the core
/// event information that will be a part of all events, in a global
/// static reference for access later.
pub async fn init(sys: &Sys, fqdn: String, config: EventStreamConfig) -> Result<()> {
// Only initialize once
if!initialized() {
let supervisor_id = sys.member_id.clone();
let ip_address = sys.gossip_listen();
let event_core = EventCore::new(&supervisor_id, ip_address, &fqdn, &config);
let stream = NatsMessageStream::new(&supervisor_id, config).await?;
NATS_MESSAGE_STREAM.set(stream);
EVENT_CORE.set(event_core);
}
Ok(())
}
/// Captures all event stream-related configuration options that would
/// be passed in by a user
// TODO (DM): The fields of this struct are only public for testing. We should refactor the crate
// layout so this can be avoided.
#[derive(Clone, Debug, PartialEq)]
pub struct EventStreamConfig {
pub environment: String,
pub application: String,
pub site: Option<String>,
pub meta: EventStreamMetadata,
pub token: EventStreamToken,
pub url: Address,
pub connect_method: EventStreamConnectMethod,
pub server_certificate: Option<EventStreamServerCertificate>,
}
/// Send an event for the start of a Service.
pub fn service_started(service: &Service) {
if initialized() {
publish(&SERVICE_STARTED_SUBJECT,
ServiceStartedEvent { service_metadata: Some(service.to_service_metadata()),
event_metadata: None, });
}
}
/// Send an event for the stop of a Service.
pub fn service_stopped(service: &Service) {
if initialized() {
publish(&SERVICE_STOPPED_SUBJECT,
ServiceStoppedEvent { service_metadata: Some(service.to_service_metadata()),
event_metadata: None, });
}
}
/// Send an event at the start of a Service update.
pub fn service_update_started(service: &Service, update: &PackageIdent) {
if initialized() {
publish(&SERVICE_UPDATE_STARTED_SUBJECT,
ServiceUpdateStartedEvent { event_metadata: None,
service_metadata:
Some(service.to_service_metadata()),
update_package_ident: update.clone().to_string(), });
}
}
// Takes metadata directly, rather than a `&Service` like other event
// functions, because of how the asynchronous health checking
// currently works. Revisit when async/await + Pin is all stabilized.
pub fn health_check(metadata: ServiceMetadata,
health_check_result: HealthCheckResult,
health_check_hook_status: HealthCheckHookStatus,
health_check_interval: HealthCheckInterval) {
if initialized() {
let health_check_result: types::HealthCheckResult = health_check_result.into();
let maybe_duration = health_check_hook_status.maybe_duration();
let maybe_process_output = health_check_hook_status.maybe_process_output();
let exit_status = maybe_process_output.as_ref()
.and_then(|o| o.exit_status().code());
let StandardStreams { stdout, stderr } =
maybe_process_output.map(ProcessOutput::standard_streams)
.unwrap_or_default();
let prost_interval = ProstDuration::from(Duration::from(health_check_interval));
publish(&HEALTHCHECK_SUBJECT,
HealthCheckEvent { service_metadata: Some(metadata),
event_metadata: None,
result: i32::from(health_check_result),
execution: maybe_duration.map(Duration::into),
exit_status,
stdout,
stderr,
interval: Some(prost_interval) });
}
}
////////////////////////////////////////////////////////////////////////
/// A collection of data that will be present in all events. Rather
/// than baking this into the structure of each event, we represent it
/// once and merge the information into the final rendered form of the
/// event.
///
/// This prevents us from having to thread information throughout the
/// system, just to get it to the places where the events are
/// generated (e.g., not all code has direct access to the
/// Supervisor's ID).
#[derive(Clone, Debug)]
struct EventCore {
/// The unique identifier of the Supervisor sending the event.
supervisor_id: String,
ip_address: SocketAddr,
fqdn: String,
application: String,
environment: String,
site: Option<String>,
meta: EventStreamMetadata,
}
impl EventCore {
fn new(supervisor_id: &str,
ip_address: SocketAddr,
fqdn: &str,
config: &EventStreamConfig)
-> Self {
EventCore { supervisor_id: String::from(supervisor_id),
ip_address,
fqdn: String::from(fqdn),
environment: config.environment.clone(),
application: config.application.clone(),
site: config.site.clone(),
meta: config.meta.clone() }
}
}
/// Internal helper function to know whether or not to go to the trouble of
/// creating event structures. If the event stream hasn't been
/// initialized, then we shouldn't need to do anything.
fn initialized() -> bool
|
/// Publish an event. This is the main interface that client code will
/// use.
///
/// If `init_stream` has not been called already, this function will
/// be a no-op.
fn publish(subject: &'static Subject, mut event: impl EventMessage) {
if let Some(stream) = NATS_MESSAGE_STREAM.try_get() {
// TODO (CM): Yeah... this is looking pretty gross. The
// intention is to be able to timestamp the events right as
// they go out.
//
// We *could* set the time when we convert the EventCore to a
// EventMetadata struct, but that seems odd.
//
// It probably means that this structure just isn't the right
// one.
//
// The ugliness is at least contained, though.
debug!("Publishing to event stream: event {:?} ", event);
event.event_metadata(EventMetadata { occurred_at:
Some(std::time::SystemTime::now().into()),
..EVENT_CORE.get().to_event_metadata() });
let packet = NatsMessage::new(subject, event.to_bytes());
stream.send(packet);
}
}
#[cfg(test)]
mod tests {
use super::{nats_message_stream::NatsMessageStream,
*};
use crate::prost::Message;
use futures::{channel::mpsc as futures_mpsc,
stream::StreamExt};
#[cfg(windows)]
use habitat_core::os::process::windows_child::ExitStatus;
use habitat_core::service::HealthCheckInterval;
#[cfg(unix)]
use std::{os::unix::process::ExitStatusExt,
process::ExitStatus};
#[tokio::test]
#[cfg(any(unix, windows))]
async fn health_check_event() {
let (tx, rx) = futures_mpsc::unbounded();
NATS_MESSAGE_STREAM.set(NatsMessageStream(tx));
EVENT_CORE.set(EventCore { supervisor_id: String::from("supervisor_id"),
ip_address: "127.0.0.1:8080".parse().unwrap(),
fqdn: String::from("fqdn"),
application: String::from("application"),
environment: String::from("environment"),
site: None,
meta: EventStreamMetadata::default(), });
health_check(ServiceMetadata::default(),
HealthCheckResult::Ok,
HealthCheckHookStatus::NoHook,
HealthCheckInterval::default());
health_check(ServiceMetadata::default(),
HealthCheckResult::Warning,
HealthCheckHookStatus::FailedToRun(Duration::from_secs(5)),
HealthCheckInterval::default());
#[cfg(windows)]
let exit_status = ExitStatus::from(2);
#[cfg(unix)]
let exit_status = ExitStatus::from_raw(2);
let process_output =
ProcessOutput::from_raw(StandardStreams { stdout: Some(String::from("stdout")),
stderr: Some(String::from("stderr")), },
exit_status);
health_check(ServiceMetadata::default(),
HealthCheckResult::Critical,
HealthCheckHookStatus::Ran(process_output, Duration::from_secs(10)),
HealthCheckInterval::default());
#[cfg(windows)]
let exit_status = ExitStatus::from(3);
#[cfg(unix)]
let exit_status = ExitStatus::from_raw(3);
let process_output =
ProcessOutput::from_raw(StandardStreams { stdout: None,
stderr: Some(String::from("stderr")), },
exit_status);
health_check(ServiceMetadata::default(),
HealthCheckResult::Unknown,
HealthCheckHookStatus::Ran(process_output, Duration::from_secs(15)),
HealthCheckInterval::default());
let events = rx.take(4).collect::<Vec<_>>().await;
let event = HealthCheckEvent::decode(events[0].payload()).unwrap();
assert_eq!(event.result, 0);
assert_eq!(event.execution, None);
assert_eq!(event.exit_status, None);
assert_eq!(event.stdout, None);
assert_eq!(event.stderr, None);
let default_interval = HealthCheckInterval::default();
let prost_interval = ProstDuration::from(Duration::from(default_interval));
let prost_interval_option = Some(prost_interval);
assert_eq!(event.interval, prost_interval_option);
let event = HealthCheckEvent::decode(events[1].payload()).unwrap();
assert_eq!(event.result, 1);
assert_eq!(event.execution.unwrap().seconds, 5);
assert_eq!(event.exit_status, None);
assert_eq!(event.stdout, None);
assert_eq!(event.stderr, None);
let event = HealthCheckEvent::decode(events[2].payload()).unwrap();
assert_eq!(event.result, 2);
assert_eq!(event.execution.unwrap().seconds, 10);
#[cfg(windows)]
assert_eq!(event.exit_status, Some(2));
// `ExitStatus::from_raw` sets the signal not the code
#[cfg(unix)]
assert_eq!(event.exit_status, None);
assert_eq!(event.stdout, Some(String::from("stdout")));
assert_eq!(event.stderr, Some(String::from("stderr")));
let event = HealthCheckEvent::decode(events[3].payload()).unwrap();
assert_eq!(event.result, 3);
assert_eq!(event.execution.unwrap().seconds, 15);
#[cfg(windows)]
assert_eq!(event.exit_status, Some(3));
// `ExitStatus::from_raw` sets the signal not the code
#[cfg(unix)]
assert_eq!(event.exit_status, None);
assert_eq!(event.stdout, None);
assert_eq!(event.stderr, Some(String::from("stderr")));
}
}
|
{ NATS_MESSAGE_STREAM.try_get().is_some() }
|
identifier_body
|
event.rs
|
//! Main interface for a stream of events the Supervisor can send out
//! in the course of its operations.
//!
//! Currently, the Supervisor is able to send events to a [NATS][1]
//! server. The `init_stream` function must be called
//! before sending events to initialize the publishing thread in the
//! background. Thereafter, you can pass "event" structs to the
//! `event` function, which will publish the event to the stream.
//!
//! All events are published under the "habitat" subject.
//!
//! [1]:https://github.com/nats-io/nats-server
mod error;
mod nats_message_stream;
mod types;
pub(crate) use self::types::ServiceMetadata;
use self::types::{EventMessage,
EventMetadata,
HealthCheckEvent,
ServiceStartedEvent,
ServiceStoppedEvent,
ServiceUpdateStartedEvent};
use crate::manager::{service::{HealthCheckHookStatus,
HealthCheckResult,
ProcessOutput,
Service,
StandardStreams},
sys::Sys};
pub use error::{Error,
Result};
use habitat_common::types::{EventStreamConnectMethod,
EventStreamMetadata,
EventStreamServerCertificate,
EventStreamToken};
use habitat_core::{package::ident::PackageIdent,
service::HealthCheckInterval};
use nats_message_stream::{NatsMessage,
NatsMessageStream};
use prost_types::Duration as ProstDuration;
use rants::{Address,
Subject};
use state::Storage;
use std::{net::SocketAddr,
time::Duration};
lazy_static! {
// TODO (CM): When const fn support lands in stable, we can ditch
// this lazy_static call.
// NATS subject names
static ref SERVICE_STARTED_SUBJECT: Subject =
"habitat.event.service_started".parse().expect("valid NATS subject");
static ref SERVICE_STOPPED_SUBJECT: Subject =
"habitat.event.service_stopped".parse().expect("valid NATS subject");
static ref SERVICE_UPDATE_STARTED_SUBJECT: Subject =
"habitat.event.service_update_started".parse().expect("valid NATS subject");
static ref HEALTHCHECK_SUBJECT: Subject =
"habitat.event.healthcheck".parse().expect("valid NATS subject");
/// Reference to the event stream.
static ref NATS_MESSAGE_STREAM: Storage<NatsMessageStream> = Storage::new();
/// Core information that is shared between all events.
static ref EVENT_CORE: Storage<EventCore> = Storage::new();
}
/// Starts a new task for sending events to a NATS Streaming
/// server. Stashes the handle to the stream, as well as the core
/// event information that will be a part of all events, in a global
/// static reference for access later.
pub async fn init(sys: &Sys, fqdn: String, config: EventStreamConfig) -> Result<()> {
// Only initialize once
if!initialized() {
let supervisor_id = sys.member_id.clone();
let ip_address = sys.gossip_listen();
let event_core = EventCore::new(&supervisor_id, ip_address, &fqdn, &config);
let stream = NatsMessageStream::new(&supervisor_id, config).await?;
NATS_MESSAGE_STREAM.set(stream);
EVENT_CORE.set(event_core);
}
Ok(())
}
/// Captures all event stream-related configuration options that would
/// be passed in by a user
// TODO (DM): The fields of this struct are only public for testing. We should refactor the crate
// layout so this can be avoided.
#[derive(Clone, Debug, PartialEq)]
pub struct EventStreamConfig {
pub environment: String,
pub application: String,
pub site: Option<String>,
pub meta: EventStreamMetadata,
pub token: EventStreamToken,
pub url: Address,
pub connect_method: EventStreamConnectMethod,
pub server_certificate: Option<EventStreamServerCertificate>,
}
/// Send an event for the start of a Service.
pub fn service_started(service: &Service) {
if initialized() {
publish(&SERVICE_STARTED_SUBJECT,
ServiceStartedEvent { service_metadata: Some(service.to_service_metadata()),
event_metadata: None, });
}
}
/// Send an event for the stop of a Service.
pub fn service_stopped(service: &Service) {
if initialized() {
publish(&SERVICE_STOPPED_SUBJECT,
ServiceStoppedEvent { service_metadata: Some(service.to_service_metadata()),
event_metadata: None, });
}
}
/// Send an event at the start of a Service update.
pub fn service_update_started(service: &Service, update: &PackageIdent) {
if initialized() {
publish(&SERVICE_UPDATE_STARTED_SUBJECT,
ServiceUpdateStartedEvent { event_metadata: None,
service_metadata:
Some(service.to_service_metadata()),
update_package_ident: update.clone().to_string(), });
}
}
// Takes metadata directly, rather than a `&Service` like other event
// functions, because of how the asynchronous health checking
// currently works. Revisit when async/await + Pin is all stabilized.
pub fn health_check(metadata: ServiceMetadata,
health_check_result: HealthCheckResult,
health_check_hook_status: HealthCheckHookStatus,
health_check_interval: HealthCheckInterval) {
if initialized() {
let health_check_result: types::HealthCheckResult = health_check_result.into();
let maybe_duration = health_check_hook_status.maybe_duration();
let maybe_process_output = health_check_hook_status.maybe_process_output();
let exit_status = maybe_process_output.as_ref()
.and_then(|o| o.exit_status().code());
let StandardStreams { stdout, stderr } =
maybe_process_output.map(ProcessOutput::standard_streams)
.unwrap_or_default();
let prost_interval = ProstDuration::from(Duration::from(health_check_interval));
publish(&HEALTHCHECK_SUBJECT,
HealthCheckEvent { service_metadata: Some(metadata),
event_metadata: None,
result: i32::from(health_check_result),
execution: maybe_duration.map(Duration::into),
exit_status,
stdout,
stderr,
interval: Some(prost_interval) });
}
}
////////////////////////////////////////////////////////////////////////
/// A collection of data that will be present in all events. Rather
/// than baking this into the structure of each event, we represent it
/// once and merge the information into the final rendered form of the
/// event.
///
/// This prevents us from having to thread information throughout the
/// system, just to get it to the places where the events are
/// generated (e.g., not all code has direct access to the
/// Supervisor's ID).
#[derive(Clone, Debug)]
struct EventCore {
/// The unique identifier of the Supervisor sending the event.
supervisor_id: String,
ip_address: SocketAddr,
fqdn: String,
application: String,
environment: String,
site: Option<String>,
meta: EventStreamMetadata,
}
impl EventCore {
fn new(supervisor_id: &str,
ip_address: SocketAddr,
fqdn: &str,
config: &EventStreamConfig)
-> Self {
EventCore { supervisor_id: String::from(supervisor_id),
ip_address,
fqdn: String::from(fqdn),
environment: config.environment.clone(),
application: config.application.clone(),
site: config.site.clone(),
meta: config.meta.clone() }
}
}
/// Internal helper function to know whether or not to go to the trouble of
/// creating event structures. If the event stream hasn't been
/// initialized, then we shouldn't need to do anything.
fn initialized() -> bool { NATS_MESSAGE_STREAM.try_get().is_some() }
/// Publish an event. This is the main interface that client code will
/// use.
///
/// If `init_stream` has not been called already, this function will
/// be a no-op.
fn publish(subject: &'static Subject, mut event: impl EventMessage) {
if let Some(stream) = NATS_MESSAGE_STREAM.try_get()
|
}
#[cfg(test)]
mod tests {
    use super::{nats_message_stream::NatsMessageStream, *};
    use crate::prost::Message;
    use futures::{channel::mpsc as futures_mpsc, stream::StreamExt};
    #[cfg(windows)]
    use habitat_core::os::process::windows_child::ExitStatus;
    use habitat_core::service::HealthCheckInterval;
    #[cfg(unix)]
    use std::{os::unix::process::ExitStatusExt, process::ExitStatus};

    /// End-to-end check of `health_check` publication: point the global
    /// stream at an in-memory channel, emit one event per
    /// `HealthCheckResult` variant, then decode each published protobuf
    /// payload and verify its fields.
    #[tokio::test]
    #[cfg(any(unix, windows))]
    async fn health_check_event() {
        let (tx, rx) = futures_mpsc::unbounded();
        // Capture published packets on `rx` instead of a real NATS server.
        NATS_MESSAGE_STREAM.set(NatsMessageStream(tx));
        EVENT_CORE.set(EventCore { supervisor_id: String::from("supervisor_id"),
                                   ip_address: "127.0.0.1:8080".parse().unwrap(),
                                   fqdn: String::from("fqdn"),
                                   application: String::from("application"),
                                   environment: String::from("environment"),
                                   site: None,
                                   meta: EventStreamMetadata::default(), });
        // Case 1: healthy, no hook configured.
        health_check(ServiceMetadata::default(),
                     HealthCheckResult::Ok,
                     HealthCheckHookStatus::NoHook,
                     HealthCheckInterval::default());
        // Case 2: warning, the hook failed to run at all.
        health_check(ServiceMetadata::default(),
                     HealthCheckResult::Warning,
                     HealthCheckHookStatus::FailedToRun(Duration::from_secs(5)),
                     HealthCheckInterval::default());
        #[cfg(windows)]
        let exit_status = ExitStatus::from(2);
        #[cfg(unix)]
        let exit_status = ExitStatus::from_raw(2);
        let process_output =
            ProcessOutput::from_raw(StandardStreams { stdout: Some(String::from("stdout")),
                                                      stderr: Some(String::from("stderr")), },
                                    exit_status);
        // Case 3: critical, the hook ran and produced output on both streams.
        health_check(ServiceMetadata::default(),
                     HealthCheckResult::Critical,
                     HealthCheckHookStatus::Ran(process_output, Duration::from_secs(10)),
                     HealthCheckInterval::default());
        #[cfg(windows)]
        let exit_status = ExitStatus::from(3);
        #[cfg(unix)]
        let exit_status = ExitStatus::from_raw(3);
        let process_output =
            ProcessOutput::from_raw(StandardStreams { stdout: None,
                                                      stderr: Some(String::from("stderr")), },
                                    exit_status);
        // Case 4: unknown, the hook ran but only stderr was captured.
        health_check(ServiceMetadata::default(),
                     HealthCheckResult::Unknown,
                     HealthCheckHookStatus::Ran(process_output, Duration::from_secs(15)),
                     HealthCheckInterval::default());
        // Drain exactly the four packets published above.
        let events = rx.take(4).collect::<Vec<_>>().await;
        let event = HealthCheckEvent::decode(events[0].payload()).unwrap();
        assert_eq!(event.result, 0);
        assert_eq!(event.execution, None);
        assert_eq!(event.exit_status, None);
        assert_eq!(event.stdout, None);
        assert_eq!(event.stderr, None);
        let default_interval = HealthCheckInterval::default();
        let prost_interval = ProstDuration::from(Duration::from(default_interval));
        let prost_interval_option = Some(prost_interval);
        assert_eq!(event.interval, prost_interval_option);
        let event = HealthCheckEvent::decode(events[1].payload()).unwrap();
        assert_eq!(event.result, 1);
        assert_eq!(event.execution.unwrap().seconds, 5);
        assert_eq!(event.exit_status, None);
        assert_eq!(event.stdout, None);
        assert_eq!(event.stderr, None);
        let event = HealthCheckEvent::decode(events[2].payload()).unwrap();
        assert_eq!(event.result, 2);
        assert_eq!(event.execution.unwrap().seconds, 10);
        #[cfg(windows)]
        assert_eq!(event.exit_status, Some(2));
        // `ExitStatus::from_raw` sets the signal not the code
        #[cfg(unix)]
        assert_eq!(event.exit_status, None);
        assert_eq!(event.stdout, Some(String::from("stdout")));
        assert_eq!(event.stderr, Some(String::from("stderr")));
        let event = HealthCheckEvent::decode(events[3].payload()).unwrap();
        assert_eq!(event.result, 3);
        assert_eq!(event.execution.unwrap().seconds, 15);
        #[cfg(windows)]
        assert_eq!(event.exit_status, Some(3));
        // `ExitStatus::from_raw` sets the signal not the code
        #[cfg(unix)]
        assert_eq!(event.exit_status, None);
        assert_eq!(event.stdout, None);
        assert_eq!(event.stderr, Some(String::from("stderr")));
    }
}
|
{
// TODO (CM): Yeah... this is looking pretty gross. The
// intention is to be able to timestamp the events right as
// they go out.
//
// We *could* set the time when we convert the EventCore to a
// EventMetadata struct, but that seems odd.
//
// It probably means that this structure just isn't the right
// one.
//
// The ugliness is at least contained, though.
debug!("Publishing to event stream: event {:?} ", event);
event.event_metadata(EventMetadata { occurred_at:
Some(std::time::SystemTime::now().into()),
..EVENT_CORE.get().to_event_metadata() });
let packet = NatsMessage::new(subject, event.to_bytes());
stream.send(packet);
}
|
conditional_block
|
event.rs
|
//! Main interface for a stream of events the Supervisor can send out
//! in the course of its operations.
//!
//! Currently, the Supervisor is able to send events to a [NATS][1]
//! server. The `init_stream` function must be called
//! before sending events to initialize the publishing thread in the
//! background. Thereafter, you can pass "event" structs to the
//! `event` function, which will publish the event to the stream.
//!
//! All events are published under the "habitat" subject.
//!
//! [1]:https://github.com/nats-io/nats-server
mod error;
mod nats_message_stream;
mod types;
pub(crate) use self::types::ServiceMetadata;
use self::types::{EventMessage,
EventMetadata,
HealthCheckEvent,
ServiceStartedEvent,
ServiceStoppedEvent,
ServiceUpdateStartedEvent};
use crate::manager::{service::{HealthCheckHookStatus,
HealthCheckResult,
ProcessOutput,
Service,
StandardStreams},
sys::Sys};
pub use error::{Error,
Result};
use habitat_common::types::{EventStreamConnectMethod,
EventStreamMetadata,
EventStreamServerCertificate,
EventStreamToken};
use habitat_core::{package::ident::PackageIdent,
service::HealthCheckInterval};
use nats_message_stream::{NatsMessage,
NatsMessageStream};
use prost_types::Duration as ProstDuration;
use rants::{Address,
Subject};
use state::Storage;
use std::{net::SocketAddr,
time::Duration};
lazy_static! {
// TODO (CM): When const fn support lands in stable, we can ditch
// this lazy_static call.
// NATS subject names
static ref SERVICE_STARTED_SUBJECT: Subject =
"habitat.event.service_started".parse().expect("valid NATS subject");
static ref SERVICE_STOPPED_SUBJECT: Subject =
"habitat.event.service_stopped".parse().expect("valid NATS subject");
static ref SERVICE_UPDATE_STARTED_SUBJECT: Subject =
"habitat.event.service_update_started".parse().expect("valid NATS subject");
static ref HEALTHCHECK_SUBJECT: Subject =
"habitat.event.healthcheck".parse().expect("valid NATS subject");
/// Reference to the event stream.
static ref NATS_MESSAGE_STREAM: Storage<NatsMessageStream> = Storage::new();
/// Core information that is shared between all events.
static ref EVENT_CORE: Storage<EventCore> = Storage::new();
}
/// Starts a new task for sending events to a NATS Streaming
/// server. Stashes the handle to the stream, as well as the core
/// event information that will be a part of all events, in a global
/// static reference for access later.
pub async fn init(sys: &Sys, fqdn: String, config: EventStreamConfig) -> Result<()> {
    // The stream handle and event core live in one-shot global `Storage`
    // cells; bail out early if a previous call already populated them.
    if initialized() {
        return Ok(());
    }
    let supervisor_id = sys.member_id.clone();
    let core = EventCore::new(&supervisor_id, sys.gossip_listen(), &fqdn, &config);
    // Connect first: on failure we return the error without touching the
    // global cells, so a later `init` call can retry.
    NATS_MESSAGE_STREAM.set(NatsMessageStream::new(&supervisor_id, config).await?);
    EVENT_CORE.set(core);
    Ok(())
}
/// Captures all event stream-related configuration options that would
/// be passed in by a user
// TODO (DM): The fields of this struct are only public for testing. We should refactor the crate
// layout so this can be avoided.
#[derive(Clone, Debug, PartialEq)]
pub struct EventStreamConfig {
    /// Environment name copied into every event via `EventCore`.
    pub environment: String,
    /// Application name copied into every event via `EventCore`.
    pub application: String,
    /// Optional site identifier; `None` when the user supplied none.
    pub site: Option<String>,
    /// Arbitrary user-supplied key/value metadata attached to all events.
    pub meta: EventStreamMetadata,
    /// Token presented to the event stream server.
    pub token: EventStreamToken,
    /// Address of the NATS server to publish to.
    pub url: Address,
    // NOTE(review): presumably controls connect timeout/retry behavior —
    // confirm against `EventStreamConnectMethod`.
    pub connect_method: EventStreamConnectMethod,
    /// Optional certificate used when talking to the event stream server.
    pub server_certificate: Option<EventStreamServerCertificate>,
}
/// Send an event for the start of a Service.
///
/// No-op when the event stream has not been initialized.
pub fn service_started(service: &Service) {
    if !initialized() {
        return;
    }
    let event = ServiceStartedEvent { service_metadata: Some(service.to_service_metadata()),
                                      event_metadata: None, };
    publish(&SERVICE_STARTED_SUBJECT, event);
}
/// Send an event for the stop of a Service.
///
/// No-op when the event stream has not been initialized.
pub fn service_stopped(service: &Service) {
    if !initialized() {
        return;
    }
    let event = ServiceStoppedEvent { service_metadata: Some(service.to_service_metadata()),
                                      event_metadata: None, };
    publish(&SERVICE_STOPPED_SUBJECT, event);
}
/// Send an event at the start of a Service update.
///
/// `update` is the ident of the package the service is being updated to;
/// it is rendered into the event as a string.
pub fn service_update_started(service: &Service, update: &PackageIdent) {
    if initialized() {
        publish(&SERVICE_UPDATE_STARTED_SUBJECT,
                ServiceUpdateStartedEvent { event_metadata: None,
                                            service_metadata:
                                                Some(service.to_service_metadata()),
                                            // `to_string` renders from a borrow; the previous
                                            // `update.clone().to_string()` cloned the ident only
                                            // to immediately discard the copy.
                                            update_package_ident: update.to_string(), });
    }
}
// Takes metadata directly, rather than a `&Service` like other event
// functions, because of how the asynchronous health checking
// currently works. Revisit when async/await + Pin is all stabilized.
/// Send an event describing the outcome of a single health check.
///
/// No-op when the event stream has not been initialized.
pub fn health_check(metadata: ServiceMetadata,
                    health_check_result: HealthCheckResult,
                    health_check_hook_status: HealthCheckHookStatus,
                    health_check_interval: HealthCheckInterval) {
    if initialized() {
        // Convert the Supervisor-internal result into its wire (protobuf) form.
        let health_check_result: types::HealthCheckResult = health_check_result.into();
        // How long the hook ran, if it ran (or failed to run) at all.
        let maybe_duration = health_check_hook_status.maybe_duration();
        // Captured output of the hook process, if one actually ran.
        let maybe_process_output = health_check_hook_status.maybe_process_output();
        // `code()` is `None` when the process was terminated by a signal on
        // Unix (see the unit test's note on `ExitStatus::from_raw`), so
        // `exit_status` can be absent even when a hook ran.
        let exit_status = maybe_process_output.as_ref()
                                              .and_then(|o| o.exit_status().code());
        // Missing process output yields the default (both streams `None`).
        let StandardStreams { stdout, stderr } =
            maybe_process_output.map(ProcessOutput::standard_streams)
                                .unwrap_or_default();
        let prost_interval = ProstDuration::from(Duration::from(health_check_interval));
        publish(&HEALTHCHECK_SUBJECT,
                HealthCheckEvent { service_metadata: Some(metadata),
                                   event_metadata: None,
                                   result: i32::from(health_check_result),
                                   execution: maybe_duration.map(Duration::into),
                                   exit_status,
                                   stdout,
                                   stderr,
                                   interval: Some(prost_interval) });
    }
}
////////////////////////////////////////////////////////////////////////
/// A collection of data that will be present in all events. Rather
/// than baking this into the structure of each event, we represent it
/// once and merge the information into the final rendered form of the
/// event.
///
/// This prevents us from having to thread information throughout the
/// system, just to get it to the places where the events are
/// generated (e.g., not all code has direct access to the
/// Supervisor's ID).
#[derive(Clone, Debug)]
struct EventCore {
    /// The unique identifier of the Supervisor sending the event.
    supervisor_id: String,
    /// Gossip listen address of this Supervisor (see `init`).
    ip_address: SocketAddr,
    /// Fully-qualified domain name of the host, as passed to `init`.
    fqdn: String,
    /// Application name from the user's `EventStreamConfig`.
    application: String,
    /// Environment name from the user's `EventStreamConfig`.
    environment: String,
    /// Optional site identifier from the user's `EventStreamConfig`.
    site: Option<String>,
    /// User-supplied event stream metadata, attached to every event.
    meta: EventStreamMetadata,
}
impl EventCore {
fn new(supervisor_id: &str,
ip_address: SocketAddr,
fqdn: &str,
config: &EventStreamConfig)
-> Self {
EventCore { supervisor_id: String::from(supervisor_id),
ip_address,
fqdn: String::from(fqdn),
environment: config.environment.clone(),
application: config.application.clone(),
site: config.site.clone(),
meta: config.meta.clone() }
}
}
/// Internal helper function to know whether or not to go to the trouble of
/// creating event structures. If the event stream hasn't been
/// initialized, then we shouldn't need to do anything.
///
/// Returns `true` once `init` has stored the stream handle in the global
/// `NATS_MESSAGE_STREAM` cell.
fn initialized() -> bool { NATS_MESSAGE_STREAM.try_get().is_some() }
/// Publish an event. This is the main interface that client code will
/// use.
///
/// If `init_stream` has not been called already, this function will
/// be a no-op.
fn publish(subject: &'static Subject, mut event: impl EventMessage) {
    if let Some(stream) = NATS_MESSAGE_STREAM.try_get() {
        // TODO (CM): Yeah... this is looking pretty gross. The
        // intention is to be able to timestamp the events right as
        // they go out.
        //
        // We *could* set the time when we convert the EventCore to a
        // EventMetadata struct, but that seems odd.
        //
        // It probably means that this structure just isn't the right
        // one.
        //
        // The ugliness is at least contained, though.
        debug!("Publishing to event stream: event {:?} ", event);
        // Stamp the event with "now" plus the shared EventCore fields.
        // NOTE(review): `EVENT_CORE.get()` panics if `init` never ran; the
        // `initialized()` guards in the public event functions prevent that,
        // but `publish` itself only checks the stream cell — confirm all
        // callers guard, or that both cells are always set together.
        event.event_metadata(EventMetadata { occurred_at:
                                                 Some(std::time::SystemTime::now().into()),
                                             ..EVENT_CORE.get().to_event_metadata() });
        let packet = NatsMessage::new(subject, event.to_bytes());
        // Fire-and-forget: hand the serialized packet to the background task.
        stream.send(packet);
    }
}
#[cfg(test)]
mod tests {
use super::{nats_message_stream::NatsMessageStream,
*};
use crate::prost::Message;
use futures::{channel::mpsc as futures_mpsc,
stream::StreamExt};
#[cfg(windows)]
use habitat_core::os::process::windows_child::ExitStatus;
use habitat_core::service::HealthCheckInterval;
#[cfg(unix)]
use std::{os::unix::process::ExitStatusExt,
process::ExitStatus};
#[tokio::test]
#[cfg(any(unix, windows))]
async fn health_check_event() {
let (tx, rx) = futures_mpsc::unbounded();
NATS_MESSAGE_STREAM.set(NatsMessageStream(tx));
EVENT_CORE.set(EventCore { supervisor_id: String::from("supervisor_id"),
ip_address: "127.0.0.1:8080".parse().unwrap(),
fqdn: String::from("fqdn"),
application: String::from("application"),
environment: String::from("environment"),
site: None,
meta: EventStreamMetadata::default(), });
health_check(ServiceMetadata::default(),
HealthCheckResult::Ok,
HealthCheckHookStatus::NoHook,
HealthCheckInterval::default());
health_check(ServiceMetadata::default(),
HealthCheckResult::Warning,
HealthCheckHookStatus::FailedToRun(Duration::from_secs(5)),
HealthCheckInterval::default());
#[cfg(windows)]
let exit_status = ExitStatus::from(2);
#[cfg(unix)]
let exit_status = ExitStatus::from_raw(2);
let process_output =
ProcessOutput::from_raw(StandardStreams { stdout: Some(String::from("stdout")),
stderr: Some(String::from("stderr")), },
exit_status);
health_check(ServiceMetadata::default(),
HealthCheckResult::Critical,
HealthCheckHookStatus::Ran(process_output, Duration::from_secs(10)),
HealthCheckInterval::default());
#[cfg(windows)]
let exit_status = ExitStatus::from(3);
#[cfg(unix)]
let exit_status = ExitStatus::from_raw(3);
let process_output =
ProcessOutput::from_raw(StandardStreams { stdout: None,
stderr: Some(String::from("stderr")), },
exit_status);
health_check(ServiceMetadata::default(),
HealthCheckResult::Unknown,
HealthCheckHookStatus::Ran(process_output, Duration::from_secs(15)),
HealthCheckInterval::default());
let events = rx.take(4).collect::<Vec<_>>().await;
let event = HealthCheckEvent::decode(events[0].payload()).unwrap();
assert_eq!(event.result, 0);
assert_eq!(event.execution, None);
assert_eq!(event.exit_status, None);
assert_eq!(event.stdout, None);
assert_eq!(event.stderr, None);
let default_interval = HealthCheckInterval::default();
let prost_interval = ProstDuration::from(Duration::from(default_interval));
let prost_interval_option = Some(prost_interval);
assert_eq!(event.interval, prost_interval_option);
let event = HealthCheckEvent::decode(events[1].payload()).unwrap();
assert_eq!(event.result, 1);
assert_eq!(event.execution.unwrap().seconds, 5);
assert_eq!(event.exit_status, None);
assert_eq!(event.stdout, None);
assert_eq!(event.stderr, None);
let event = HealthCheckEvent::decode(events[2].payload()).unwrap();
assert_eq!(event.result, 2);
assert_eq!(event.execution.unwrap().seconds, 10);
#[cfg(windows)]
assert_eq!(event.exit_status, Some(2));
// `ExitStatus::from_raw` sets the signal not the code
#[cfg(unix)]
assert_eq!(event.exit_status, None);
assert_eq!(event.stdout, Some(String::from("stdout")));
assert_eq!(event.stderr, Some(String::from("stderr")));
let event = HealthCheckEvent::decode(events[3].payload()).unwrap();
assert_eq!(event.result, 3);
assert_eq!(event.execution.unwrap().seconds, 15);
#[cfg(windows)]
|
assert_eq!(event.exit_status, Some(3));
// `ExitStatus::from_raw` sets the signal not the code
#[cfg(unix)]
assert_eq!(event.exit_status, None);
assert_eq!(event.stdout, None);
assert_eq!(event.stderr, Some(String::from("stderr")));
}
}
|
random_line_split
|
|
response.rs
|
//! Data structures containing information about the outcome of an executed query.
//!
//! This module implements the step "Receive responses" as described in [Writing RethinkDB drivers]
//! (http://rethinkdb.com/docs/writing-drivers/#receive-responses).
use std::io::{self, Read};
use std::marker::PhantomData;
use byteorder::{ReadBytesExt, LittleEndian};
use serde::de::{self, Deserialize, Deserializer};
use serde_json;
/// A response containing the queried data and information about the execution of the query.
///
/// `O` is the type of queried data.
#[derive(Debug)]
pub struct Response<O> {
/// Indicates which query this response corresponds to.
token: u64,
/// Describes the kind of response got and contains the respective data.
variant: Variant<O>,
/// The category an error belongs to.
error_kind: Option<ErrorKind>,
|
/// The profile if requested in the corresponding query.
profile: Option<Box<Profile>>,
/// The backtrace says where in the query the error occurred.
backtrace: Option<Vec<Frame>>,
}
impl<O> Response<O> {
    /// Returns the token identifying this `Response`.
    pub fn token(&self) -> u64 {
        self.token
    }

    /// Returns the response data.
    pub fn variant(&self) -> &Variant<O> {
        &self.variant
    }

    /// Returns the category of a runtime error, if one occurred.
    pub fn error_kind(&self) -> &Option<ErrorKind> {
        &self.error_kind
    }
}
impl<O: Deserialize> Response<O> {
/// Deserializes a `Response` from a `Read`er.
///
/// # Failure
///
/// If an error occurs in deserialization or validation it is not guaranteed that this method
/// reads a syntactically and logically complete block of data. The `Read`er is in a corrupted
/// state and further reads will fail with spurious reasons.
pub fn deserialize<R: Read>(reader: &mut R) -> io::Result<Response<O>> {
debug!("Load response");
let token = try!(reader.read_u64::<LittleEndian>());
let len = try!(reader.read_u32::<LittleEndian>());
let mut raw_response = vec![0u8; len as usize];
try!(reader.read_exact(&mut raw_response));
let mut response: Response<O> = serde_json::de::from_reader(&raw_response[..]).unwrap();
response.token = token;
Ok(response)
}
}
impl<O: Deserialize> Deserialize for Response<O> {
    /// Delegates to `ResponseVisitor`, which understands the wire protocol's
    /// single-letter field names.
    fn deserialize<D: Deserializer>(deserializer: &mut D) -> Result<Response<O>, D::Error> {
        deserializer.deserialize(ResponseVisitor { data: PhantomData })
    }
}
/// Visitor driving the manual `Deserialize` impl of `Response`.
struct ResponseVisitor<O> {
    // Zero-sized marker recording the payload type `O`; no data is stored.
    data: PhantomData<O>,
}
impl<O: Deserialize> de::Visitor for ResponseVisitor<O> {
    type Value = Response<O>;

    /// Builds a `Response` from the JSON object sent by the server, keyed by
    /// the single-letter field names of the wire protocol.
    fn visit_map<V: de::MapVisitor>(&mut self, mut visitor: V) -> Result<Response<O>, V::Error> {
        // Fields as originally found in the wire protocol
        field_visitor!(
            enum Field {
                "t" => ResponseType,
                "n" => ResponseNotes,
                "r" => Response,
                "b" => Backtrace,
                "p" => Profile,
                "e" => ErrorKind,
            },
            FieldVisitor
        );
        let mut variant_discriminant = None;
        let mut variant = None;
        let mut notes = None;
        let mut backtrace = None;
        let mut profile = None;
        let mut error_kind = None;
        // If a field appears more than once the previous value will be overwritten.
        while let Some(key) = try!(visitor.visit_key()) {
            match key {
                Field::ResponseType => variant_discriminant = Some(try!(visitor.visit_value())),
                Field::Response => {
                    // The fields in a json encoded object are not guaranteed to be ordered.
                    if let Some(discriminant) = variant_discriminant {
                        // If the `ResponseType` field was already deserialized the target type of
                        // the `Response` field can be determined quickly.
                        variant = Some(try!(Variant::visit(discriminant, &mut visitor)));
                    } else {
                        // If the `ResponseType` field wasn't deserialized yet there is an ambiguity
                        // between errors sent as `String` and the actual response in case `O`
                        // equals `String`.
                        return Err(de::Error::custom(
                            "The response (field `r`) was received but the response type (field \
                             `t`) wasn't yet. Although that ordering is compliant with the JSON \
                             specification it's not supported currently."));
                    }
                }
                Field::Backtrace => backtrace = Some(try!(visitor.visit_value())),
                Field::Profile => profile = Some(try!(visitor.visit_value())),
                Field::ResponseNotes => notes = Some(try!(visitor.visit_value())),
                Field::ErrorKind => error_kind = Some(try!(visitor.visit_value())),
            }
        }
        try!(visitor.end());
        // `r` is the only mandatory field; everything else stays `None`.
        let variant = try!(variant.ok_or(de::Error::missing_field("r")));
        Ok(Response {
            // The token isn't contained in the serialized data, zero is a placeholder.
            token: 0,
            variant: variant,
            error_kind: error_kind,
            notes: notes,
            profile: profile,
            backtrace: backtrace,
        })
    }
}
/// The queried data or an error message. Contains information about the state of the query.
// The numeric wire discriminants for these variants are decoded in
// `Variant::visit` below.
#[derive(PartialEq, Debug)]
pub enum Variant<O> {
    /// Query returned a single RQL datatype.
    Atom(O),
    /// Query returned a sequence of RQL datatypes.
    Sequence(Vec<O>),
    /// Query returned a partial sequence of RQL datatypes.
    ///
    /// If you send a `Continue` query with the same token as this response you will get more of the
    /// sequence. Keep sending `Continue` queries until you get back `Variant::Sequence`.
    Partial(Vec<O>),
    /// A `NoReplyWait` query completed.
    WaitComplete,
    /// Means the client is buggy. An example is if the client sends a malformed protobuf, or tries
    /// to send `Continue` for an unknown token.
    ///
    /// The list of error messages which are associated with `ClientError` includes:
    ///
    /// * "Query size ({}) greater than maximum ({}).", ErrorKind::ResourceLimit
    /// * "Client is buggy (failed to deserialize query).", ErrorKind::QueryLogic
    ClientError(String),
    /// Means the query failed due to a unrecognized optional argument or the wrong number of
    /// arguments to a command.
    CompileError(String),
    /// Means the query failed at runtime, basically all errors unrelated to parsing and
    /// compilation.
    ///
    /// An example is if you add together two values from a table, but they turn out at runtime to
    /// be booleans rather than numbers.
    ///
    /// One of the error messages which are associated with `RuntimeError` is:
    ///
    /// * "Response size ({}) greater than maximum ({}).", ErrorKind::ResourceLimit
    RuntimeError(String),
}
impl<O: Deserialize> Variant<O> {
    /// Deserializes the `r` field into the variant selected by `kind_id`,
    /// the numeric value of the wire-protocol `t` field.
    ///
    /// Atom, wait-complete and error payloads arrive wrapped in a
    /// single-element JSON array, hence the one-tuple destructuring.
    fn visit<V: de::MapVisitor>(kind_id: u8, mut visitor: V) -> Result<Self, V::Error> {
        match kind_id {
            1 => {
                let (atom,) = try!(visitor.visit_value());
                Ok(Variant::Atom(atom))
            }
            2 => {
                let seq = try!(visitor.visit_value());
                Ok(Variant::Sequence(seq))
            }
            3 => {
                let seq = try!(visitor.visit_value());
                Ok(Variant::Partial(seq))
            }
            4 => {
                // Payload is an empty array; only the id carries meaning.
                let ((),) = try!(visitor.visit_value());
                Ok(Variant::WaitComplete)
            }
            16 => {
                let (error,) = try!(visitor.visit_value());
                Ok(Variant::ClientError(error))
            }
            17 => {
                let (error,) = try!(visitor.visit_value());
                Ok(Variant::CompileError(error))
            }
            18 => {
                let (error,) = try!(visitor.visit_value());
                Ok(Variant::RuntimeError(error))
            }
            id => Err(de::Error::custom(format!("Got an invalid `Variant` id \"{}\"", id))),
        }
    }
}
/// Categories of runtime errors.
///
/// If `Variant` is `RuntimeError`, this may be filled in with more information about the error.
// The numeric wire ids for these categories are mapped in `ErrorKind::from_id`.
#[derive(Eq, PartialEq, Debug)]
pub enum ErrorKind {
    /// Query execution stopped due to an internal error, i.e. a server bug.
    Internal,
    /// Query execution caused a resource limit (for example, the array size limit) to be exceeded.
    ResourceLimit,
    /// The query contains a logical impossibility, such as adding a number to a string.
    QueryLogic,
    /// A `QueryLogic` error that results from accessing a non-existent field or something else that
    /// can be handled with the `default` command.
    NonExistence,
    /// The operation has failed due to cluster state, configuration or table availability.
    OpFailed,
    /// The status of the operation cannot be verified due to cluster state, configuration or table
    /// availability.
    OpIndeterminate,
    /// An error produced by the `error` command.
    User,
}
impl ErrorKind {
fn from_id(id: u64) -> Option<ErrorKind> {
use self::ErrorKind::*;
match id {
1_000_000 => Some(Internal),
2_000_000 => Some(ResourceLimit),
3_000_000 => Some(QueryLogic),
3_100_000 => Some(NonExistence),
4_100_000 => Some(OpFailed),
4_200_000 => Some(OpIndeterminate),
5_000_000 => Some(User),
_ => None,
}
}
}
impl Deserialize for ErrorKind {
fn deserialize<D: Deserializer>(deserializer: &mut D) -> Result<ErrorKind, D::Error> {
deserializer.deserialize(ErrorKindVisitor)
}
}
struct ErrorKindVisitor;
impl de::Visitor for ErrorKindVisitor {
type Value = ErrorKind;
fn visit_u64<E: de::Error>(&mut self, value: u64) -> Result<Self::Value, E> {
ErrorKind::from_id(value).ok_or_else(|| de::Error::custom("Invalid `ErrorKind` id."))
}
}
/// `Note`s are used to provide futher information about the query response. Currently all the notes
/// indicate that a stream has certain special properties.
#[derive(Debug)]
enum Note {
/// The stream is a changefeed stream.
///
/// e.g. `r.table("test").changes()`
SequenceFeed,
/// The stream is a point changefeed stream.
///
/// e.g. `r.table("test").get(0).changes()`
AtomFeed,
/// The stream is an order_by_limit changefeed stream.
///
/// e.g. `r.table("test").order_by(index: "id").limit(5).changes()`
OrderByLimitFeed,
/// The stream is a union of multiple changefeed types that can't be collapsed to a single type.
///
/// e.g. `r.table("test").changes().union(r.table("test").get(0).changes())`
UnionedFeed,
/// The stream is a changefeed stream and includes notes on what state the changefeed stream is
/// in.
///
/// e.g. object of the form `{state: "initializing"}`
IncludesStates,
}
impl Note {
fn from_u64(num: u64) -> Option<Note> {
match num {
1 => Some(Note::SequenceFeed),
2 => Some(Note::AtomFeed),
3 => Some(Note::OrderByLimitFeed),
4 => Some(Note::UnionedFeed),
5 => Some(Note::IncludesStates),
_ => None,
}
}
}
impl Deserialize for Note {
fn deserialize<D: Deserializer>(deserializer: &mut D) -> Result<Note, D::Error> {
deserializer.deserialize(NoteVisitor)
}
}
struct NoteVisitor;
impl de::Visitor for NoteVisitor {
type Value = Note;
fn visit_u64<E: de::Error>(&mut self, value: u64) -> Result<Note, E> {
Note::from_u64(value).ok_or_else(|| de::Error::custom("Invalid `Note` id."))
}
}
/// *Incomplete:* Information about the execution of a query.
#[derive(Debug)]
struct Profile {
description: String,
duration: f64,
subtasks: Vec<Profile>,
}
impl Deserialize for Profile {
fn deserialize<D: Deserializer>(deserializer: &mut D) -> Result<Profile, D::Error> {
// Original fields as found in the wire protocol
field_visitor!(
enum Field {
"description" => Description,
"duration(ms)" => Duration,
"sub_tasks" => Subtasks,
"parallel_tasks" => ParallelTasks,
},
FieldVisitor
);
struct ProfileVisitor;
impl de::Visitor for ProfileVisitor {
type Value = Profile;
fn visit_seq<V>(&mut self, mut visitor: V) -> Result<Profile, V::Error>
where V: de::SeqVisitor
{
// There should be only one element in the sequence.
let datum = visitor.visit()
.and_then(|x| x.ok_or_else(|| de::Error::custom("malformed")));
try!(visitor.end());
datum
}
fn visit_map<V>(&mut self, mut visitor: V) -> Result<Profile, V::Error>
where V: de::MapVisitor
{
let mut description = None;
let mut duration = None;
let mut subtasks = None;
let mut parallel_tasks: Option<Vec<Profile>> = None;
while let Some(key) = try!(visitor.visit_key()) {
match key {
Field::Description => description = Some(try!(visitor.visit_value())),
Field::Duration => duration = Some(try!(visitor.visit_value())),
Field::Subtasks => subtasks = Some(try!(visitor.visit_value())),
Field::ParallelTasks => parallel_tasks = Some(try!(visitor.visit_value())),
}
}
try!(visitor.end());
// TODO: The data model can't represent parallel tasks. Fix it!
if let Some(profile) = parallel_tasks {
Ok(Profile {
description: "Parallel tasks".into(),
duration: 0.0,
subtasks: profile,
})
} else {
let description = try!(description
.ok_or(de::Error::missing_field("description")));
let duration = try!(duration.ok_or(de::Error::missing_field("duration(ms)")));
let subtasks = try!(subtasks.ok_or(de::Error::missing_field("sub_tasks")));
Ok(Profile {
description: description,
duration: duration,
subtasks: subtasks,
})
}
}
}
deserializer.deserialize(ProfileVisitor)
}
}
/// Describes the position in a `Query` where an error was found.
#[derive(Debug)]
pub enum Frame {
/// Error occured in a positional argument.
Pos(u32),
/// Error occured in a named optional argument.
Opt(String),
}
impl Deserialize for Frame {
fn deserialize<D: Deserializer>(deserializer: &mut D) -> Result<Frame, D::Error> {
deserializer.deserialize(FrameVisitor)
}
}
struct FrameVisitor;
impl de::Visitor for FrameVisitor {
type Value = Frame;
fn visit_u64<E: de::Error>(&mut self, v: u64) -> Result<Self::Value, E> {
Ok(Frame::Pos(v as u32))
}
fn visit_str<E: de::Error>(&mut self, v: &str) -> Result<Self::Value, E> {
Ok(Frame::Opt(v.into()))
}
fn visit_string<E: de::Error>(&mut self, v: String) -> Result<Self::Value, E> {
Ok(Frame::Opt(v))
}
}
|
/// Additional notes which indicate special properties the response has.
notes: Option<Vec<Note>>,
|
random_line_split
|
response.rs
|
//! Data structures containing information about the outcome of an executed query.
//!
//! This module implements the step "Receive responses" as described in [Writing RethinkDB drivers]
//! (http://rethinkdb.com/docs/writing-drivers/#receive-responses).
use std::io::{self, Read};
use std::marker::PhantomData;
use byteorder::{ReadBytesExt, LittleEndian};
use serde::de::{self, Deserialize, Deserializer};
use serde_json;
/// A response containing the queried data and information about the execution of the query.
///
/// `O` is the type of queried data.
#[derive(Debug)]
pub struct Response<O> {
/// Indicates which query this response corresponds to.
token: u64,
/// Describes the kind of response got and contains the respective data.
variant: Variant<O>,
/// The category an error belongs to.
error_kind: Option<ErrorKind>,
/// Additional notes which indicate special properties the response has.
notes: Option<Vec<Note>>,
/// The profile if requested in the corresponding query.
profile: Option<Box<Profile>>,
/// The backtrace says where in the query the error occurred.
backtrace: Option<Vec<Frame>>,
}
impl<O> Response<O> {
/// Returns the token identifying this `Response`.
pub fn token(&self) -> u64 {
self.token
}
/// Returns the response data.
pub fn variant(&self) -> &Variant<O> {
&self.variant
}
/// Returns the category of a runtime error, if one occured.
pub fn error_kind(&self) -> &Option<ErrorKind> {
&self.error_kind
}
}
impl<O: Deserialize> Response<O> {
/// Deserializes a `Response` from a `Read`er.
///
/// # Failure
///
/// If an error occurs in deserialization or validation it is not guaranteed that this method
/// reads a syntactically and logically complete block of data. The `Read`er is in a corrupted
/// state and further reads will fail with spurious reasons.
pub fn deserialize<R: Read>(reader: &mut R) -> io::Result<Response<O>> {
debug!("Load response");
let token = try!(reader.read_u64::<LittleEndian>());
let len = try!(reader.read_u32::<LittleEndian>());
let mut raw_response = vec![0u8; len as usize];
try!(reader.read_exact(&mut raw_response));
let mut response: Response<O> = serde_json::de::from_reader(&raw_response[..]).unwrap();
response.token = token;
Ok(response)
}
}
impl<O: Deserialize> Deserialize for Response<O> {
fn deserialize<D: Deserializer>(deserializer: &mut D) -> Result<Response<O>, D::Error> {
deserializer.deserialize(ResponseVisitor { data: PhantomData })
}
}
struct ResponseVisitor<O> {
data: PhantomData<O>,
}
impl<O: Deserialize> de::Visitor for ResponseVisitor<O> {
type Value = Response<O>;
fn visit_map<V: de::MapVisitor>(&mut self, mut visitor: V) -> Result<Response<O>, V::Error> {
// Fields as originally found in the wire protocol
field_visitor!(
enum Field {
"t" => ResponseType,
"n" => ResponseNotes,
"r" => Response,
"b" => Backtrace,
"p" => Profile,
"e" => ErrorKind,
},
FieldVisitor
);
let mut variant_discriminant = None;
let mut variant = None;
let mut notes = None;
let mut backtrace = None;
let mut profile = None;
let mut error_kind = None;
// If a field appears more than once the previous value will be overwritten.
while let Some(key) = try!(visitor.visit_key()) {
match key {
Field::ResponseType => variant_discriminant = Some(try!(visitor.visit_value())),
Field::Response => {
// The fields in a json encoded object are not guaranteed to be ordered.
if let Some(discriminant) = variant_discriminant {
// If the `ResponseType` field was already deserialized the target type of
// the `Response` field can be determined quickly.
variant = Some(try!(Variant::visit(discriminant, &mut visitor)));
} else {
// If the `ResponseType` field wasn't deserialized yet there is an ambiguity
// between errors sent as `String` and the actual response in case `O`
// equals `String`.
return Err(de::Error::custom(
"The response (field `r`) was received but the response type (field \
`t`) wasn't yet. Although that ordering is compliant with the JSON \
specification it's not supported currently."));
}
}
Field::Backtrace => backtrace = Some(try!(visitor.visit_value())),
Field::Profile => profile = Some(try!(visitor.visit_value())),
Field::ResponseNotes => notes = Some(try!(visitor.visit_value())),
Field::ErrorKind => error_kind = Some(try!(visitor.visit_value())),
}
}
try!(visitor.end());
let variant = try!(variant.ok_or(de::Error::missing_field("r")));
Ok(Response {
// The token isn't contained in the serialized data, zero is a placeholder.
token: 0,
variant: variant,
error_kind: error_kind,
notes: notes,
profile: profile,
backtrace: backtrace,
})
}
}
/// The queried data or an error message. Contains information about the state of the query.
#[derive(PartialEq, Debug)]
pub enum Variant<O> {
/// Query returned a single RQL datatype.
Atom(O),
/// Query returned a sequence of RQL datatypes.
Sequence(Vec<O>),
/// Query returned a partial sequence of RQL datatypes.
///
/// If you send a `Continue` query with the same token as this response you will get more of the
/// sequence. Keep sending `Continue` queries until you get back `Variant::Sequence`.
Partial(Vec<O>),
/// A `NoReplyWait` query completed.
WaitComplete,
/// Means the client is buggy. An example is if the client sends a malformed protobuf, or tries
/// to send `Continue` for an unknown token.
///
/// The list of error messages which are associated with `ClientError` includes:
///
/// * "Query size ({}) greater than maximum ({}).", ErrorKind::ResourceLimit
/// * "Client is buggy (failed to deserialize query).", ErrorKind::QueryLogic
ClientError(String),
/// Means the query failed due to a unrecognized optional argument or the wrong number of
/// arguments to a command.
CompileError(String),
/// Means the query failed at runtime, basically all errors unrelated to parsing and
/// compilation.
///
/// An example is if you add together two values from a table, but they turn out at runtime to
/// be booleans rather than numbers.
///
/// One of the error messages which are associated with `RuntimeError` is:
///
/// * "Response size ({}) greater than maximum ({}).", ErrorKind::ResourceLimit
RuntimeError(String),
}
impl<O: Deserialize> Variant<O> {
fn visit<V: de::MapVisitor>(kind_id: u8, mut visitor: V) -> Result<Self, V::Error> {
match kind_id {
1 => {
let (atom,) = try!(visitor.visit_value());
Ok(Variant::Atom(atom))
}
2 => {
let seq = try!(visitor.visit_value());
Ok(Variant::Sequence(seq))
}
3 => {
let seq = try!(visitor.visit_value());
Ok(Variant::Partial(seq))
}
4 => {
let ((),) = try!(visitor.visit_value());
Ok(Variant::WaitComplete)
}
16 => {
let (error,) = try!(visitor.visit_value());
Ok(Variant::ClientError(error))
}
17 => {
let (error,) = try!(visitor.visit_value());
Ok(Variant::CompileError(error))
}
18 => {
let (error,) = try!(visitor.visit_value());
Ok(Variant::RuntimeError(error))
}
id => Err(de::Error::custom(format!("Got an invalid `Variant` id \"{}\"", id))),
}
}
}
/// Categories of runtime errors.
///
/// If `Variant` is `RuntimeError`, this may be filled in with more information about the error.
#[derive(Eq, PartialEq, Debug)]
pub enum ErrorKind {
/// Query execution stopped due to an internal error, i.e. a server bug.
Internal,
/// Query execution caused a resource limit (for example, the array size limit) to be exceeded.
ResourceLimit,
/// The query contains a logical impossibility, such as adding a number to a string.
QueryLogic,
/// A `QueryLogic` error that results from accessing a non-existent field or something else that
/// can be handled with the `default` command.
NonExistence,
/// The operation has failed due to cluster state, configuration or table availability.
OpFailed,
/// The status of the operation cannot be verified due to cluster state, configuration or table
/// availability.
OpIndeterminate,
/// An error produced by the `error` command.
User,
}
impl ErrorKind {
fn from_id(id: u64) -> Option<ErrorKind> {
use self::ErrorKind::*;
match id {
1_000_000 => Some(Internal),
2_000_000 => Some(ResourceLimit),
3_000_000 => Some(QueryLogic),
3_100_000 => Some(NonExistence),
4_100_000 => Some(OpFailed),
4_200_000 => Some(OpIndeterminate),
5_000_000 => Some(User),
_ => None,
}
}
}
impl Deserialize for ErrorKind {
fn deserialize<D: Deserializer>(deserializer: &mut D) -> Result<ErrorKind, D::Error> {
deserializer.deserialize(ErrorKindVisitor)
}
}
struct ErrorKindVisitor;
impl de::Visitor for ErrorKindVisitor {
type Value = ErrorKind;
fn visit_u64<E: de::Error>(&mut self, value: u64) -> Result<Self::Value, E> {
ErrorKind::from_id(value).ok_or_else(|| de::Error::custom("Invalid `ErrorKind` id."))
}
}
/// `Note`s are used to provide futher information about the query response. Currently all the notes
/// indicate that a stream has certain special properties.
#[derive(Debug)]
enum Note {
/// The stream is a changefeed stream.
///
/// e.g. `r.table("test").changes()`
SequenceFeed,
/// The stream is a point changefeed stream.
///
/// e.g. `r.table("test").get(0).changes()`
AtomFeed,
/// The stream is an order_by_limit changefeed stream.
///
/// e.g. `r.table("test").order_by(index: "id").limit(5).changes()`
OrderByLimitFeed,
/// The stream is a union of multiple changefeed types that can't be collapsed to a single type.
///
/// e.g. `r.table("test").changes().union(r.table("test").get(0).changes())`
UnionedFeed,
/// The stream is a changefeed stream and includes notes on what state the changefeed stream is
/// in.
///
/// e.g. object of the form `{state: "initializing"}`
IncludesStates,
}
impl Note {
fn
|
(num: u64) -> Option<Note> {
match num {
1 => Some(Note::SequenceFeed),
2 => Some(Note::AtomFeed),
3 => Some(Note::OrderByLimitFeed),
4 => Some(Note::UnionedFeed),
5 => Some(Note::IncludesStates),
_ => None,
}
}
}
impl Deserialize for Note {
fn deserialize<D: Deserializer>(deserializer: &mut D) -> Result<Note, D::Error> {
deserializer.deserialize(NoteVisitor)
}
}
struct NoteVisitor;
impl de::Visitor for NoteVisitor {
type Value = Note;
fn visit_u64<E: de::Error>(&mut self, value: u64) -> Result<Note, E> {
Note::from_u64(value).ok_or_else(|| de::Error::custom("Invalid `Note` id."))
}
}
/// *Incomplete:* Information about the execution of a query.
#[derive(Debug)]
struct Profile {
description: String,
duration: f64,
subtasks: Vec<Profile>,
}
impl Deserialize for Profile {
fn deserialize<D: Deserializer>(deserializer: &mut D) -> Result<Profile, D::Error> {
// Original fields as found in the wire protocol
field_visitor!(
enum Field {
"description" => Description,
"duration(ms)" => Duration,
"sub_tasks" => Subtasks,
"parallel_tasks" => ParallelTasks,
},
FieldVisitor
);
struct ProfileVisitor;
impl de::Visitor for ProfileVisitor {
type Value = Profile;
fn visit_seq<V>(&mut self, mut visitor: V) -> Result<Profile, V::Error>
where V: de::SeqVisitor
{
// There should be only one element in the sequence.
let datum = visitor.visit()
.and_then(|x| x.ok_or_else(|| de::Error::custom("malformed")));
try!(visitor.end());
datum
}
fn visit_map<V>(&mut self, mut visitor: V) -> Result<Profile, V::Error>
where V: de::MapVisitor
{
let mut description = None;
let mut duration = None;
let mut subtasks = None;
let mut parallel_tasks: Option<Vec<Profile>> = None;
while let Some(key) = try!(visitor.visit_key()) {
match key {
Field::Description => description = Some(try!(visitor.visit_value())),
Field::Duration => duration = Some(try!(visitor.visit_value())),
Field::Subtasks => subtasks = Some(try!(visitor.visit_value())),
Field::ParallelTasks => parallel_tasks = Some(try!(visitor.visit_value())),
}
}
try!(visitor.end());
// TODO: The data model can't represent parallel tasks. Fix it!
if let Some(profile) = parallel_tasks {
Ok(Profile {
description: "Parallel tasks".into(),
duration: 0.0,
subtasks: profile,
})
} else {
let description = try!(description
.ok_or(de::Error::missing_field("description")));
let duration = try!(duration.ok_or(de::Error::missing_field("duration(ms)")));
let subtasks = try!(subtasks.ok_or(de::Error::missing_field("sub_tasks")));
Ok(Profile {
description: description,
duration: duration,
subtasks: subtasks,
})
}
}
}
deserializer.deserialize(ProfileVisitor)
}
}
/// Describes the position in a `Query` where an error was found.
#[derive(Debug)]
pub enum Frame {
/// Error occured in a positional argument.
Pos(u32),
/// Error occured in a named optional argument.
Opt(String),
}
impl Deserialize for Frame {
fn deserialize<D: Deserializer>(deserializer: &mut D) -> Result<Frame, D::Error> {
deserializer.deserialize(FrameVisitor)
}
}
struct FrameVisitor;
impl de::Visitor for FrameVisitor {
type Value = Frame;
fn visit_u64<E: de::Error>(&mut self, v: u64) -> Result<Self::Value, E> {
Ok(Frame::Pos(v as u32))
}
fn visit_str<E: de::Error>(&mut self, v: &str) -> Result<Self::Value, E> {
Ok(Frame::Opt(v.into()))
}
fn visit_string<E: de::Error>(&mut self, v: String) -> Result<Self::Value, E> {
Ok(Frame::Opt(v))
}
}
|
from_u64
|
identifier_name
|
actions.rs
|
use chrono::NaiveDateTime;
use diesel::{dsl::exists, prelude::*, select};
use regex::Regex;
use std::str::FromStr;
use url::Url;
use crate::{
models::{Mention, NewMention},
DbError,
};
pub fn target_exists(url: &Url, conn: &PgConnection) -> Option<(String, i32)> {
use crate::schema::articles::dsl::{articles, id as art_id};
use crate::schema::likes::dsl::{id as lik_id, likes};
use crate::schema::notes::dsl::{id as not_id, notes};
use crate::schema::pictures::dsl::{id as pic_id, pictures};
let (object_type, obj_id) = match get_object_type_and_id(url) {
Some((object_type, obj_id)) => (object_type, obj_id),
None => return None,
};
let exists = match object_type.as_str() {
"articles" => select(exists(articles.filter(art_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
"notes" => select(exists(notes.filter(not_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
"pictures" => select(exists(pictures.filter(pic_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
"likes" => select(exists(likes.filter(lik_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
_ => false,
};
if exists {
Some((object_type, obj_id))
} else {
None
}
}
pub fn get_object_type_and_id(url: &Url) -> Option<(String, i32)> {
let path = url.path();
let re = Regex::new(r"^/([^/]+)/(\d+)$").unwrap();
let rslt = re.captures(path);
if rslt.is_none() {
return None;
}
let caps = rslt.unwrap();
let object_type = caps.get(1);
let id_str = caps.get(2);
if object_type.is_none() || id_str.is_none() {
return None;
}
let object_type = object_type.unwrap().as_str();
let id: Result<i32, _> = FromStr::from_str(id_str.unwrap().as_str());
if id.is_err() {
return None;
}
Some((object_type.to_owned(), id.unwrap()))
}
pub fn mention_exists(source: &str, target: &str, conn: &PgConnection) -> bool {
use crate::schema::mentions::dsl::*;
select(exists(
mentions.filter(target_url.eq(target)).filter(source_url.eq(source)),
))
.get_result(conn)
.unwrap_or_else(|_| false)
}
pub fn
|
(
source_url: String,
target_url: String,
object_type: &str,
id: i32,
author: String,
title: String,
conn: &PgConnection,
) -> Result<Mention, DbError> {
use crate::schema::mentions;
let now = select(diesel::dsl::now).get_result::<NaiveDateTime>(conn)?;
let mut data = NewMention {
source_url,
target_url,
author,
title,
mention_type: "TODO:".to_owned(),
inserted_at: Some(now),
updated_at: Some(now),
..Default::default()
};
match object_type {
"note" => {
data.note_id = Some(id);
}
"picture" => {
data.picture_id = Some(id);
}
"article" => {
data.article_id = Some(id);
}
_ => {}
};
let mention = diesel::insert_into(mentions::table)
.values(data)
.get_result::<Mention>(conn)?;
Ok(mention)
}
|
create_mention
|
identifier_name
|
actions.rs
|
use chrono::NaiveDateTime;
use diesel::{dsl::exists, prelude::*, select};
use regex::Regex;
use std::str::FromStr;
use url::Url;
use crate::{
models::{Mention, NewMention},
DbError,
};
pub fn target_exists(url: &Url, conn: &PgConnection) -> Option<(String, i32)> {
use crate::schema::articles::dsl::{articles, id as art_id};
use crate::schema::likes::dsl::{id as lik_id, likes};
use crate::schema::notes::dsl::{id as not_id, notes};
use crate::schema::pictures::dsl::{id as pic_id, pictures};
let (object_type, obj_id) = match get_object_type_and_id(url) {
Some((object_type, obj_id)) => (object_type, obj_id),
None => return None,
};
let exists = match object_type.as_str() {
"articles" => select(exists(articles.filter(art_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
"notes" => select(exists(notes.filter(not_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
"pictures" => select(exists(pictures.filter(pic_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
"likes" => select(exists(likes.filter(lik_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
_ => false,
};
if exists {
Some((object_type, obj_id))
} else {
None
}
}
pub fn get_object_type_and_id(url: &Url) -> Option<(String, i32)> {
let path = url.path();
let re = Regex::new(r"^/([^/]+)/(\d+)$").unwrap();
let rslt = re.captures(path);
|
if rslt.is_none() {
return None;
}
let caps = rslt.unwrap();
let object_type = caps.get(1);
let id_str = caps.get(2);
if object_type.is_none() || id_str.is_none() {
return None;
}
let object_type = object_type.unwrap().as_str();
let id: Result<i32, _> = FromStr::from_str(id_str.unwrap().as_str());
if id.is_err() {
return None;
}
Some((object_type.to_owned(), id.unwrap()))
}
pub fn mention_exists(source: &str, target: &str, conn: &PgConnection) -> bool {
use crate::schema::mentions::dsl::*;
select(exists(
mentions.filter(target_url.eq(target)).filter(source_url.eq(source)),
))
.get_result(conn)
.unwrap_or_else(|_| false)
}
pub fn create_mention(
source_url: String,
target_url: String,
object_type: &str,
id: i32,
author: String,
title: String,
conn: &PgConnection,
) -> Result<Mention, DbError> {
use crate::schema::mentions;
let now = select(diesel::dsl::now).get_result::<NaiveDateTime>(conn)?;
let mut data = NewMention {
source_url,
target_url,
author,
title,
mention_type: "TODO:".to_owned(),
inserted_at: Some(now),
updated_at: Some(now),
..Default::default()
};
match object_type {
"note" => {
data.note_id = Some(id);
}
"picture" => {
data.picture_id = Some(id);
}
"article" => {
data.article_id = Some(id);
}
_ => {}
};
let mention = diesel::insert_into(mentions::table)
.values(data)
.get_result::<Mention>(conn)?;
Ok(mention)
}
|
random_line_split
|
|
actions.rs
|
use chrono::NaiveDateTime;
use diesel::{dsl::exists, prelude::*, select};
use regex::Regex;
use std::str::FromStr;
use url::Url;
use crate::{
models::{Mention, NewMention},
DbError,
};
pub fn target_exists(url: &Url, conn: &PgConnection) -> Option<(String, i32)> {
use crate::schema::articles::dsl::{articles, id as art_id};
use crate::schema::likes::dsl::{id as lik_id, likes};
use crate::schema::notes::dsl::{id as not_id, notes};
use crate::schema::pictures::dsl::{id as pic_id, pictures};
let (object_type, obj_id) = match get_object_type_and_id(url) {
Some((object_type, obj_id)) => (object_type, obj_id),
None => return None,
};
let exists = match object_type.as_str() {
"articles" => select(exists(articles.filter(art_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
"notes" => select(exists(notes.filter(not_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
"pictures" => select(exists(pictures.filter(pic_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
"likes" => select(exists(likes.filter(lik_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
_ => false,
};
if exists {
Some((object_type, obj_id))
} else {
None
}
}
pub fn get_object_type_and_id(url: &Url) -> Option<(String, i32)> {
let path = url.path();
let re = Regex::new(r"^/([^/]+)/(\d+)$").unwrap();
let rslt = re.captures(path);
if rslt.is_none() {
return None;
}
let caps = rslt.unwrap();
let object_type = caps.get(1);
let id_str = caps.get(2);
if object_type.is_none() || id_str.is_none() {
return None;
}
let object_type = object_type.unwrap().as_str();
let id: Result<i32, _> = FromStr::from_str(id_str.unwrap().as_str());
if id.is_err()
|
Some((object_type.to_owned(), id.unwrap()))
}
pub fn mention_exists(source: &str, target: &str, conn: &PgConnection) -> bool {
use crate::schema::mentions::dsl::*;
select(exists(
mentions.filter(target_url.eq(target)).filter(source_url.eq(source)),
))
.get_result(conn)
.unwrap_or_else(|_| false)
}
pub fn create_mention(
source_url: String,
target_url: String,
object_type: &str,
id: i32,
author: String,
title: String,
conn: &PgConnection,
) -> Result<Mention, DbError> {
use crate::schema::mentions;
let now = select(diesel::dsl::now).get_result::<NaiveDateTime>(conn)?;
let mut data = NewMention {
source_url,
target_url,
author,
title,
mention_type: "TODO:".to_owned(),
inserted_at: Some(now),
updated_at: Some(now),
..Default::default()
};
match object_type {
"note" => {
data.note_id = Some(id);
}
"picture" => {
data.picture_id = Some(id);
}
"article" => {
data.article_id = Some(id);
}
_ => {}
};
let mention = diesel::insert_into(mentions::table)
.values(data)
.get_result::<Mention>(conn)?;
Ok(mention)
}
|
{
return None;
}
|
conditional_block
|
actions.rs
|
use chrono::NaiveDateTime;
use diesel::{dsl::exists, prelude::*, select};
use regex::Regex;
use std::str::FromStr;
use url::Url;
use crate::{
models::{Mention, NewMention},
DbError,
};
pub fn target_exists(url: &Url, conn: &PgConnection) -> Option<(String, i32)> {
use crate::schema::articles::dsl::{articles, id as art_id};
use crate::schema::likes::dsl::{id as lik_id, likes};
use crate::schema::notes::dsl::{id as not_id, notes};
use crate::schema::pictures::dsl::{id as pic_id, pictures};
let (object_type, obj_id) = match get_object_type_and_id(url) {
Some((object_type, obj_id)) => (object_type, obj_id),
None => return None,
};
let exists = match object_type.as_str() {
"articles" => select(exists(articles.filter(art_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
"notes" => select(exists(notes.filter(not_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
"pictures" => select(exists(pictures.filter(pic_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
"likes" => select(exists(likes.filter(lik_id.eq(obj_id))))
.get_result(conn)
.unwrap_or_else(|_| false),
_ => false,
};
if exists {
Some((object_type, obj_id))
} else {
None
}
}
pub fn get_object_type_and_id(url: &Url) -> Option<(String, i32)>
|
let id: Result<i32, _> = FromStr::from_str(id_str.unwrap().as_str());
if id.is_err() {
return None;
}
Some((object_type.to_owned(), id.unwrap()))
}
pub fn mention_exists(source: &str, target: &str, conn: &PgConnection) -> bool {
use crate::schema::mentions::dsl::*;
select(exists(
mentions.filter(target_url.eq(target)).filter(source_url.eq(source)),
))
.get_result(conn)
.unwrap_or_else(|_| false)
}
pub fn create_mention(
source_url: String,
target_url: String,
object_type: &str,
id: i32,
author: String,
title: String,
conn: &PgConnection,
) -> Result<Mention, DbError> {
use crate::schema::mentions;
let now = select(diesel::dsl::now).get_result::<NaiveDateTime>(conn)?;
let mut data = NewMention {
source_url,
target_url,
author,
title,
mention_type: "TODO:".to_owned(),
inserted_at: Some(now),
updated_at: Some(now),
..Default::default()
};
match object_type {
"note" => {
data.note_id = Some(id);
}
"picture" => {
data.picture_id = Some(id);
}
"article" => {
data.article_id = Some(id);
}
_ => {}
};
let mention = diesel::insert_into(mentions::table)
.values(data)
.get_result::<Mention>(conn)?;
Ok(mention)
}
|
{
let path = url.path();
let re = Regex::new(r"^/([^/]+)/(\d+)$").unwrap();
let rslt = re.captures(path);
if rslt.is_none() {
return None;
}
let caps = rslt.unwrap();
let object_type = caps.get(1);
let id_str = caps.get(2);
if object_type.is_none() || id_str.is_none() {
return None;
}
let object_type = object_type.unwrap().as_str();
|
identifier_body
|
icmp.rs
|
use core::mem::size_of;
use core::option::Option;
use common::debug::*;
use common::vec::*;
use network::common::*;
#[derive(Copy, Clone)]
#[repr(packed)]
pub struct ICMPHeader {
pub _type: u8,
pub code: u8,
pub checksum: Checksum,
pub data: [u8; 4]
}
pub struct ICMP {
pub header: ICMPHeader,
pub data: Vec<u8>
}
impl FromBytes for ICMP {
fn from_bytes(bytes: Vec<u8>) -> Option<ICMP> {
if bytes.len() >= size_of::<ICMPHeader>() {
unsafe {
return Option::Some(ICMP {
header: *(bytes.as_ptr() as *const ICMPHeader),
data: bytes.sub(size_of::<ICMPHeader>(), bytes.len() - size_of::<ICMPHeader>())
});
}
}
return Option::None;
}
}
impl ToBytes for ICMP {
fn to_bytes(&self) -> Vec<u8> {
unsafe{
let header_ptr: *const ICMPHeader = &self.header;
let mut ret = Vec::from_raw_buf(header_ptr as *const u8, size_of::<ICMPHeader>());
ret.push_all(&self.data);
return ret;
}
}
}
impl ICMP {
pub fn d(&self)
|
}
|
{
d("ICMP ");
dbh(self.header._type);
d(" code ");
dbh(self.header.code);
d(" data ");
dd(self.data.len());
}
|
identifier_body
|
icmp.rs
|
use core::mem::size_of;
use core::option::Option;
use common::debug::*;
use common::vec::*;
use network::common::*;
#[derive(Copy, Clone)]
#[repr(packed)]
pub struct ICMPHeader {
pub _type: u8,
pub code: u8,
pub checksum: Checksum,
pub data: [u8; 4]
}
pub struct ICMP {
pub header: ICMPHeader,
pub data: Vec<u8>
}
|
unsafe {
return Option::Some(ICMP {
header: *(bytes.as_ptr() as *const ICMPHeader),
data: bytes.sub(size_of::<ICMPHeader>(), bytes.len() - size_of::<ICMPHeader>())
});
}
}
return Option::None;
}
}
impl ToBytes for ICMP {
fn to_bytes(&self) -> Vec<u8> {
unsafe{
let header_ptr: *const ICMPHeader = &self.header;
let mut ret = Vec::from_raw_buf(header_ptr as *const u8, size_of::<ICMPHeader>());
ret.push_all(&self.data);
return ret;
}
}
}
impl ICMP {
pub fn d(&self){
d("ICMP ");
dbh(self.header._type);
d(" code ");
dbh(self.header.code);
d(" data ");
dd(self.data.len());
}
}
|
impl FromBytes for ICMP {
fn from_bytes(bytes: Vec<u8>) -> Option<ICMP> {
if bytes.len() >= size_of::<ICMPHeader>() {
|
random_line_split
|
icmp.rs
|
use core::mem::size_of;
use core::option::Option;
use common::debug::*;
use common::vec::*;
use network::common::*;
#[derive(Copy, Clone)]
#[repr(packed)]
pub struct ICMPHeader {
pub _type: u8,
pub code: u8,
pub checksum: Checksum,
pub data: [u8; 4]
}
pub struct ICMP {
pub header: ICMPHeader,
pub data: Vec<u8>
}
impl FromBytes for ICMP {
fn from_bytes(bytes: Vec<u8>) -> Option<ICMP> {
if bytes.len() >= size_of::<ICMPHeader>() {
unsafe {
return Option::Some(ICMP {
header: *(bytes.as_ptr() as *const ICMPHeader),
data: bytes.sub(size_of::<ICMPHeader>(), bytes.len() - size_of::<ICMPHeader>())
});
}
}
return Option::None;
}
}
impl ToBytes for ICMP {
fn
|
(&self) -> Vec<u8> {
unsafe{
let header_ptr: *const ICMPHeader = &self.header;
let mut ret = Vec::from_raw_buf(header_ptr as *const u8, size_of::<ICMPHeader>());
ret.push_all(&self.data);
return ret;
}
}
}
impl ICMP {
pub fn d(&self){
d("ICMP ");
dbh(self.header._type);
d(" code ");
dbh(self.header.code);
d(" data ");
dd(self.data.len());
}
}
|
to_bytes
|
identifier_name
|
icmp.rs
|
use core::mem::size_of;
use core::option::Option;
use common::debug::*;
use common::vec::*;
use network::common::*;
#[derive(Copy, Clone)]
#[repr(packed)]
pub struct ICMPHeader {
pub _type: u8,
pub code: u8,
pub checksum: Checksum,
pub data: [u8; 4]
}
pub struct ICMP {
pub header: ICMPHeader,
pub data: Vec<u8>
}
impl FromBytes for ICMP {
fn from_bytes(bytes: Vec<u8>) -> Option<ICMP> {
if bytes.len() >= size_of::<ICMPHeader>()
|
return Option::None;
}
}
impl ToBytes for ICMP {
fn to_bytes(&self) -> Vec<u8> {
unsafe{
let header_ptr: *const ICMPHeader = &self.header;
let mut ret = Vec::from_raw_buf(header_ptr as *const u8, size_of::<ICMPHeader>());
ret.push_all(&self.data);
return ret;
}
}
}
impl ICMP {
pub fn d(&self){
d("ICMP ");
dbh(self.header._type);
d(" code ");
dbh(self.header.code);
d(" data ");
dd(self.data.len());
}
}
|
{
unsafe {
return Option::Some(ICMP {
header: *(bytes.as_ptr() as *const ICMPHeader),
data: bytes.sub(size_of::<ICMPHeader>(), bytes.len() - size_of::<ICMPHeader>())
});
}
}
|
conditional_block
|
error.rs
|
//! The module defining custom leveldb error type.
use libc::{c_void, free};
use std;
/// A leveldb error, just containing the error string
/// provided by leveldb.
#[derive(Debug)]
pub struct Error {
message: String,
}
impl Error {
/// create a new Error, using the String provided
pub fn new(message: String) -> Error {
Error { message: message }
}
/// create an error from a c-string buffer.
pub fn new_from_i8(message: *const i8) -> Error
|
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "LevelDB error: {}", self.message)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.message
}
fn cause(&self) -> Option<&std::error::Error> {
None
}
}
|
{
use std::str::from_utf8;
use std::ffi::CStr;
let err_string =
unsafe { from_utf8(CStr::from_ptr(message).to_bytes()).unwrap().to_string() };
unsafe { free(message as *mut c_void) };
Error::new(err_string)
}
|
identifier_body
|
error.rs
|
//! The module defining custom leveldb error type.
use libc::{c_void, free};
use std;
/// A leveldb error, just containing the error string
/// provided by leveldb.
#[derive(Debug)]
pub struct Error {
message: String,
}
impl Error {
/// create a new Error, using the String provided
pub fn new(message: String) -> Error {
Error { message: message }
}
/// create an error from a c-string buffer.
pub fn new_from_i8(message: *const i8) -> Error {
use std::str::from_utf8;
use std::ffi::CStr;
let err_string =
unsafe { from_utf8(CStr::from_ptr(message).to_bytes()).unwrap().to_string() };
unsafe { free(message as *mut c_void) };
Error::new(err_string)
}
}
impl std::fmt::Display for Error {
fn
|
(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "LevelDB error: {}", self.message)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.message
}
fn cause(&self) -> Option<&std::error::Error> {
None
}
}
|
fmt
|
identifier_name
|
error.rs
|
//! The module defining custom leveldb error type.
use libc::{c_void, free};
use std;
/// A leveldb error, just containing the error string
/// provided by leveldb.
#[derive(Debug)]
pub struct Error {
message: String,
}
impl Error {
/// create a new Error, using the String provided
pub fn new(message: String) -> Error {
Error { message: message }
}
/// create an error from a c-string buffer.
pub fn new_from_i8(message: *const i8) -> Error {
use std::str::from_utf8;
use std::ffi::CStr;
let err_string =
unsafe { from_utf8(CStr::from_ptr(message).to_bytes()).unwrap().to_string() };
unsafe { free(message as *mut c_void) };
|
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "LevelDB error: {}", self.message)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.message
}
fn cause(&self) -> Option<&std::error::Error> {
None
}
}
|
Error::new(err_string)
}
}
|
random_line_split
|
process-status-inherits-stdin.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// ignore-cloudabi no processes
// ignore-emscripten no processes
use std::env;
use std::io;
use std::io::Write;
use std::process::{Command, Stdio};
fn main()
|
let mut s = String::new();
io::stdin().read_line(&mut s).unwrap();
assert_eq!(s, "foo\n");
}
}
}
|
{
let mut args = env::args();
let me = args.next().unwrap();
let arg = args.next();
match arg.as_ref().map(|s| &s[..]) {
None => {
let mut s = Command::new(&me)
.arg("a1")
.stdin(Stdio::piped())
.spawn()
.unwrap();
s.stdin.take().unwrap().write_all(b"foo\n").unwrap();
let s = s.wait().unwrap();
assert!(s.success());
}
Some("a1") => {
let s = Command::new(&me).arg("a2").status().unwrap();
assert!(s.success());
}
Some(..) => {
|
identifier_body
|
process-status-inherits-stdin.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// ignore-cloudabi no processes
// ignore-emscripten no processes
use std::env;
use std::io;
use std::io::Write;
use std::process::{Command, Stdio};
fn
|
() {
let mut args = env::args();
let me = args.next().unwrap();
let arg = args.next();
match arg.as_ref().map(|s| &s[..]) {
None => {
let mut s = Command::new(&me)
.arg("a1")
.stdin(Stdio::piped())
.spawn()
.unwrap();
s.stdin.take().unwrap().write_all(b"foo\n").unwrap();
let s = s.wait().unwrap();
assert!(s.success());
}
Some("a1") => {
let s = Command::new(&me).arg("a2").status().unwrap();
assert!(s.success());
}
Some(..) => {
let mut s = String::new();
io::stdin().read_line(&mut s).unwrap();
assert_eq!(s, "foo\n");
}
}
}
|
main
|
identifier_name
|
process-status-inherits-stdin.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// ignore-cloudabi no processes
// ignore-emscripten no processes
use std::env;
use std::io;
use std::io::Write;
use std::process::{Command, Stdio};
fn main() {
let mut args = env::args();
let me = args.next().unwrap();
let arg = args.next();
match arg.as_ref().map(|s| &s[..]) {
None => {
let mut s = Command::new(&me)
.arg("a1")
.stdin(Stdio::piped())
.spawn()
.unwrap();
s.stdin.take().unwrap().write_all(b"foo\n").unwrap();
let s = s.wait().unwrap();
assert!(s.success());
}
Some("a1") =>
|
Some(..) => {
let mut s = String::new();
io::stdin().read_line(&mut s).unwrap();
assert_eq!(s, "foo\n");
}
}
}
|
{
let s = Command::new(&me).arg("a2").status().unwrap();
assert!(s.success());
}
|
conditional_block
|
process-status-inherits-stdin.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// ignore-cloudabi no processes
// ignore-emscripten no processes
use std::env;
use std::io;
use std::io::Write;
use std::process::{Command, Stdio};
fn main() {
let mut args = env::args();
let me = args.next().unwrap();
let arg = args.next();
match arg.as_ref().map(|s| &s[..]) {
None => {
let mut s = Command::new(&me)
.arg("a1")
.stdin(Stdio::piped())
.spawn()
.unwrap();
s.stdin.take().unwrap().write_all(b"foo\n").unwrap();
let s = s.wait().unwrap();
assert!(s.success());
}
Some("a1") => {
let s = Command::new(&me).arg("a2").status().unwrap();
assert!(s.success());
}
Some(..) => {
let mut s = String::new();
io::stdin().read_line(&mut s).unwrap();
assert_eq!(s, "foo\n");
}
}
}
|
// http://rust-lang.org/COPYRIGHT.
//
|
random_line_split
|
tokens.rs
|
use std::collections::HashMap;
#[allow(dead_code)]
#[derive(PartialEq, Eq, Debug, Clone, Hash)]
pub enum Keyword {
AND,
BREAK,
DO,
ELSE,
ELSEIF,
END,
FALSE,
FOR,
FUNCTION,
GOTO,
IF,
IN,
LOCAL,
NIL,
NOT,
OR,
REPEAT,
RETURN,
THEN,
TRUE,
UNTIL,
WHILE,
// + - * / % ^ #
// & ~ | << >> //
// == ~= <= >= < > =
// ( ) { } [ ] ::
// ; : , . .. ...
PLUS,
MINUS,
MUL,
DIV,
MOD,
POW,
HASH,
SAND,
TILDA,
SOR,
SHLEFT,
SHRIGHT,
FLOORDIV,
EQ,
NEQ,
LEQ,
GEQ,
LESS,
GREATER,
ASSIGN,
LBRACE,
RBRACE,
LCBRACKET,
RCBRACKET,
LSBRACKET,
RSBRACKET,
PATH,
COLONS,
SEMICOLONS,
COMMA,
DOT,
DOT2,
DOT3,
}
impl Keyword {
// unop ::= ‘-’ | not | ‘#’ | ‘~’
pub fn is_unop(&self) -> bool {
match *self {
Keyword::MINUS | Keyword::NOT | Keyword::HASH | Keyword::TILDA => true,
_ => false,
}
}
// binop ::= ‘+’ | ‘-’ | ‘*’ | ‘/’ | ‘//’ | ‘^’ | ‘%’ |
// ‘&’ | ‘~’ | ‘|’ | ‘>>’ | ‘<<’ | ‘..’ |
// ‘<’ | ‘<=’ | ‘>’ | ‘>=’ | ‘==’ | ‘~=’ |
// and | or
pub fn is_binop(&self) -> bool {
match *self {
Keyword::PLUS
| Keyword::MINUS
| Keyword::MUL
| Keyword::DIV
| Keyword::POW
| Keyword::MOD
| Keyword::SAND
| Keyword::TILDA
| Keyword::SOR
| Keyword::SHRIGHT
| Keyword::SHLEFT
| Keyword::FLOORDIV
| Keyword::DOT2
| Keyword::LESS
| Keyword::LEQ
| Keyword::GREATER
| Keyword::GEQ
| Keyword::EQ
| Keyword::NEQ
| Keyword::AND
| Keyword::OR => true,
_ => false,
}
}
}
impl PartialEq<Token> for Keyword {
fn eq(&self, token: &Token) -> bool {
token.token == TokenType::from(self.clone())
}
}
pub fn get_token_table() -> HashMap<String, Keyword> {
string_hash_map![
("and", Keyword::AND),
("break", Keyword::BREAK),
("do", Keyword::DO),
("else", Keyword::ELSE),
("elseif", Keyword::ELSEIF),
("end", Keyword::END),
("false", Keyword::FALSE),
("for", Keyword::FOR),
("function", Keyword::FUNCTION),
("goto", Keyword::GOTO),
("if", Keyword::IF),
("in", Keyword::IN),
("local", Keyword::LOCAL),
("nil", Keyword::NIL),
("not", Keyword::NOT),
("or", Keyword::OR),
("repeat", Keyword::REPEAT),
("return", Keyword::RETURN),
("then", Keyword::THEN),
("true", Keyword::TRUE),
("until", Keyword::UNTIL),
("while", Keyword::WHILE),
]
}
pub fn get_operator_table() -> HashMap<String, Keyword> {
string_hash_map![
("+", Keyword::PLUS),
("-", Keyword::MINUS),
("*", Keyword::MUL),
("/", Keyword::DIV),
("%", Keyword::MOD),
("^", Keyword::POW),
("#", Keyword::HASH),
("&", Keyword::SAND),
("~", Keyword::TILDA),
("|", Keyword::SOR),
("<<", Keyword::SHLEFT),
(">>", Keyword::SHRIGHT),
("//", Keyword::FLOORDIV),
("==", Keyword::EQ),
("~=", Keyword::NEQ),
("<=", Keyword::LEQ),
(">=", Keyword::GEQ),
("<", Keyword::LESS),
(">", Keyword::GREATER),
("=", Keyword::ASSIGN),
("(", Keyword::LBRACE),
(")", Keyword::RBRACE),
("[", Keyword::LSBRACKET),
("]", Keyword::RSBRACKET),
("{", Keyword::LCBRACKET),
("}", Keyword::RCBRACKET),
("::", Keyword::PATH),
(";", Keyword::SEMICOLONS),
(":", Keyword::COLONS),
(",", Keyword::COMMA),
(".", Keyword::DOT),
("..", Keyword::DOT2),
("...", Keyword::DOT3),
]
}
#[derive(PartialEq, Debug, Clone)]
pub enum TokenType {
Keyword(Keyword),
Id(String),
String(String),
Number(f64),
None,
}
impl From<Keyword> for TokenType {
fn from(keyword: Keyword) -> Self {
TokenType::Keyword(keyword)
}
}
#[derive(PartialEq, Debug, Clone)]
pub struct Token {
pub token: TokenType,
pub row: usize,
pub column: usize,
}
impl Token {
pub fn new(token: TokenType, row: usize, column: usize) -> Token {
Token { token, row, column }
}
pub fn eof() -> Token {
Token::new(TokenType::None, 0, 0)
}
pub fn id(&self) -> Option<String> {
match self.token {
TokenType::Id(ref id) => Some(id.clone()),
|
TokenType::Keyword(ref keyword) => Some(keyword.clone()),
_ => None,
}
}
}
impl Into<TokenType> for Token {
fn into(self) -> TokenType {
self.token.clone()
}
}
|
_ => None,
}
}
pub fn keyword(&self) -> Option<Keyword> {
match self.token {
|
identifier_body
|
tokens.rs
|
use std::collections::HashMap;
#[allow(dead_code)]
#[derive(PartialEq, Eq, Debug, Clone, Hash)]
pub enum Keyword {
AND,
BREAK,
DO,
ELSE,
ELSEIF,
END,
FALSE,
FOR,
FUNCTION,
GOTO,
IF,
IN,
LOCAL,
NIL,
NOT,
OR,
REPEAT,
RETURN,
THEN,
TRUE,
UNTIL,
WHILE,
// + - * / % ^ #
// & ~ | << >> //
// == ~= <= >= < > =
// ( ) { } [ ] ::
// ; : , . .. ...
PLUS,
MINUS,
MUL,
DIV,
MOD,
POW,
HASH,
SAND,
TILDA,
SOR,
SHLEFT,
SHRIGHT,
FLOORDIV,
EQ,
NEQ,
LEQ,
GEQ,
LESS,
GREATER,
ASSIGN,
LBRACE,
RBRACE,
LCBRACKET,
RCBRACKET,
LSBRACKET,
RSBRACKET,
PATH,
COLONS,
SEMICOLONS,
COMMA,
DOT,
DOT2,
DOT3,
}
impl Keyword {
// unop ::= ‘-’ | not | ‘#’ | ‘~’
pub fn is_unop(&self) -> bool {
match *self {
Keyword::MINUS | Keyword::NOT | Keyword::HASH | Keyword::TILDA => true,
_ => false,
}
}
// binop ::= ‘+’ | ‘-’ | ‘*’ | ‘/’ | ‘//’ | ‘^’ | ‘%’ |
// ‘&’ | ‘~’ | ‘|’ | ‘>>’ | ‘<<’ | ‘..’ |
// ‘<’ | ‘<=’ | ‘>’ | ‘>=’ | ‘==’ | ‘~=’ |
// and | or
pub fn is_binop(&self) -> bool {
match *self {
Keyword::PLUS
| Keyword::MINUS
| Keyword::MUL
| Keyword::DIV
| Keyword::POW
| Keyword::MOD
| Keyword::SAND
| Keyword::TILDA
| Keyword::SOR
| Keyword::SHRIGHT
| Keyword::SHLEFT
| Keyword::FLOORDIV
| Keyword::DOT2
| Keyword::LESS
| Keyword::LEQ
| Keyword::GREATER
| Keyword::GEQ
| Keyword::EQ
| Keyword::NEQ
| Keyword::AND
| Keyword::OR => true,
_ => false,
}
}
}
impl PartialEq<Token> for Keyword {
fn eq(&self, token: &Token) -> bool {
token.token == TokenType::from(self.clone())
}
}
pub fn get_token_table() -> HashMap<String, Keyword> {
string_hash_map![
("and", Ke
|
("break", Keyword::BREAK),
("do", Keyword::DO),
("else", Keyword::ELSE),
("elseif", Keyword::ELSEIF),
("end", Keyword::END),
("false", Keyword::FALSE),
("for", Keyword::FOR),
("function", Keyword::FUNCTION),
("goto", Keyword::GOTO),
("if", Keyword::IF),
("in", Keyword::IN),
("local", Keyword::LOCAL),
("nil", Keyword::NIL),
("not", Keyword::NOT),
("or", Keyword::OR),
("repeat", Keyword::REPEAT),
("return", Keyword::RETURN),
("then", Keyword::THEN),
("true", Keyword::TRUE),
("until", Keyword::UNTIL),
("while", Keyword::WHILE),
]
}
pub fn get_operator_table() -> HashMap<String, Keyword> {
string_hash_map![
("+", Keyword::PLUS),
("-", Keyword::MINUS),
("*", Keyword::MUL),
("/", Keyword::DIV),
("%", Keyword::MOD),
("^", Keyword::POW),
("#", Keyword::HASH),
("&", Keyword::SAND),
("~", Keyword::TILDA),
("|", Keyword::SOR),
("<<", Keyword::SHLEFT),
(">>", Keyword::SHRIGHT),
("//", Keyword::FLOORDIV),
("==", Keyword::EQ),
("~=", Keyword::NEQ),
("<=", Keyword::LEQ),
(">=", Keyword::GEQ),
("<", Keyword::LESS),
(">", Keyword::GREATER),
("=", Keyword::ASSIGN),
("(", Keyword::LBRACE),
(")", Keyword::RBRACE),
("[", Keyword::LSBRACKET),
("]", Keyword::RSBRACKET),
("{", Keyword::LCBRACKET),
("}", Keyword::RCBRACKET),
("::", Keyword::PATH),
(";", Keyword::SEMICOLONS),
(":", Keyword::COLONS),
(",", Keyword::COMMA),
(".", Keyword::DOT),
("..", Keyword::DOT2),
("...", Keyword::DOT3),
]
}
#[derive(PartialEq, Debug, Clone)]
pub enum TokenType {
Keyword(Keyword),
Id(String),
String(String),
Number(f64),
None,
}
impl From<Keyword> for TokenType {
fn from(keyword: Keyword) -> Self {
TokenType::Keyword(keyword)
}
}
#[derive(PartialEq, Debug, Clone)]
pub struct Token {
pub token: TokenType,
pub row: usize,
pub column: usize,
}
impl Token {
pub fn new(token: TokenType, row: usize, column: usize) -> Token {
Token { token, row, column }
}
pub fn eof() -> Token {
Token::new(TokenType::None, 0, 0)
}
pub fn id(&self) -> Option<String> {
match self.token {
TokenType::Id(ref id) => Some(id.clone()),
_ => None,
}
}
pub fn keyword(&self) -> Option<Keyword> {
match self.token {
TokenType::Keyword(ref keyword) => Some(keyword.clone()),
_ => None,
}
}
}
impl Into<TokenType> for Token {
fn into(self) -> TokenType {
self.token.clone()
}
}
|
yword::AND),
|
identifier_name
|
tokens.rs
|
use std::collections::HashMap;
#[allow(dead_code)]
#[derive(PartialEq, Eq, Debug, Clone, Hash)]
pub enum Keyword {
AND,
BREAK,
DO,
ELSE,
ELSEIF,
END,
FALSE,
FOR,
FUNCTION,
GOTO,
IF,
IN,
LOCAL,
NIL,
NOT,
OR,
REPEAT,
RETURN,
THEN,
TRUE,
UNTIL,
WHILE,
// + - * / % ^ #
// & ~ | << >> //
// == ~= <= >= < > =
// ( ) { } [ ] ::
// ; : , . .. ...
PLUS,
MINUS,
MUL,
DIV,
MOD,
POW,
HASH,
SAND,
TILDA,
SOR,
SHLEFT,
SHRIGHT,
FLOORDIV,
EQ,
NEQ,
LEQ,
GEQ,
LESS,
GREATER,
ASSIGN,
LBRACE,
RBRACE,
LCBRACKET,
RCBRACKET,
LSBRACKET,
RSBRACKET,
PATH,
COLONS,
SEMICOLONS,
COMMA,
DOT,
DOT2,
DOT3,
}
impl Keyword {
// unop ::= ‘-’ | not | ‘#’ | ‘~’
pub fn is_unop(&self) -> bool {
match *self {
Keyword::MINUS | Keyword::NOT | Keyword::HASH | Keyword::TILDA => true,
_ => false,
}
}
// binop ::= ‘+’ | ‘-’ | ‘*’ | ‘/’ | ‘//’ | ‘^’ | ‘%’ |
// ‘&’ | ‘~’ | ‘|’ | ‘>>’ | ‘<<’ | ‘..’ |
// ‘<’ | ‘<=’ | ‘>’ | ‘>=’ | ‘==’ | ‘~=’ |
// and | or
pub fn is_binop(&self) -> bool {
match *self {
Keyword::PLUS
| Keyword::MINUS
| Keyword::MUL
| Keyword::DIV
| Keyword::POW
| Keyword::MOD
| Keyword::SAND
| Keyword::TILDA
| Keyword::SOR
| Keyword::SHRIGHT
| Keyword::SHLEFT
| Keyword::FLOORDIV
| Keyword::DOT2
| Keyword::LESS
| Keyword::LEQ
| Keyword::GREATER
| Keyword::GEQ
| Keyword::EQ
| Keyword::NEQ
| Keyword::AND
| Keyword::OR => true,
_ => false,
}
}
}
impl PartialEq<Token> for Keyword {
fn eq(&self, token: &Token) -> bool {
token.token == TokenType::from(self.clone())
}
}
pub fn get_token_table() -> HashMap<String, Keyword> {
string_hash_map![
("and", Keyword::AND),
("break", Keyword::BREAK),
("do", Keyword::DO),
("else", Keyword::ELSE),
("elseif", Keyword::ELSEIF),
("end", Keyword::END),
("false", Keyword::FALSE),
("for", Keyword::FOR),
("function", Keyword::FUNCTION),
("goto", Keyword::GOTO),
("if", Keyword::IF),
("in", Keyword::IN),
("local", Keyword::LOCAL),
("nil", Keyword::NIL),
("not", Keyword::NOT),
("or", Keyword::OR),
("repeat", Keyword::REPEAT),
("return", Keyword::RETURN),
("then", Keyword::THEN),
("true", Keyword::TRUE),
("until", Keyword::UNTIL),
("while", Keyword::WHILE),
]
}
pub fn get_operator_table() -> HashMap<String, Keyword> {
string_hash_map![
("+", Keyword::PLUS),
("-", Keyword::MINUS),
("*", Keyword::MUL),
("/", Keyword::DIV),
("%", Keyword::MOD),
("^", Keyword::POW),
("#", Keyword::HASH),
("&", Keyword::SAND),
("~", Keyword::TILDA),
("|", Keyword::SOR),
("<<", Keyword::SHLEFT),
(">>", Keyword::SHRIGHT),
("//", Keyword::FLOORDIV),
("==", Keyword::EQ),
("~=", Keyword::NEQ),
("<=", Keyword::LEQ),
(">=", Keyword::GEQ),
("<", Keyword::LESS),
(">", Keyword::GREATER),
("=", Keyword::ASSIGN),
|
("[", Keyword::LSBRACKET),
("]", Keyword::RSBRACKET),
("{", Keyword::LCBRACKET),
("}", Keyword::RCBRACKET),
("::", Keyword::PATH),
(";", Keyword::SEMICOLONS),
(":", Keyword::COLONS),
(",", Keyword::COMMA),
(".", Keyword::DOT),
("..", Keyword::DOT2),
("...", Keyword::DOT3),
]
}
#[derive(PartialEq, Debug, Clone)]
pub enum TokenType {
Keyword(Keyword),
Id(String),
String(String),
Number(f64),
None,
}
impl From<Keyword> for TokenType {
fn from(keyword: Keyword) -> Self {
TokenType::Keyword(keyword)
}
}
#[derive(PartialEq, Debug, Clone)]
pub struct Token {
pub token: TokenType,
pub row: usize,
pub column: usize,
}
impl Token {
pub fn new(token: TokenType, row: usize, column: usize) -> Token {
Token { token, row, column }
}
pub fn eof() -> Token {
Token::new(TokenType::None, 0, 0)
}
pub fn id(&self) -> Option<String> {
match self.token {
TokenType::Id(ref id) => Some(id.clone()),
_ => None,
}
}
pub fn keyword(&self) -> Option<Keyword> {
match self.token {
TokenType::Keyword(ref keyword) => Some(keyword.clone()),
_ => None,
}
}
}
impl Into<TokenType> for Token {
fn into(self) -> TokenType {
self.token.clone()
}
}
|
("(", Keyword::LBRACE),
(")", Keyword::RBRACE),
|
random_line_split
|
fns.rs
|
//! Random utility Lisp functions.
use std::{ptr, slice};
use libc;
use remacs_macros::lisp_fn;
use crate::{
casefiddle::downcase,
dispnew::{ding, sleep_for},
eval::{record_unwind_protect, un_autoload, unbind_to},
lisp::LispObject,
lists::{assq, car, get, mapcar1, member, memq, put},
lists::{LispCons, LispConsCircularChecks, LispConsEndChecks},
minibuf::read_from_minibuffer,
multibyte::{string_char_and_length, write_codepoint, LispStringRef},
numbers::LispNumber,
obarray::loadhist_attach,
objects::equal,
remacs_sys::Vautoload_queue,
remacs_sys::{
concat as lisp_concat, globals, make_uninit_bool_vector, make_uninit_multibyte_string,
make_uninit_string, make_uninit_vector, message1, redisplay_preserve_echo_area,
},
remacs_sys::{EmacsInt, Lisp_Type},
remacs_sys::{Fdiscard_input, Fload, Fx_popup_dialog},
remacs_sys::{
Qfuncall, Qlistp, Qnil, Qprovide, Qquote, Qrequire, Qsequencep, Qsubfeatures, Qt,
Qyes_or_no_p_history,
},
symbols::LispSymbolRef,
threads::c_specpdl_index,
vectors::length,
};
/// Return t if FEATURE is present in this Emacs.
///
/// Use this to conditionalize execution of lisp code based on the
/// presence or absence of Emacs or environment extensions.
/// Use `provide' to declare that a feature is available. This function
/// looks at the value of the variable `features'. The optional argument
/// SUBFEATURE can be used to check a specific subfeature of FEATURE.
#[lisp_fn(min = "1")]
pub fn featurep(feature: LispSymbolRef, subfeature: LispObject) -> bool {
let mut tem = memq(feature.into(), unsafe { globals.Vfeatures });
if tem.is_not_nil() && subfeature.is_not_nil() {
tem = member(subfeature, get(feature, Qsubfeatures));
}
tem.is_not_nil()
}
/// Announce that FEATURE is a feature of the current Emacs.
/// The optional argument SUBFEATURES should be a list of symbols listing
/// particular subfeatures supported in this version of FEATURE.
#[lisp_fn(min = "1")]
pub fn provide(feature: LispSymbolRef, subfeature: LispObject) -> LispObject {
if!subfeature.is_list() {
wrong_type!(Qlistp, subfeature)
}
unsafe {
if Vautoload_queue.is_not_nil() {
Vautoload_queue = ((0, globals.Vfeatures), Vautoload_queue).into();
}
}
if memq(feature.into(), unsafe { globals.Vfeatures }).is_nil() {
unsafe {
globals.Vfeatures = (feature, globals.Vfeatures).into();
}
}
if subfeature.is_not_nil() {
put(feature, Qsubfeatures, subfeature);
}
unsafe {
globals.Vcurrent_load_list = ((Qprovide, feature), globals.Vcurrent_load_list).into();
}
// Run any load-hooks for this file.
unsafe {
if let Some((_, d)) = assq(feature.into(), globals.Vafter_load_alist).into() {
Fmapc(Qfuncall, d);
}
}
feature.into()
}
/// Return the argument, without evaluating it. `(quote x)' yields `x'.
/// Warning: `quote' does not construct its return value, but just returns
/// the value that was pre-constructed by the Lisp reader (see info node
/// `(elisp)Printed Representation').
/// This means that \\='(a. b) is not identical to (cons \\='a \\='b): the former
/// does not cons. Quoting should be reserved for constants that will
/// never be modified by side-effects, unless you like self-modifying code.
/// See the common pitfall in info node `(elisp)Rearrangement' for an example
/// of unexpected results when a quoted object is modified.
/// usage: (quote ARG)
#[lisp_fn(unevalled = "true")]
pub fn quote(args: LispCons) -> LispObject {
if args.cdr().is_not_nil() {
wrong_number_of_arguments!(Qquote, args.length());
}
args.car()
}
/// Apply FUNCTION to each element of SEQUENCE, and make a list of the
/// results. The result is a list just as long as SEQUENCE. SEQUENCE
/// may be a list, a vector, a bool-vector, or a string.
#[lisp_fn]
pub fn mapc(function: LispObject, sequence: LispObject) -> LispObject {
let leni = length(sequence) as EmacsInt;
if sequence.is_char_table()
|
mapcar1(leni, ptr::null_mut(), function, sequence);
sequence
}
/* List of features currently being require'd, innermost first. */
declare_GC_protected_static!(require_nesting_list, Qnil);
unsafe extern "C" fn require_unwind(old_value: LispObject) {
require_nesting_list = old_value;
}
/// If feature FEATURE is not loaded, load it from FILENAME.
/// If FEATURE is not a member of the list `features', then the feature is
/// not loaded; so load the file FILENAME.
///
/// If FILENAME is omitted, the printname of FEATURE is used as the file
/// name, and `load' will try to load this name appended with the suffix
/// `.elc', `.el', or the system-dependent suffix for dynamic module
/// files, in that order. The name without appended suffix will not be
/// used. See `get-load-suffixes' for the complete list of suffixes.
///
/// The directories in `load-path' are searched when trying to find the
/// file name.
///
/// If the optional third argument NOERROR is non-nil, then return nil if
/// the file is not found instead of signaling an error. Normally the
/// return value is FEATURE.
///
/// The normal messages at start and end of loading FILENAME are
/// suppressed.
#[lisp_fn(min = "1")]
pub fn require(feature: LispObject, filename: LispObject, noerror: LispObject) -> LispObject {
let feature_sym: LispSymbolRef = feature.into();
let current_load_list = unsafe { globals.Vcurrent_load_list };
// Record the presence of `require' in this file
// even if the feature specified is already loaded.
// But not more than once in any file,
// and not when we aren't loading or reading from a file.
let from_file = unsafe { globals.load_in_progress }
|| current_load_list
.iter_tails(LispConsEndChecks::off, LispConsCircularChecks::off)
.any(|elt| elt.cdr().is_nil() && elt.car().is_string());
if from_file {
let tem = (Qrequire, feature).into();
if member(tem, current_load_list).is_nil() {
loadhist_attach(tem);
}
}
if memq(feature, unsafe { globals.Vfeatures }).is_not_nil() {
return feature;
}
let count = c_specpdl_index();
// This is to make sure that loadup.el gives a clear picture
// of what files are preloaded and when.
if unsafe { globals.Vpurify_flag.is_not_nil() } {
error!(
"(require {}) while preparing to dump",
feature_sym.symbol_name()
);
}
// A certain amount of recursive `require' is legitimate,
// but if we require the same feature recursively 3 times,
// signal an error.
let nesting = unsafe { require_nesting_list }
.iter_cars(LispConsEndChecks::off, LispConsCircularChecks::off)
.filter(|elt| equal(feature, *elt))
.count();
if nesting > 3 {
error!(
"Recursive `require' for feature `{}'",
feature_sym.symbol_name()
);
}
unsafe {
// Update the list for any nested `require's that occur.
record_unwind_protect(Some(require_unwind), require_nesting_list);
require_nesting_list = (feature, require_nesting_list).into();
// Value saved here is to be restored into Vautoload_queue
record_unwind_protect(Some(un_autoload), Vautoload_queue);
Vautoload_queue = Qt;
// Load the file.
let tem = Fload(
if filename.is_nil() {
feature_sym.symbol_name()
} else {
filename
},
noerror,
Qt,
Qnil,
filename.is_nil().into(),
);
// If load failed entirely, return nil.
if tem.is_nil() {
return unbind_to(count, Qnil);
}
}
let tem = memq(feature, unsafe { globals.Vfeatures });
if tem.is_nil() {
let tem3 = car(car(unsafe { globals.Vload_history }));
if tem3.is_nil() {
error!("Required feature `{}' was not provided", feature);
} else {
// Cf autoload-do-load.
error!(
"Loading file {} failed to provide feature `{}'",
tem3, feature
);
}
}
// Once loading finishes, don't undo it.
unsafe {
Vautoload_queue = Qt;
}
unbind_to(count, feature)
}
def_lisp_sym!(Qrequire, "require");
/// Concatenate all the arguments and make the result a list.
/// The result is a list whose elements are the elements of all the arguments.
/// Each argument may be a list, vector or string.
/// The last argument is not copied, just used as the tail of the new list.
/// usage: (append &rest SEQUENCES)
#[lisp_fn]
pub fn append(args: &mut [LispObject]) -> LispObject {
unsafe {
lisp_concat(
args.len() as isize,
args.as_mut_ptr() as *mut LispObject,
Lisp_Type::Lisp_Cons,
true,
)
}
}
/// Concatenate all the arguments and make the result a string.
/// The result is a string whose elements are the elements of all the arguments.
/// Each argument may be a string or a list or vector of characters (integers).
/// usage: (concat &rest SEQUENCES)
#[lisp_fn]
pub fn concat(args: &mut [LispObject]) -> LispObject {
unsafe {
lisp_concat(
args.len() as isize,
args.as_mut_ptr() as *mut LispObject,
Lisp_Type::Lisp_String,
false,
)
}
}
/// Return the reversed copy of list, vector, or string SEQ.
/// See also the function `nreverse', which is used more often.
#[lisp_fn]
pub fn reverse(seq: LispObject) -> LispObject {
if seq.is_nil() {
Qnil
} else if let Some(cons) = seq.as_cons() {
cons.iter_cars(LispConsEndChecks::on, LispConsCircularChecks::on)
.fold(Qnil, |cons, elt| LispObject::cons(elt, cons))
} else if let Some(vector) = seq.as_vector() {
let size = vector.len();
let mut new = unsafe { make_uninit_vector(size as isize) }.force_vector();
for (i, item) in vector.iter().enumerate() {
unsafe { new.set_unchecked(size - 1 - i, item) };
}
new.into()
} else if let Some(boolvec) = seq.as_bool_vector() {
let nbits = boolvec.len();
let mut new = unsafe { make_uninit_bool_vector(nbits as i64) }.force_bool_vector();
for (i, item) in boolvec.iter().enumerate() {
unsafe { new.set_unchecked(nbits - 1 - i, item.into()) }
}
new.into()
} else if let Some(string) = seq.as_string() {
let size = string.len_chars();
let bytes = string.len_bytes();
if!string.is_multibyte() {
let mut new = unsafe { make_uninit_string(size as i64) }.force_string();
for (i, c) in string.as_slice().iter().enumerate() {
new.set_byte(size - i as isize - 1, *c);
}
new.into()
} else {
let mut new =
unsafe { make_uninit_multibyte_string(size as i64, bytes as i64) }.force_string();
let mut p = string.const_data_ptr();
let mut q = unsafe { new.data_ptr().add(bytes as usize) };
let end = new.data_ptr();
while q > end {
unsafe {
let (c, len) = string_char_and_length(p);
p = p.add(len);
q = q.sub(len);
write_codepoint(slice::from_raw_parts_mut(q, len), c);
}
}
new.into()
}
} else {
wrong_type!(Qsequencep, seq);
}
}
// Return true if O1 and O2 are equal. Do not quit or check for cycles.
// Use this only on arguments that are cycle-free and not too large and
// are not window configurations.
#[no_mangle]
pub extern "C" fn equal_no_quit(o1: LispObject, o2: LispObject) -> bool {
o1.equal_no_quit(o2)
}
#[cfg(windows)]
unsafe fn getloadaverage(loadavg: *mut libc::c_double, nelem: libc::c_int) -> libc::c_int {
crate::remacs_sys::getloadavg(loadavg, nelem)
}
#[cfg(not(windows))]
unsafe fn getloadaverage(loadavg: *mut libc::c_double, nelem: libc::c_int) -> libc::c_int {
libc::getloadavg(loadavg, nelem)
}
/// Return list of 1 minute, 5 minute and 15 minute load averages.
///
/// Each of the three load averages is multiplied by 100, then converted
/// to integer.
///
/// When USE-FLOATS is non-nil, floats will be used instead of integers.
/// These floats are not multiplied by 100.
///
/// If the 5-minute or 15-minute load averages are not available, return a
/// shortened list, containing only those averages which are available.
///
/// An error is thrown if the load average can't be obtained. In some
/// cases making it work would require Emacs being installed setuid or
/// setgid so that it can read kernel information, and that usually isn't
/// advisable.
#[lisp_fn(min = "0")]
pub fn load_average(use_floats: bool) -> Vec<LispNumber> {
    let mut samples: [libc::c_double; 3] = [0.0; 3];
    // A negative count means the platform cannot report load averages.
    let available = unsafe { getloadaverage(samples.as_mut_ptr(), 3) };
    if available < 0 {
        error!("load-average not implemented for this operating system");
    }
    // Only the first `available` entries are valid; convert each one.
    samples[..available as usize]
        .iter()
        .map(|&avg| {
            if use_floats {
                LispNumber::Float(avg)
            } else {
                // Scale by 100 before truncating so two decimal places survive.
                LispNumber::Fixnum((100.0 * avg) as i64)
            }
        })
        .collect()
}
/// Return a copy of ALIST.
/// This is an alist which represents the same mapping from objects to objects,
/// but does not share the alist structure with ALIST.
/// The objects mapped (cars and cdrs of elements of the alist)
/// are shared, however.
/// Elements of ALIST that are not conses are also shared.
#[lisp_fn]
pub fn copy_alist(mut alist: LispObject) -> LispObject {
    if alist.is_nil() {
        return alist;
    }
    // Shallow-copy the spine: concat with a single list argument yields a
    // fresh chain of conses whose elements are still shared with ALIST.
    let new_alist = unsafe { lisp_concat(1, &mut alist, Lisp_Type::Lisp_Cons, false) };
    for elt in new_alist.iter_tails(LispConsEndChecks::off, LispConsCircularChecks::off) {
        let front = elt.car();
        // To make a copy, unpack the cons and then make a new one while re-using the car and cdr.
        // Non-cons elements fail the destructuring and remain shared, per the docstring.
        if let Some((car, cdr)) = front.into() {
            elt.set_car((car, cdr));
        }
    }
    new_alist
}
/// Ask user a yes-or-no question.
///
/// Return t if answer is yes, and nil if the answer is no. PROMPT is
/// the string to display to ask the question. It should end in a
/// space; `yes-or-no-p' adds \"(yes or no) \" to it.
///
/// The user must confirm the answer with RET, and can edit it until
/// it has been confirmed.
///
/// If dialog boxes are supported, a dialog box will be used if
/// `last-nonmenu-event' is nil, and `use-dialog-box' is non-nil.
#[lisp_fn]
pub fn yes_or_no_p(prompt: LispStringRef) -> bool {
    // Prefer a graphical dialog when the triggering event was not a plain
    // keyboard event and the user has enabled dialog boxes.
    let use_popup = unsafe {
        (globals.last_nonmenu_event.is_nil() || globals.last_nonmenu_event.is_cons())
            && globals.use_dialog_box
            && globals.last_input_event.is_not_nil()
    };
    if use_popup {
        unsafe { redisplay_preserve_echo_area(4) };
        let menu = (prompt, (("Yes", true), ("No", false)));
        return unsafe { Fx_popup_dialog(Qt, menu.into(), Qnil) }.into();
    }
    let yes_or_no: LispObject = "(yes or no) ".into();
    let prompt = concat(&mut [prompt.into(), yes_or_no]).into();
    // Keep prompting until the (down-cased) answer is exactly "yes" or "no".
    loop {
        let ans: LispStringRef = downcase(read_from_minibuffer(
            prompt,
            Qnil,
            Qnil,
            false,
            Qyes_or_no_p_history,
            Qnil,
            false,
        ))
        .into();
        match ans.as_slice() {
            b"yes" => {
                return true;
            }
            b"no" => {
                return false;
            }
            _ => {
                // Reject the answer audibly, explain, and pause briefly
                // before re-prompting.
                ding(Qnil);
                unsafe {
                    Fdiscard_input();
                    message1("Please answer yes or no.\0".as_ptr() as *const libc::c_char);
                }
                sleep_for(2.0, None);
            }
        }
    }
}
/// Concatenate any number of lists by altering them.
/// Only the last argument is not altered, and need not be a list.
/// usage: (nconc &rest LISTS)
#[lisp_fn]
pub fn nconc(args: &mut [LispObject]) -> LispObject {
    let mut val = Qnil;
    let len = args.len();
    for i in 0..len {
        let elt = args[i];
        if elt.is_nil() {
            continue;
        }
        // The returned value is the first non-nil argument.
        if val.is_nil() {
            val = elt;
        }
        // The last argument is left untouched, as documented.
        if (i + 1) == len {
            break;
        }
        let cons: LispCons = elt.into();
        // Find the final cons cell of this list and splice the next
        // argument onto its cdr (destructive concatenation).
        let tail = cons
            .iter_tails(LispConsEndChecks::off, LispConsCircularChecks::on)
            .last()
            .unwrap();
        let next = args[i + 1];
        tail.set_cdr(next);
        if next.is_nil() {
            // If the next argument was nil, remember this tail so the
            // following iteration keeps extending the same list.
            args[i + 1] = tail.into();
        }
    }
    val
}
include!(concat!(env!("OUT_DIR"), "/fns_exports.rs"));
|
{
wrong_type!(Qlistp, sequence);
}
|
conditional_block
|
fns.rs
|
//! Random utility Lisp functions.
use std::{ptr, slice};
use libc;
use remacs_macros::lisp_fn;
use crate::{
casefiddle::downcase,
dispnew::{ding, sleep_for},
eval::{record_unwind_protect, un_autoload, unbind_to},
lisp::LispObject,
lists::{assq, car, get, mapcar1, member, memq, put},
lists::{LispCons, LispConsCircularChecks, LispConsEndChecks},
minibuf::read_from_minibuffer,
multibyte::{string_char_and_length, write_codepoint, LispStringRef},
numbers::LispNumber,
obarray::loadhist_attach,
objects::equal,
remacs_sys::Vautoload_queue,
remacs_sys::{
concat as lisp_concat, globals, make_uninit_bool_vector, make_uninit_multibyte_string,
make_uninit_string, make_uninit_vector, message1, redisplay_preserve_echo_area,
},
remacs_sys::{EmacsInt, Lisp_Type},
remacs_sys::{Fdiscard_input, Fload, Fx_popup_dialog},
remacs_sys::{
Qfuncall, Qlistp, Qnil, Qprovide, Qquote, Qrequire, Qsequencep, Qsubfeatures, Qt,
Qyes_or_no_p_history,
},
symbols::LispSymbolRef,
threads::c_specpdl_index,
vectors::length,
};
/// Return t if FEATURE is present in this Emacs.
///
/// Use this to conditionalize execution of lisp code based on the
/// presence or absence of Emacs or environment extensions.
/// Use `provide' to declare that a feature is available. This function
/// looks at the value of the variable `features'. The optional argument
/// SUBFEATURE can be used to check a specific subfeature of FEATURE.
#[lisp_fn(min = "1")]
pub fn featurep(feature: LispSymbolRef, subfeature: LispObject) -> bool {
    let found = memq(feature.into(), unsafe { globals.Vfeatures });
    // Without a subfeature request (or when the feature is absent), the
    // membership test alone decides the answer.
    if found.is_nil() || subfeature.is_nil() {
        return found.is_not_nil();
    }
    // FEATURE is present; narrow the answer to the requested subfeature.
    member(subfeature, get(feature, Qsubfeatures)).is_not_nil()
}
/// Announce that FEATURE is a feature of the current Emacs.
/// The optional argument SUBFEATURES should be a list of symbols listing
/// particular subfeatures supported in this version of FEATURE.
#[lisp_fn(min = "1")]
pub fn provide(feature: LispSymbolRef, subfeature: LispObject) -> LispObject {
    if!subfeature.is_list() {
        wrong_type!(Qlistp, subfeature)
    }
    unsafe {
        // While autoloading, record the previous feature list so a failed
        // autoload can be rolled back (see un_autoload).
        if Vautoload_queue.is_not_nil() {
            Vautoload_queue = ((0, globals.Vfeatures), Vautoload_queue).into();
        }
    }
    // Add FEATURE to `features' only if it is not already there.
    if memq(feature.into(), unsafe { globals.Vfeatures }).is_nil() {
        unsafe {
            globals.Vfeatures = (feature, globals.Vfeatures).into();
        }
    }
    if subfeature.is_not_nil() {
        put(feature, Qsubfeatures, subfeature);
    }
    // Record the `provide' in the current load history.
    unsafe {
        globals.Vcurrent_load_list = ((Qprovide, feature), globals.Vcurrent_load_list).into();
    }
    // Run any load-hooks for this file.
    unsafe {
        if let Some((_, d)) = assq(feature.into(), globals.Vafter_load_alist).into() {
            Fmapc(Qfuncall, d);
        }
    }
    feature.into()
}
/// Return the argument, without evaluating it. `(quote x)' yields `x'.
/// Warning: `quote' does not construct its return value, but just returns
/// the value that was pre-constructed by the Lisp reader (see info node
/// `(elisp)Printed Representation').
/// This means that \\='(a. b) is not identical to (cons \\='a \\='b): the former
/// does not cons. Quoting should be reserved for constants that will
/// never be modified by side-effects, unless you like self-modifying code.
/// See the common pitfall in info node `(elisp)Rearrangement' for an example
/// of unexpected results when a quoted object is modified.
/// usage: (quote ARG)
#[lisp_fn(unevalled = "true")]
pub fn quote(args: LispCons) -> LispObject {
    // `quote' accepts exactly one (unevaluated) argument.
    let rest = args.cdr();
    if rest.is_not_nil() {
        wrong_number_of_arguments!(Qquote, args.length());
    }
    args.car()
}
/// Apply FUNCTION to each element of SEQUENCE, and make a list of the
/// results. The result is a list just as long as SEQUENCE. SEQUENCE
/// may be a list, a vector, a bool-vector, or a string.
#[lisp_fn]
pub fn mapc(function: LispObject, sequence: LispObject) -> LispObject {
    // Measure first; mapcar1 needs the element count up front.
    let seq_len = length(sequence) as EmacsInt;
    // Char-tables are explicitly rejected even though they have a length.
    if sequence.is_char_table() {
        wrong_type!(Qlistp, sequence);
    }
    // NOTE(review): a null output pointer presumably tells mapcar1 to
    // discard the mapped values — mapc is invoked for side effects only.
    mapcar1(seq_len, ptr::null_mut(), function, sequence);
    sequence
}
/* List of features currently being require'd, innermost first. */
declare_GC_protected_static!(require_nesting_list, Qnil);
// Unwind handler: restores the nesting list saved by `require' when the
// dynamic extent of that call ends (normally or via non-local exit).
unsafe extern "C" fn require_unwind(old_value: LispObject) {
    require_nesting_list = old_value;
}
/// If feature FEATURE is not loaded, load it from FILENAME.
/// If FEATURE is not a member of the list `features', then the feature is
/// not loaded; so load the file FILENAME.
///
/// If FILENAME is omitted, the printname of FEATURE is used as the file
/// name, and `load' will try to load this name appended with the suffix
/// `.elc', `.el', or the system-dependent suffix for dynamic module
/// files, in that order. The name without appended suffix will not be
/// used. See `get-load-suffixes' for the complete list of suffixes.
///
/// The directories in `load-path' are searched when trying to find the
/// file name.
///
/// If the optional third argument NOERROR is non-nil, then return nil if
/// the file is not found instead of signaling an error. Normally the
/// return value is FEATURE.
///
/// The normal messages at start and end of loading FILENAME are
/// suppressed.
#[lisp_fn(min = "1")]
pub fn require(feature: LispObject, filename: LispObject, noerror: LispObject) -> LispObject {
    let feature_sym: LispSymbolRef = feature.into();
    let current_load_list = unsafe { globals.Vcurrent_load_list };
    // Record the presence of `require' in this file
    // even if the feature specified is already loaded.
    // But not more than once in any file,
    // and not when we aren't loading or reading from a file.
    let from_file = unsafe { globals.load_in_progress }
        || current_load_list
            .iter_tails(LispConsEndChecks::off, LispConsCircularChecks::off)
            .any(|elt| elt.cdr().is_nil() && elt.car().is_string());
    if from_file {
        let tem = (Qrequire, feature).into();
        if member(tem, current_load_list).is_nil() {
            loadhist_attach(tem);
        }
    }
    // Already provided: nothing to load.
    if memq(feature, unsafe { globals.Vfeatures }).is_not_nil() {
        return feature;
    }
    let count = c_specpdl_index();
    // This is to make sure that loadup.el gives a clear picture
    // of what files are preloaded and when.
    if unsafe { globals.Vpurify_flag.is_not_nil() } {
        error!(
            "(require {}) while preparing to dump",
            feature_sym.symbol_name()
        );
    }
    // A certain amount of recursive `require' is legitimate,
    // but if we require the same feature recursively 3 times,
    // signal an error.
    let nesting = unsafe { require_nesting_list }
        .iter_cars(LispConsEndChecks::off, LispConsCircularChecks::off)
        .filter(|elt| equal(feature, *elt))
        .count();
    if nesting > 3 {
        error!(
            "Recursive `require' for feature `{}'",
            feature_sym.symbol_name()
        );
    }
    unsafe {
        // Update the list for any nested `require's that occur.
        record_unwind_protect(Some(require_unwind), require_nesting_list);
        require_nesting_list = (feature, require_nesting_list).into();
        // Value saved here is to be restored into Vautoload_queue
        record_unwind_protect(Some(un_autoload), Vautoload_queue);
        Vautoload_queue = Qt;
        // Load the file.
        let tem = Fload(
            if filename.is_nil() {
                feature_sym.symbol_name()
            } else {
                filename
            },
            noerror,
            Qt,
            Qnil,
            filename.is_nil().into(),
        );
        // If load failed entirely, return nil.
        if tem.is_nil() {
            return unbind_to(count, Qnil);
        }
    }
    // The load succeeded; the loaded file must itself have provided FEATURE.
    let tem = memq(feature, unsafe { globals.Vfeatures });
    if tem.is_nil() {
        let tem3 = car(car(unsafe { globals.Vload_history }));
        if tem3.is_nil() {
            error!("Required feature `{}' was not provided", feature);
        } else {
            // Cf autoload-do-load.
            error!(
                "Loading file {} failed to provide feature `{}'",
                tem3, feature
            );
        }
    }
    // Once loading finishes, don't undo it.
    unsafe {
        Vautoload_queue = Qt;
    }
    unbind_to(count, feature)
}
def_lisp_sym!(Qrequire, "require");
/// Concatenate all the arguments and make the result a list.
/// The result is a list whose elements are the elements of all the arguments.
/// Each argument may be a list, vector or string.
/// The last argument is not copied, just used as the tail of the new list.
/// usage: (append &rest SEQUENCES)
#[lisp_fn]
pub fn append(args: &mut [LispObject]) -> LispObject {
    let nargs = args.len() as isize;
    let argv = args.as_mut_ptr() as *mut LispObject;
    // Delegate to the C-level concat, requesting list output; the trailing
    // flag asks that the last argument be used as the tail without copying
    // (see the docstring above).
    unsafe { lisp_concat(nargs, argv, Lisp_Type::Lisp_Cons, true) }
}
/// Concatenate all the arguments and make the result a string.
/// The result is a string whose elements are the elements of all the arguments.
/// Each argument may be a string or a list or vector of characters (integers).
/// usage: (concat &rest SEQUENCES)
#[lisp_fn]
pub fn concat(args: &mut [LispObject]) -> LispObject {
    let nargs = args.len() as isize;
    let argv = args.as_mut_ptr() as *mut LispObject;
    // Delegate to the C-level concat, requesting string output; unlike
    // `append', no argument is shared with the result.
    unsafe { lisp_concat(nargs, argv, Lisp_Type::Lisp_String, false) }
}
/// Return the reversed copy of list, vector, or string SEQ.
/// See also the function `nreverse', which is used more often.
#[lisp_fn]
pub fn reverse(seq: LispObject) -> LispObject {
    if seq.is_nil() {
        Qnil
    } else if let Some(cons) = seq.as_cons() {
        // Lists: push each car onto a fresh accumulator; prepending
        // naturally reverses the order.
        cons.iter_cars(LispConsEndChecks::on, LispConsCircularChecks::on)
            .fold(Qnil, |cons, elt| LispObject::cons(elt, cons))
    } else if let Some(vector) = seq.as_vector() {
        // Vectors: fill a fresh uninitialized vector back-to-front.
        let size = vector.len();
        let mut new = unsafe { make_uninit_vector(size as isize) }.force_vector();
        for (i, item) in vector.iter().enumerate() {
            unsafe { new.set_unchecked(size - 1 - i, item) };
        }
        new.into()
    } else if let Some(boolvec) = seq.as_bool_vector() {
        // Bool-vectors: same back-to-front fill, one bit at a time.
        let nbits = boolvec.len();
        let mut new = unsafe { make_uninit_bool_vector(nbits as i64) }.force_bool_vector();
        for (i, item) in boolvec.iter().enumerate() {
            unsafe { new.set_unchecked(nbits - 1 - i, item.into()) }
        }
        new.into()
    } else if let Some(string) = seq.as_string() {
        let size = string.len_chars();
        let bytes = string.len_bytes();
        if!string.is_multibyte() {
            // Unibyte string: a simple byte-for-byte reversal suffices.
            let mut new = unsafe { make_uninit_string(size as i64) }.force_string();
            for (i, c) in string.as_slice().iter().enumerate() {
                new.set_byte(size - i as isize - 1, *c);
            }
            new.into()
        } else {
            // Multibyte string: walk the source forward (p) while filling
            // the new string from the back (q), copying one whole character
            // at a time so multi-byte sequences stay intact.
            let mut new =
                unsafe { make_uninit_multibyte_string(size as i64, bytes as i64) }.force_string();
            let mut p = string.const_data_ptr();
            let mut q = unsafe { new.data_ptr().add(bytes as usize) };
            let end = new.data_ptr();
            while q > end {
                unsafe {
                    let (c, len) = string_char_and_length(p);
                    p = p.add(len);
                    q = q.sub(len);
                    write_codepoint(slice::from_raw_parts_mut(q, len), c);
                }
            }
            new.into()
        }
    } else {
        wrong_type!(Qsequencep, seq);
    }
}
// Return true if O1 and O2 are equal. Do not quit or check for cycles.
// Use this only on arguments that are cycle-free and not too large and
// are not window configurations.
#[no_mangle]
pub extern "C" fn equal_no_quit(o1: LispObject, o2: LispObject) -> bool
|
// Platform shim: on Windows the load-average implementation lives in the
// C core, so route through remacs_sys.
#[cfg(windows)]
unsafe fn getloadaverage(loadavg: *mut libc::c_double, nelem: libc::c_int) -> libc::c_int {
    crate::remacs_sys::getloadavg(loadavg, nelem)
}
// On non-Windows systems libc provides getloadavg directly.
#[cfg(not(windows))]
unsafe fn getloadaverage(loadavg: *mut libc::c_double, nelem: libc::c_int) -> libc::c_int {
    libc::getloadavg(loadavg, nelem)
}
/// Return list of 1 minute, 5 minute and 15 minute load averages.
///
/// Each of the three load averages is multiplied by 100, then converted
/// to integer.
///
/// When USE-FLOATS is non-nil, floats will be used instead of integers.
/// These floats are not multiplied by 100.
///
/// If the 5-minute or 15-minute load averages are not available, return a
/// shortened list, containing only those averages which are available.
///
/// An error is thrown if the load average can't be obtained. In some
/// cases making it work would require Emacs being installed setuid or
/// setgid so that it can read kernel information, and that usually isn't
/// advisable.
#[lisp_fn(min = "0")]
pub fn load_average(use_floats: bool) -> Vec<LispNumber> {
    let mut samples: [libc::c_double; 3] = [0.0; 3];
    // A negative count means the platform cannot report load averages.
    let available = unsafe { getloadaverage(samples.as_mut_ptr(), 3) };
    if available < 0 {
        error!("load-average not implemented for this operating system");
    }
    // Only the first `available` entries are valid; convert each one.
    samples[..available as usize]
        .iter()
        .map(|&avg| {
            if use_floats {
                LispNumber::Float(avg)
            } else {
                // Scale by 100 before truncating so two decimal places survive.
                LispNumber::Fixnum((100.0 * avg) as i64)
            }
        })
        .collect()
}
/// Return a copy of ALIST.
/// This is an alist which represents the same mapping from objects to objects,
/// but does not share the alist structure with ALIST.
/// The objects mapped (cars and cdrs of elements of the alist)
/// are shared, however.
/// Elements of ALIST that are not conses are also shared.
#[lisp_fn]
pub fn copy_alist(mut alist: LispObject) -> LispObject {
    if alist.is_nil() {
        return alist;
    }
    // Shallow-copy the spine: concat with a single list argument yields a
    // fresh chain of conses whose elements are still shared with ALIST.
    let new_alist = unsafe { lisp_concat(1, &mut alist, Lisp_Type::Lisp_Cons, false) };
    for elt in new_alist.iter_tails(LispConsEndChecks::off, LispConsCircularChecks::off) {
        let front = elt.car();
        // To make a copy, unpack the cons and then make a new one while re-using the car and cdr.
        // Non-cons elements fail the destructuring and remain shared, per the docstring.
        if let Some((car, cdr)) = front.into() {
            elt.set_car((car, cdr));
        }
    }
    new_alist
}
/// Ask user a yes-or-no question.
///
/// Return t if answer is yes, and nil if the answer is no. PROMPT is
/// the string to display to ask the question. It should end in a
/// space; `yes-or-no-p' adds \"(yes or no) \" to it.
///
/// The user must confirm the answer with RET, and can edit it until
/// it has been confirmed.
///
/// If dialog boxes are supported, a dialog box will be used if
/// `last-nonmenu-event' is nil, and `use-dialog-box' is non-nil.
#[lisp_fn]
pub fn yes_or_no_p(prompt: LispStringRef) -> bool {
    // Prefer a graphical dialog when the triggering event was not a plain
    // keyboard event and the user has enabled dialog boxes.
    let use_popup = unsafe {
        (globals.last_nonmenu_event.is_nil() || globals.last_nonmenu_event.is_cons())
            && globals.use_dialog_box
            && globals.last_input_event.is_not_nil()
    };
    if use_popup {
        unsafe { redisplay_preserve_echo_area(4) };
        let menu = (prompt, (("Yes", true), ("No", false)));
        return unsafe { Fx_popup_dialog(Qt, menu.into(), Qnil) }.into();
    }
    let yes_or_no: LispObject = "(yes or no) ".into();
    let prompt = concat(&mut [prompt.into(), yes_or_no]).into();
    // Keep prompting until the (down-cased) answer is exactly "yes" or "no".
    loop {
        let ans: LispStringRef = downcase(read_from_minibuffer(
            prompt,
            Qnil,
            Qnil,
            false,
            Qyes_or_no_p_history,
            Qnil,
            false,
        ))
        .into();
        match ans.as_slice() {
            b"yes" => {
                return true;
            }
            b"no" => {
                return false;
            }
            _ => {
                // Reject the answer audibly, explain, and pause briefly
                // before re-prompting.
                ding(Qnil);
                unsafe {
                    Fdiscard_input();
                    message1("Please answer yes or no.\0".as_ptr() as *const libc::c_char);
                }
                sleep_for(2.0, None);
            }
        }
    }
}
/// Concatenate any number of lists by altering them.
/// Only the last argument is not altered, and need not be a list.
/// usage: (nconc &rest LISTS)
#[lisp_fn]
pub fn nconc(args: &mut [LispObject]) -> LispObject {
    let mut val = Qnil;
    let len = args.len();
    for i in 0..len {
        let elt = args[i];
        if elt.is_nil() {
            continue;
        }
        // The returned value is the first non-nil argument.
        if val.is_nil() {
            val = elt;
        }
        // The last argument is left untouched, as documented.
        if (i + 1) == len {
            break;
        }
        let cons: LispCons = elt.into();
        // Find the final cons cell of this list and splice the next
        // argument onto its cdr (destructive concatenation).
        let tail = cons
            .iter_tails(LispConsEndChecks::off, LispConsCircularChecks::on)
            .last()
            .unwrap();
        let next = args[i + 1];
        tail.set_cdr(next);
        if next.is_nil() {
            // If the next argument was nil, remember this tail so the
            // following iteration keeps extending the same list.
            args[i + 1] = tail.into();
        }
    }
    val
}
include!(concat!(env!("OUT_DIR"), "/fns_exports.rs"));
|
{
o1.equal_no_quit(o2)
}
|
identifier_body
|
fns.rs
|
//! Random utility Lisp functions.
use std::{ptr, slice};
use libc;
use remacs_macros::lisp_fn;
use crate::{
casefiddle::downcase,
dispnew::{ding, sleep_for},
eval::{record_unwind_protect, un_autoload, unbind_to},
lisp::LispObject,
lists::{assq, car, get, mapcar1, member, memq, put},
lists::{LispCons, LispConsCircularChecks, LispConsEndChecks},
minibuf::read_from_minibuffer,
multibyte::{string_char_and_length, write_codepoint, LispStringRef},
numbers::LispNumber,
obarray::loadhist_attach,
objects::equal,
remacs_sys::Vautoload_queue,
remacs_sys::{
concat as lisp_concat, globals, make_uninit_bool_vector, make_uninit_multibyte_string,
make_uninit_string, make_uninit_vector, message1, redisplay_preserve_echo_area,
},
remacs_sys::{EmacsInt, Lisp_Type},
remacs_sys::{Fdiscard_input, Fload, Fx_popup_dialog},
remacs_sys::{
Qfuncall, Qlistp, Qnil, Qprovide, Qquote, Qrequire, Qsequencep, Qsubfeatures, Qt,
Qyes_or_no_p_history,
},
symbols::LispSymbolRef,
threads::c_specpdl_index,
vectors::length,
};
/// Return t if FEATURE is present in this Emacs.
///
/// Use this to conditionalize execution of lisp code based on the
/// presence or absence of Emacs or environment extensions.
/// Use `provide' to declare that a feature is available. This function
/// looks at the value of the variable `features'. The optional argument
/// SUBFEATURE can be used to check a specific subfeature of FEATURE.
#[lisp_fn(min = "1")]
pub fn featurep(feature: LispSymbolRef, subfeature: LispObject) -> bool {
    let found = memq(feature.into(), unsafe { globals.Vfeatures });
    // Without a subfeature request (or when the feature is absent), the
    // membership test alone decides the answer.
    if found.is_nil() || subfeature.is_nil() {
        return found.is_not_nil();
    }
    // FEATURE is present; narrow the answer to the requested subfeature.
    member(subfeature, get(feature, Qsubfeatures)).is_not_nil()
}
/// Announce that FEATURE is a feature of the current Emacs.
/// The optional argument SUBFEATURES should be a list of symbols listing
/// particular subfeatures supported in this version of FEATURE.
#[lisp_fn(min = "1")]
pub fn provide(feature: LispSymbolRef, subfeature: LispObject) -> LispObject {
    if!subfeature.is_list() {
        wrong_type!(Qlistp, subfeature)
    }
    unsafe {
        // While autoloading, record the previous feature list so a failed
        // autoload can be rolled back (see un_autoload).
        if Vautoload_queue.is_not_nil() {
            Vautoload_queue = ((0, globals.Vfeatures), Vautoload_queue).into();
        }
    }
    // Add FEATURE to `features' only if it is not already there.
    if memq(feature.into(), unsafe { globals.Vfeatures }).is_nil() {
        unsafe {
            globals.Vfeatures = (feature, globals.Vfeatures).into();
        }
    }
    if subfeature.is_not_nil() {
        put(feature, Qsubfeatures, subfeature);
    }
    // Record the `provide' in the current load history.
    unsafe {
        globals.Vcurrent_load_list = ((Qprovide, feature), globals.Vcurrent_load_list).into();
    }
    // Run any load-hooks for this file.
    unsafe {
        if let Some((_, d)) = assq(feature.into(), globals.Vafter_load_alist).into() {
            Fmapc(Qfuncall, d);
        }
    }
    feature.into()
}
/// Return the argument, without evaluating it. `(quote x)' yields `x'.
/// Warning: `quote' does not construct its return value, but just returns
/// the value that was pre-constructed by the Lisp reader (see info node
/// `(elisp)Printed Representation').
/// This means that \\='(a. b) is not identical to (cons \\='a \\='b): the former
/// does not cons. Quoting should be reserved for constants that will
/// never be modified by side-effects, unless you like self-modifying code.
/// See the common pitfall in info node `(elisp)Rearrangement' for an example
/// of unexpected results when a quoted object is modified.
/// usage: (quote ARG)
#[lisp_fn(unevalled = "true")]
pub fn quote(args: LispCons) -> LispObject {
    // `quote' accepts exactly one (unevaluated) argument.
    let rest = args.cdr();
    if rest.is_not_nil() {
        wrong_number_of_arguments!(Qquote, args.length());
    }
    args.car()
}
/// Apply FUNCTION to each element of SEQUENCE, and make a list of the
/// results. The result is a list just as long as SEQUENCE. SEQUENCE
/// may be a list, a vector, a bool-vector, or a string.
#[lisp_fn]
pub fn mapc(function: LispObject, sequence: LispObject) -> LispObject {
    // Measure first; mapcar1 needs the element count up front.
    let seq_len = length(sequence) as EmacsInt;
    // Char-tables are explicitly rejected even though they have a length.
    if sequence.is_char_table() {
        wrong_type!(Qlistp, sequence);
    }
    // NOTE(review): a null output pointer presumably tells mapcar1 to
    // discard the mapped values — mapc is invoked for side effects only.
    mapcar1(seq_len, ptr::null_mut(), function, sequence);
    sequence
}
/* List of features currently being require'd, innermost first. */
declare_GC_protected_static!(require_nesting_list, Qnil);
// Unwind handler: restores the nesting list saved by `require' when the
// dynamic extent of that call ends (normally or via non-local exit).
unsafe extern "C" fn require_unwind(old_value: LispObject) {
    require_nesting_list = old_value;
}
/// If feature FEATURE is not loaded, load it from FILENAME.
/// If FEATURE is not a member of the list `features', then the feature is
/// not loaded; so load the file FILENAME.
///
/// If FILENAME is omitted, the printname of FEATURE is used as the file
/// name, and `load' will try to load this name appended with the suffix
/// `.elc', `.el', or the system-dependent suffix for dynamic module
/// files, in that order. The name without appended suffix will not be
/// used. See `get-load-suffixes' for the complete list of suffixes.
///
/// The directories in `load-path' are searched when trying to find the
/// file name.
///
/// If the optional third argument NOERROR is non-nil, then return nil if
/// the file is not found instead of signaling an error. Normally the
/// return value is FEATURE.
///
/// The normal messages at start and end of loading FILENAME are
/// suppressed.
#[lisp_fn(min = "1")]
pub fn require(feature: LispObject, filename: LispObject, noerror: LispObject) -> LispObject {
    let feature_sym: LispSymbolRef = feature.into();
    let current_load_list = unsafe { globals.Vcurrent_load_list };
    // Record the presence of `require' in this file
    // even if the feature specified is already loaded.
    // But not more than once in any file,
    // and not when we aren't loading or reading from a file.
    let from_file = unsafe { globals.load_in_progress }
        || current_load_list
            .iter_tails(LispConsEndChecks::off, LispConsCircularChecks::off)
            .any(|elt| elt.cdr().is_nil() && elt.car().is_string());
    if from_file {
        let tem = (Qrequire, feature).into();
        if member(tem, current_load_list).is_nil() {
            loadhist_attach(tem);
        }
    }
    // Already provided: nothing to load.
    if memq(feature, unsafe { globals.Vfeatures }).is_not_nil() {
        return feature;
    }
    let count = c_specpdl_index();
    // This is to make sure that loadup.el gives a clear picture
    // of what files are preloaded and when.
    if unsafe { globals.Vpurify_flag.is_not_nil() } {
        error!(
            "(require {}) while preparing to dump",
            feature_sym.symbol_name()
        );
    }
    // A certain amount of recursive `require' is legitimate,
    // but if we require the same feature recursively 3 times,
    // signal an error.
    let nesting = unsafe { require_nesting_list }
        .iter_cars(LispConsEndChecks::off, LispConsCircularChecks::off)
        .filter(|elt| equal(feature, *elt))
        .count();
    if nesting > 3 {
        error!(
            "Recursive `require' for feature `{}'",
            feature_sym.symbol_name()
        );
    }
    unsafe {
        // Update the list for any nested `require's that occur.
        record_unwind_protect(Some(require_unwind), require_nesting_list);
        require_nesting_list = (feature, require_nesting_list).into();
        // Value saved here is to be restored into Vautoload_queue
        record_unwind_protect(Some(un_autoload), Vautoload_queue);
        Vautoload_queue = Qt;
        // Load the file.
        let tem = Fload(
            if filename.is_nil() {
                feature_sym.symbol_name()
            } else {
                filename
            },
            noerror,
            Qt,
            Qnil,
            filename.is_nil().into(),
        );
        // If load failed entirely, return nil.
        if tem.is_nil() {
            return unbind_to(count, Qnil);
        }
    }
    // The load succeeded; the loaded file must itself have provided FEATURE.
    let tem = memq(feature, unsafe { globals.Vfeatures });
    if tem.is_nil() {
        let tem3 = car(car(unsafe { globals.Vload_history }));
        if tem3.is_nil() {
            error!("Required feature `{}' was not provided", feature);
        } else {
            // Cf autoload-do-load.
            error!(
                "Loading file {} failed to provide feature `{}'",
                tem3, feature
            );
        }
    }
    // Once loading finishes, don't undo it.
    unsafe {
        Vautoload_queue = Qt;
    }
    unbind_to(count, feature)
}
def_lisp_sym!(Qrequire, "require");
/// Concatenate all the arguments and make the result a list.
/// The result is a list whose elements are the elements of all the arguments.
/// Each argument may be a list, vector or string.
/// The last argument is not copied, just used as the tail of the new list.
/// usage: (append &rest SEQUENCES)
#[lisp_fn]
pub fn append(args: &mut [LispObject]) -> LispObject {
    let nargs = args.len() as isize;
    let argv = args.as_mut_ptr() as *mut LispObject;
    // Delegate to the C-level concat, requesting list output; the trailing
    // flag asks that the last argument be used as the tail without copying
    // (see the docstring above).
    unsafe { lisp_concat(nargs, argv, Lisp_Type::Lisp_Cons, true) }
}
/// Concatenate all the arguments and make the result a string.
/// The result is a string whose elements are the elements of all the arguments.
/// Each argument may be a string or a list or vector of characters (integers).
/// usage: (concat &rest SEQUENCES)
#[lisp_fn]
pub fn concat(args: &mut [LispObject]) -> LispObject {
    let nargs = args.len() as isize;
    let argv = args.as_mut_ptr() as *mut LispObject;
    // Delegate to the C-level concat, requesting string output; unlike
    // `append', no argument is shared with the result.
    unsafe { lisp_concat(nargs, argv, Lisp_Type::Lisp_String, false) }
}
/// Return the reversed copy of list, vector, or string SEQ.
/// See also the function `nreverse', which is used more often.
#[lisp_fn]
pub fn reverse(seq: LispObject) -> LispObject {
    if seq.is_nil() {
        Qnil
    } else if let Some(cons) = seq.as_cons() {
        // Lists: push each car onto a fresh accumulator; prepending
        // naturally reverses the order.
        cons.iter_cars(LispConsEndChecks::on, LispConsCircularChecks::on)
            .fold(Qnil, |cons, elt| LispObject::cons(elt, cons))
    } else if let Some(vector) = seq.as_vector() {
        // Vectors: fill a fresh uninitialized vector back-to-front.
        let size = vector.len();
        let mut new = unsafe { make_uninit_vector(size as isize) }.force_vector();
        for (i, item) in vector.iter().enumerate() {
            unsafe { new.set_unchecked(size - 1 - i, item) };
        }
        new.into()
    } else if let Some(boolvec) = seq.as_bool_vector() {
        // Bool-vectors: same back-to-front fill, one bit at a time.
        let nbits = boolvec.len();
        let mut new = unsafe { make_uninit_bool_vector(nbits as i64) }.force_bool_vector();
        for (i, item) in boolvec.iter().enumerate() {
            unsafe { new.set_unchecked(nbits - 1 - i, item.into()) }
        }
        new.into()
    } else if let Some(string) = seq.as_string() {
        let size = string.len_chars();
        let bytes = string.len_bytes();
        if!string.is_multibyte() {
            // Unibyte string: a simple byte-for-byte reversal suffices.
            let mut new = unsafe { make_uninit_string(size as i64) }.force_string();
            for (i, c) in string.as_slice().iter().enumerate() {
                new.set_byte(size - i as isize - 1, *c);
            }
            new.into()
        } else {
            // Multibyte string: walk the source forward (p) while filling
            // the new string from the back (q), copying one whole character
            // at a time so multi-byte sequences stay intact.
            let mut new =
                unsafe { make_uninit_multibyte_string(size as i64, bytes as i64) }.force_string();
            let mut p = string.const_data_ptr();
            let mut q = unsafe { new.data_ptr().add(bytes as usize) };
            let end = new.data_ptr();
            while q > end {
                unsafe {
                    let (c, len) = string_char_and_length(p);
                    p = p.add(len);
                    q = q.sub(len);
                    write_codepoint(slice::from_raw_parts_mut(q, len), c);
                }
            }
            new.into()
        }
    } else {
        wrong_type!(Qsequencep, seq);
    }
}
// Return true if O1 and O2 are equal. Do not quit or check for cycles.
// Use this only on arguments that are cycle-free and not too large and
// are not window configurations.
// `#[no_mangle]` + `extern "C"` keep this symbol callable from the C core.
#[no_mangle]
pub extern "C" fn equal_no_quit(o1: LispObject, o2: LispObject) -> bool {
    o1.equal_no_quit(o2)
}
// Platform shim: on Windows the load-average implementation lives in the
// C core, so route through remacs_sys.
#[cfg(windows)]
unsafe fn getloadaverage(loadavg: *mut libc::c_double, nelem: libc::c_int) -> libc::c_int {
    crate::remacs_sys::getloadavg(loadavg, nelem)
}
// On non-Windows systems libc provides getloadavg directly.
#[cfg(not(windows))]
unsafe fn getloadaverage(loadavg: *mut libc::c_double, nelem: libc::c_int) -> libc::c_int {
    libc::getloadavg(loadavg, nelem)
}
/// Return list of 1 minute, 5 minute and 15 minute load averages.
///
/// Each of the three load averages is multiplied by 100, then converted
/// to integer.
///
/// When USE-FLOATS is non-nil, floats will be used instead of integers.
/// These floats are not multiplied by 100.
///
/// If the 5-minute or 15-minute load averages are not available, return a
/// shortened list, containing only those averages which are available.
///
/// An error is thrown if the load average can't be obtained. In some
/// cases making it work would require Emacs being installed setuid or
/// setgid so that it can read kernel information, and that usually isn't
/// advisable.
#[lisp_fn(min = "0")]
pub fn load_average(use_floats: bool) -> Vec<LispNumber> {
    let mut samples: [libc::c_double; 3] = [0.0; 3];
    // A negative count means the platform cannot report load averages.
    let available = unsafe { getloadaverage(samples.as_mut_ptr(), 3) };
    if available < 0 {
        error!("load-average not implemented for this operating system");
    }
    // Only the first `available` entries are valid; convert each one.
    samples[..available as usize]
        .iter()
        .map(|&avg| {
            if use_floats {
                LispNumber::Float(avg)
            } else {
                // Scale by 100 before truncating so two decimal places survive.
                LispNumber::Fixnum((100.0 * avg) as i64)
            }
        })
        .collect()
}
/// Return a copy of ALIST.
/// This is an alist which represents the same mapping from objects to objects,
/// but does not share the alist structure with ALIST.
/// The objects mapped (cars and cdrs of elements of the alist)
/// are shared, however.
/// Elements of ALIST that are not conses are also shared.
#[lisp_fn]
pub fn copy_alist(mut alist: LispObject) -> LispObject {
    if alist.is_nil() {
        return alist;
    }
    // Shallow-copy the spine: concat with a single list argument yields a
    // fresh chain of conses whose elements are still shared with ALIST.
    let new_alist = unsafe { lisp_concat(1, &mut alist, Lisp_Type::Lisp_Cons, false) };
    for elt in new_alist.iter_tails(LispConsEndChecks::off, LispConsCircularChecks::off) {
        let front = elt.car();
        // To make a copy, unpack the cons and then make a new one while re-using the car and cdr.
        // Non-cons elements fail the destructuring and remain shared, per the docstring.
        if let Some((car, cdr)) = front.into() {
            elt.set_car((car, cdr));
        }
    }
    new_alist
}
/// Ask user a yes-or-no question.
///
/// Return t if answer is yes, and nil if the answer is no. PROMPT is
/// the string to display to ask the question. It should end in a
/// space; `yes-or-no-p' adds \"(yes or no) \" to it.
///
/// The user must confirm the answer with RET, and can edit it until
/// it has been confirmed.
///
/// If dialog boxes are supported, a dialog box will be used if
/// `last-nonmenu-event' is nil, and `use-dialog-box' is non-nil.
#[lisp_fn]
pub fn yes_or_no_p(prompt: LispStringRef) -> bool {
    // Prefer a graphical dialog when the triggering event was not a plain
    // keyboard event and the user has enabled dialog boxes.
    let use_popup = unsafe {
        (globals.last_nonmenu_event.is_nil() || globals.last_nonmenu_event.is_cons())
            && globals.use_dialog_box
            && globals.last_input_event.is_not_nil()
    };
    if use_popup {
        unsafe { redisplay_preserve_echo_area(4) };
        let menu = (prompt, (("Yes", true), ("No", false)));
        return unsafe { Fx_popup_dialog(Qt, menu.into(), Qnil) }.into();
    }
    let yes_or_no: LispObject = "(yes or no) ".into();
    let prompt = concat(&mut [prompt.into(), yes_or_no]).into();
    // Keep prompting until the (down-cased) answer is exactly "yes" or "no".
    loop {
        let ans: LispStringRef = downcase(read_from_minibuffer(
            prompt,
            Qnil,
            Qnil,
            false,
            Qyes_or_no_p_history,
            Qnil,
            false,
        ))
        .into();
        match ans.as_slice() {
            b"yes" => {
                return true;
            }
            b"no" => {
                return false;
            }
            _ => {
                // Reject the answer audibly, explain, and pause briefly
                // before re-prompting.
                ding(Qnil);
                unsafe {
                    Fdiscard_input();
                    message1("Please answer yes or no.\0".as_ptr() as *const libc::c_char);
                }
                sleep_for(2.0, None);
            }
        }
    }
}
/// Concatenate any number of lists by altering them.
/// Only the last argument is not altered, and need not be a list.
/// usage: (nconc &rest LISTS)
#[lisp_fn]
pub fn nconc(args: &mut [LispObject]) -> LispObject {
let mut val = Qnil;
let len = args.len();
for i in 0..len {
let elt = args[i];
if elt.is_nil() {
continue;
}
if val.is_nil() {
val = elt;
}
if (i + 1) == len {
break;
}
let cons: LispCons = elt.into();
let tail = cons
.iter_tails(LispConsEndChecks::off, LispConsCircularChecks::on)
.last()
.unwrap();
|
let next = args[i + 1];
tail.set_cdr(next);
if next.is_nil() {
args[i + 1] = tail.into();
}
}
val
}
include!(concat!(env!("OUT_DIR"), "/fns_exports.rs"));
|
random_line_split
|
|
fns.rs
|
//! Random utility Lisp functions.
use std::{ptr, slice};
use libc;
use remacs_macros::lisp_fn;
use crate::{
casefiddle::downcase,
dispnew::{ding, sleep_for},
eval::{record_unwind_protect, un_autoload, unbind_to},
lisp::LispObject,
lists::{assq, car, get, mapcar1, member, memq, put},
lists::{LispCons, LispConsCircularChecks, LispConsEndChecks},
minibuf::read_from_minibuffer,
multibyte::{string_char_and_length, write_codepoint, LispStringRef},
numbers::LispNumber,
obarray::loadhist_attach,
objects::equal,
remacs_sys::Vautoload_queue,
remacs_sys::{
concat as lisp_concat, globals, make_uninit_bool_vector, make_uninit_multibyte_string,
make_uninit_string, make_uninit_vector, message1, redisplay_preserve_echo_area,
},
remacs_sys::{EmacsInt, Lisp_Type},
remacs_sys::{Fdiscard_input, Fload, Fx_popup_dialog},
remacs_sys::{
Qfuncall, Qlistp, Qnil, Qprovide, Qquote, Qrequire, Qsequencep, Qsubfeatures, Qt,
Qyes_or_no_p_history,
},
symbols::LispSymbolRef,
threads::c_specpdl_index,
vectors::length,
};
/// Return t if FEATURE is present in this Emacs.
///
/// Use this to conditionalize execution of lisp code based on the
/// presence or absence of Emacs or environment extensions.
/// Use `provide' to declare that a feature is available. This function
/// looks at the value of the variable `features'. The optional argument
/// SUBFEATURE can be used to check a specific subfeature of FEATURE.
#[lisp_fn(min = "1")]
pub fn featurep(feature: LispSymbolRef, subfeature: LispObject) -> bool {
let mut tem = memq(feature.into(), unsafe { globals.Vfeatures });
if tem.is_not_nil() && subfeature.is_not_nil() {
tem = member(subfeature, get(feature, Qsubfeatures));
}
tem.is_not_nil()
}
/// Announce that FEATURE is a feature of the current Emacs.
/// The optional argument SUBFEATURES should be a list of symbols listing
/// particular subfeatures supported in this version of FEATURE.
#[lisp_fn(min = "1")]
pub fn provide(feature: LispSymbolRef, subfeature: LispObject) -> LispObject {
if!subfeature.is_list() {
wrong_type!(Qlistp, subfeature)
}
unsafe {
if Vautoload_queue.is_not_nil() {
Vautoload_queue = ((0, globals.Vfeatures), Vautoload_queue).into();
}
}
if memq(feature.into(), unsafe { globals.Vfeatures }).is_nil() {
unsafe {
globals.Vfeatures = (feature, globals.Vfeatures).into();
}
}
if subfeature.is_not_nil() {
put(feature, Qsubfeatures, subfeature);
}
unsafe {
globals.Vcurrent_load_list = ((Qprovide, feature), globals.Vcurrent_load_list).into();
}
// Run any load-hooks for this file.
unsafe {
if let Some((_, d)) = assq(feature.into(), globals.Vafter_load_alist).into() {
Fmapc(Qfuncall, d);
}
}
feature.into()
}
/// Return the argument, without evaluating it. `(quote x)' yields `x'.
/// Warning: `quote' does not construct its return value, but just returns
/// the value that was pre-constructed by the Lisp reader (see info node
/// `(elisp)Printed Representation').
/// This means that \\='(a. b) is not identical to (cons \\='a \\='b): the former
/// does not cons. Quoting should be reserved for constants that will
/// never be modified by side-effects, unless you like self-modifying code.
/// See the common pitfall in info node `(elisp)Rearrangement' for an example
/// of unexpected results when a quoted object is modified.
/// usage: (quote ARG)
#[lisp_fn(unevalled = "true")]
pub fn quote(args: LispCons) -> LispObject {
if args.cdr().is_not_nil() {
wrong_number_of_arguments!(Qquote, args.length());
}
args.car()
}
/// Apply FUNCTION to each element of SEQUENCE, and make a list of the
/// results. The result is a list just as long as SEQUENCE. SEQUENCE
/// may be a list, a vector, a bool-vector, or a string.
#[lisp_fn]
pub fn mapc(function: LispObject, sequence: LispObject) -> LispObject {
let leni = length(sequence) as EmacsInt;
if sequence.is_char_table() {
wrong_type!(Qlistp, sequence);
}
mapcar1(leni, ptr::null_mut(), function, sequence);
sequence
}
/* List of features currently being require'd, innermost first. */
declare_GC_protected_static!(require_nesting_list, Qnil);
unsafe extern "C" fn require_unwind(old_value: LispObject) {
require_nesting_list = old_value;
}
/// If feature FEATURE is not loaded, load it from FILENAME.
/// If FEATURE is not a member of the list `features', then the feature is
/// not loaded; so load the file FILENAME.
///
/// If FILENAME is omitted, the printname of FEATURE is used as the file
/// name, and `load' will try to load this name appended with the suffix
/// `.elc', `.el', or the system-dependent suffix for dynamic module
/// files, in that order. The name without appended suffix will not be
/// used. See `get-load-suffixes' for the complete list of suffixes.
///
/// The directories in `load-path' are searched when trying to find the
/// file name.
///
/// If the optional third argument NOERROR is non-nil, then return nil if
/// the file is not found instead of signaling an error. Normally the
/// return value is FEATURE.
///
/// The normal messages at start and end of loading FILENAME are
/// suppressed.
#[lisp_fn(min = "1")]
pub fn require(feature: LispObject, filename: LispObject, noerror: LispObject) -> LispObject {
let feature_sym: LispSymbolRef = feature.into();
let current_load_list = unsafe { globals.Vcurrent_load_list };
// Record the presence of `require' in this file
// even if the feature specified is already loaded.
// But not more than once in any file,
// and not when we aren't loading or reading from a file.
let from_file = unsafe { globals.load_in_progress }
|| current_load_list
.iter_tails(LispConsEndChecks::off, LispConsCircularChecks::off)
.any(|elt| elt.cdr().is_nil() && elt.car().is_string());
if from_file {
let tem = (Qrequire, feature).into();
if member(tem, current_load_list).is_nil() {
loadhist_attach(tem);
}
}
if memq(feature, unsafe { globals.Vfeatures }).is_not_nil() {
return feature;
}
let count = c_specpdl_index();
// This is to make sure that loadup.el gives a clear picture
// of what files are preloaded and when.
if unsafe { globals.Vpurify_flag.is_not_nil() } {
error!(
"(require {}) while preparing to dump",
feature_sym.symbol_name()
);
}
// A certain amount of recursive `require' is legitimate,
// but if we require the same feature recursively 3 times,
// signal an error.
let nesting = unsafe { require_nesting_list }
.iter_cars(LispConsEndChecks::off, LispConsCircularChecks::off)
.filter(|elt| equal(feature, *elt))
.count();
if nesting > 3 {
error!(
"Recursive `require' for feature `{}'",
feature_sym.symbol_name()
);
}
unsafe {
// Update the list for any nested `require's that occur.
record_unwind_protect(Some(require_unwind), require_nesting_list);
require_nesting_list = (feature, require_nesting_list).into();
// Value saved here is to be restored into Vautoload_queue
record_unwind_protect(Some(un_autoload), Vautoload_queue);
Vautoload_queue = Qt;
// Load the file.
let tem = Fload(
if filename.is_nil() {
feature_sym.symbol_name()
} else {
filename
},
noerror,
Qt,
Qnil,
filename.is_nil().into(),
);
// If load failed entirely, return nil.
if tem.is_nil() {
return unbind_to(count, Qnil);
}
}
let tem = memq(feature, unsafe { globals.Vfeatures });
if tem.is_nil() {
let tem3 = car(car(unsafe { globals.Vload_history }));
if tem3.is_nil() {
error!("Required feature `{}' was not provided", feature);
} else {
// Cf autoload-do-load.
error!(
"Loading file {} failed to provide feature `{}'",
tem3, feature
);
}
}
// Once loading finishes, don't undo it.
unsafe {
Vautoload_queue = Qt;
}
unbind_to(count, feature)
}
def_lisp_sym!(Qrequire, "require");
/// Concatenate all the arguments and make the result a list.
/// The result is a list whose elements are the elements of all the arguments.
/// Each argument may be a list, vector or string.
/// The last argument is not copied, just used as the tail of the new list.
/// usage: (append &rest SEQUENCES)
#[lisp_fn]
pub fn append(args: &mut [LispObject]) -> LispObject {
unsafe {
lisp_concat(
args.len() as isize,
args.as_mut_ptr() as *mut LispObject,
Lisp_Type::Lisp_Cons,
true,
)
}
}
/// Concatenate all the arguments and make the result a string.
/// The result is a string whose elements are the elements of all the arguments.
/// Each argument may be a string or a list or vector of characters (integers).
/// usage: (concat &rest SEQUENCES)
#[lisp_fn]
pub fn
|
(args: &mut [LispObject]) -> LispObject {
unsafe {
lisp_concat(
args.len() as isize,
args.as_mut_ptr() as *mut LispObject,
Lisp_Type::Lisp_String,
false,
)
}
}
/// Return the reversed copy of list, vector, or string SEQ.
/// See also the function `nreverse', which is used more often.
#[lisp_fn]
pub fn reverse(seq: LispObject) -> LispObject {
if seq.is_nil() {
Qnil
} else if let Some(cons) = seq.as_cons() {
cons.iter_cars(LispConsEndChecks::on, LispConsCircularChecks::on)
.fold(Qnil, |cons, elt| LispObject::cons(elt, cons))
} else if let Some(vector) = seq.as_vector() {
let size = vector.len();
let mut new = unsafe { make_uninit_vector(size as isize) }.force_vector();
for (i, item) in vector.iter().enumerate() {
unsafe { new.set_unchecked(size - 1 - i, item) };
}
new.into()
} else if let Some(boolvec) = seq.as_bool_vector() {
let nbits = boolvec.len();
let mut new = unsafe { make_uninit_bool_vector(nbits as i64) }.force_bool_vector();
for (i, item) in boolvec.iter().enumerate() {
unsafe { new.set_unchecked(nbits - 1 - i, item.into()) }
}
new.into()
} else if let Some(string) = seq.as_string() {
let size = string.len_chars();
let bytes = string.len_bytes();
if!string.is_multibyte() {
let mut new = unsafe { make_uninit_string(size as i64) }.force_string();
for (i, c) in string.as_slice().iter().enumerate() {
new.set_byte(size - i as isize - 1, *c);
}
new.into()
} else {
let mut new =
unsafe { make_uninit_multibyte_string(size as i64, bytes as i64) }.force_string();
let mut p = string.const_data_ptr();
let mut q = unsafe { new.data_ptr().add(bytes as usize) };
let end = new.data_ptr();
while q > end {
unsafe {
let (c, len) = string_char_and_length(p);
p = p.add(len);
q = q.sub(len);
write_codepoint(slice::from_raw_parts_mut(q, len), c);
}
}
new.into()
}
} else {
wrong_type!(Qsequencep, seq);
}
}
// Return true if O1 and O2 are equal. Do not quit or check for cycles.
// Use this only on arguments that are cycle-free and not too large and
// are not window configurations.
#[no_mangle]
pub extern "C" fn equal_no_quit(o1: LispObject, o2: LispObject) -> bool {
o1.equal_no_quit(o2)
}
#[cfg(windows)]
unsafe fn getloadaverage(loadavg: *mut libc::c_double, nelem: libc::c_int) -> libc::c_int {
crate::remacs_sys::getloadavg(loadavg, nelem)
}
#[cfg(not(windows))]
unsafe fn getloadaverage(loadavg: *mut libc::c_double, nelem: libc::c_int) -> libc::c_int {
libc::getloadavg(loadavg, nelem)
}
/// Return list of 1 minute, 5 minute and 15 minute load averages.
///
/// Each of the three load averages is multiplied by 100, then converted
/// to integer.
///
/// When USE-FLOATS is non-nil, floats will be used instead of integers.
/// These floats are not multiplied by 100.
///
/// If the 5-minute or 15-minute load averages are not available, return a
/// shortened list, containing only those averages which are available.
///
/// An error is thrown if the load average can't be obtained. In some
/// cases making it work would require Emacs being installed setuid or
/// setgid so that it can read kernel information, and that usually isn't
/// advisable.
#[lisp_fn(min = "0")]
pub fn load_average(use_floats: bool) -> Vec<LispNumber> {
let mut load_avg: [libc::c_double; 3] = [0.0, 0.0, 0.0];
let loads = unsafe { getloadaverage(load_avg.as_mut_ptr(), 3) };
if loads < 0 {
error!("load-average not implemented for this operating system");
}
(0..loads as usize)
.map(|i| {
if use_floats {
LispNumber::Float(load_avg[i])
} else {
LispNumber::Fixnum((100.0 * load_avg[i]) as i64)
}
})
.collect()
}
/// Return a copy of ALIST.
/// This is an alist which represents the same mapping from objects to objects,
/// but does not share the alist structure with ALIST.
/// The objects mapped (cars and cdrs of elements of the alist)
/// are shared, however.
/// Elements of ALIST that are not conses are also shared.
#[lisp_fn]
pub fn copy_alist(mut alist: LispObject) -> LispObject {
if alist.is_nil() {
return alist;
}
let new_alist = unsafe { lisp_concat(1, &mut alist, Lisp_Type::Lisp_Cons, false) };
for elt in new_alist.iter_tails(LispConsEndChecks::off, LispConsCircularChecks::off) {
let front = elt.car();
// To make a copy, unpack the cons and then make a new one while re-using the car and cdr.
if let Some((car, cdr)) = front.into() {
elt.set_car((car, cdr));
}
}
new_alist
}
/// Ask user a yes-or-no question.
///
/// Return t if answer is yes, and nil if the answer is no. PROMPT is
/// the string to display to ask the question. It should end in a
/// space; `yes-or-no-p' adds \"(yes or no) \" to it.
///
/// The user must confirm the answer with RET, and can edit it until
/// it has been confirmed.
///
/// If dialog boxes are supported, a dialog box will be used if
/// `last-nonmenu-event' is nil, and `use-dialog-box' is non-nil.
#[lisp_fn]
pub fn yes_or_no_p(prompt: LispStringRef) -> bool {
let use_popup = unsafe {
(globals.last_nonmenu_event.is_nil() || globals.last_nonmenu_event.is_cons())
&& globals.use_dialog_box
&& globals.last_input_event.is_not_nil()
};
if use_popup {
unsafe { redisplay_preserve_echo_area(4) };
let menu = (prompt, (("Yes", true), ("No", false)));
return unsafe { Fx_popup_dialog(Qt, menu.into(), Qnil) }.into();
}
let yes_or_no: LispObject = "(yes or no) ".into();
let prompt = concat(&mut [prompt.into(), yes_or_no]).into();
loop {
let ans: LispStringRef = downcase(read_from_minibuffer(
prompt,
Qnil,
Qnil,
false,
Qyes_or_no_p_history,
Qnil,
false,
))
.into();
match ans.as_slice() {
b"yes" => {
return true;
}
b"no" => {
return false;
}
_ => {
ding(Qnil);
unsafe {
Fdiscard_input();
message1("Please answer yes or no.\0".as_ptr() as *const libc::c_char);
}
sleep_for(2.0, None);
}
}
}
}
/// Concatenate any number of lists by altering them.
/// Only the last argument is not altered, and need not be a list.
/// usage: (nconc &rest LISTS)
#[lisp_fn]
pub fn nconc(args: &mut [LispObject]) -> LispObject {
let mut val = Qnil;
let len = args.len();
for i in 0..len {
let elt = args[i];
if elt.is_nil() {
continue;
}
if val.is_nil() {
val = elt;
}
if (i + 1) == len {
break;
}
let cons: LispCons = elt.into();
let tail = cons
.iter_tails(LispConsEndChecks::off, LispConsCircularChecks::on)
.last()
.unwrap();
let next = args[i + 1];
tail.set_cdr(next);
if next.is_nil() {
args[i + 1] = tail.into();
}
}
val
}
include!(concat!(env!("OUT_DIR"), "/fns_exports.rs"));
|
concat
|
identifier_name
|
build.rs
|
extern crate build_utils;
extern crate protoc_grpcio;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use build_utils::BuildRoot;
fn main()
|
"google/rpc/status.proto",
"google/longrunning/operations.proto",
"google/protobuf/empty.proto",
],
&[
thirdpartyprotobuf.join("googleapis"),
thirdpartyprotobuf.join("standard"),
thirdpartyprotobuf.join("rust-protobuf"),
],
&gen_dir,
).expect("Failed to compile protos!");
let listing = gen_dir.read_dir().unwrap();
let mut pub_mod_stmts = listing
.filter_map(|d| {
let dirent = d.unwrap();
let file_name = dirent.file_name().into_string().unwrap();
match file_name.trim_right_matches(".rs") {
"mod" | ".gitignore" => None,
module_name => Some(format!("pub mod {};", module_name)),
}
})
.collect::<Vec<_>>();
pub_mod_stmts.sort();
let contents = format!(
"\
// This file is generated. Do not edit.
{}
",
pub_mod_stmts.join("\n")
);
File::create(gen_dir.join("mod.rs"))
.and_then(|mut f| f.write_all(contents.as_bytes()))
.expect("Failed to write mod.rs")
}
|
{
let build_root = BuildRoot::find().unwrap();
let thirdpartyprotobuf = build_root.join("3rdparty/protobuf");
println!(
"cargo:rerun-if-changed={}",
thirdpartyprotobuf.to_str().unwrap()
);
let gen_dir = PathBuf::from("src/gen");
// Re-gen if, say, someone does a git clean on the gen dir but not the target dir. This ensures
// generated sources are available for reading by programmers and tools like rustfmt alike.
println!("cargo:rerun-if-changed={}", gen_dir.to_str().unwrap());
protoc_grpcio::compile_grpc_protos(
&[
"google/devtools/remoteexecution/v1test/remote_execution.proto",
"google/bytestream/bytestream.proto",
"google/rpc/code.proto",
"google/rpc/error_details.proto",
|
identifier_body
|
build.rs
|
extern crate build_utils;
extern crate protoc_grpcio;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use build_utils::BuildRoot;
fn main() {
let build_root = BuildRoot::find().unwrap();
let thirdpartyprotobuf = build_root.join("3rdparty/protobuf");
println!(
"cargo:rerun-if-changed={}",
thirdpartyprotobuf.to_str().unwrap()
);
let gen_dir = PathBuf::from("src/gen");
// Re-gen if, say, someone does a git clean on the gen dir but not the target dir. This ensures
// generated sources are available for reading by programmers and tools like rustfmt alike.
println!("cargo:rerun-if-changed={}", gen_dir.to_str().unwrap());
protoc_grpcio::compile_grpc_protos(
&[
"google/devtools/remoteexecution/v1test/remote_execution.proto",
"google/bytestream/bytestream.proto",
"google/rpc/code.proto",
"google/rpc/error_details.proto",
"google/rpc/status.proto",
"google/longrunning/operations.proto",
"google/protobuf/empty.proto",
],
&[
thirdpartyprotobuf.join("googleapis"),
thirdpartyprotobuf.join("standard"),
thirdpartyprotobuf.join("rust-protobuf"),
],
&gen_dir,
).expect("Failed to compile protos!");
|
.filter_map(|d| {
let dirent = d.unwrap();
let file_name = dirent.file_name().into_string().unwrap();
match file_name.trim_right_matches(".rs") {
"mod" | ".gitignore" => None,
module_name => Some(format!("pub mod {};", module_name)),
}
})
.collect::<Vec<_>>();
pub_mod_stmts.sort();
let contents = format!(
"\
// This file is generated. Do not edit.
{}
",
pub_mod_stmts.join("\n")
);
File::create(gen_dir.join("mod.rs"))
.and_then(|mut f| f.write_all(contents.as_bytes()))
.expect("Failed to write mod.rs")
}
|
let listing = gen_dir.read_dir().unwrap();
let mut pub_mod_stmts = listing
|
random_line_split
|
build.rs
|
extern crate build_utils;
extern crate protoc_grpcio;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use build_utils::BuildRoot;
fn
|
() {
let build_root = BuildRoot::find().unwrap();
let thirdpartyprotobuf = build_root.join("3rdparty/protobuf");
println!(
"cargo:rerun-if-changed={}",
thirdpartyprotobuf.to_str().unwrap()
);
let gen_dir = PathBuf::from("src/gen");
// Re-gen if, say, someone does a git clean on the gen dir but not the target dir. This ensures
// generated sources are available for reading by programmers and tools like rustfmt alike.
println!("cargo:rerun-if-changed={}", gen_dir.to_str().unwrap());
protoc_grpcio::compile_grpc_protos(
&[
"google/devtools/remoteexecution/v1test/remote_execution.proto",
"google/bytestream/bytestream.proto",
"google/rpc/code.proto",
"google/rpc/error_details.proto",
"google/rpc/status.proto",
"google/longrunning/operations.proto",
"google/protobuf/empty.proto",
],
&[
thirdpartyprotobuf.join("googleapis"),
thirdpartyprotobuf.join("standard"),
thirdpartyprotobuf.join("rust-protobuf"),
],
&gen_dir,
).expect("Failed to compile protos!");
let listing = gen_dir.read_dir().unwrap();
let mut pub_mod_stmts = listing
.filter_map(|d| {
let dirent = d.unwrap();
let file_name = dirent.file_name().into_string().unwrap();
match file_name.trim_right_matches(".rs") {
"mod" | ".gitignore" => None,
module_name => Some(format!("pub mod {};", module_name)),
}
})
.collect::<Vec<_>>();
pub_mod_stmts.sort();
let contents = format!(
"\
// This file is generated. Do not edit.
{}
",
pub_mod_stmts.join("\n")
);
File::create(gen_dir.join("mod.rs"))
.and_then(|mut f| f.write_all(contents.as_bytes()))
.expect("Failed to write mod.rs")
}
|
main
|
identifier_name
|
main.rs
|
extern crate rusty_machine as rm;
extern crate rand;
use rm::linalg::{Matrix, BaseMatrix};
use rand::{Rng, ThreadRng};
use std::collections::HashMap;
fn generate_arm(content_id: &usize, arms: &HashMap<usize, Arm>, feature_dim: &usize) -> Arm {
let arm: Arm;
if!arms.contains_key(content_id) {
arm = Arm::new(*feature_dim, *content_id);
}else{
arm = arms.get(content_id).unwrap().clone();
};
arm
}
#[derive(Debug, Clone)]
struct Arm{
content_id : usize,
alpha : f64,
norm_mean : Matrix<f64>,
cov_matrix: Matrix<f64>,
win_rate : f64,
win : f64,
lose : f64,
}
impl Arm {
fn new(feature_dim: usize, content_id: usize) -> Arm {
Arm {
content_id: content_id,
norm_mean: Matrix::zeros(1, feature_dim),
cov_matrix: Matrix::ones(1, feature_dim),
alpha: 0.0001,
win_rate: 0.,
win: 0.,
lose: 0.,
}
}
fn update(&mut self, features: Matrix<f64>, is_click: bool) {
// error of diag
self.cov_matrix += Matrix::new(self.cov_matrix.rows(),
self.cov_matrix.cols(),
(&features.transpose()*&features).diag());
let cost_of_click = is_click as i8 as f64;
let feat_mul_cost_vec = features.iter().map(|x| cost_of_click*x).collect::<Vec<f64>>();
let feat_mul_cost = Matrix::new(features.rows(), features.cols(), feat_mul_cost_vec);
self.norm_mean += feat_mul_cost;
if is_click{
self.win += 1.;
} else{
self.lose += 1.;
}
self.win_rate = &self.win/(&self.win + &self.lose);
}
fn predict(&self, features: &Matrix<f64>) -> f64 {
let one_div_cov_vec = self.cov_matrix.iter().map(|x| 1.0/x).collect::<Vec<f64>>();
let one_div_cov = Matrix::new(self.cov_matrix.rows(),
self.cov_matrix.cols(), one_div_cov_vec);
// Since the covariance matrix preserves only the diagonal components,
// it suffices to take the inverse matrix
let theta = &one_div_cov.elemul(&self.norm_mean);
// Again, the inverse matrix of the covariance matrix
// is computed by taking reciprocal
let mut tmp: f64 = ((features.elemul(&one_div_cov))*features.transpose()).data()[0];
tmp = &tmp.sqrt()*&self.alpha;
(theta*features.transpose()).data()[0] + &tmp
}
fn print_result(&self) {
println!("content_id:{}, total_num:{}, win_rate:{}",
&self.content_id, &self.win+&self.lose, &self.win_rate);
}
}
struct Viewer{
gender: String,
rng: ThreadRng,
}
impl Viewer {
fn new(gender: String) -> Viewer{
Viewer{
gender: gender,
rng: rand::thread_rng(),
}
}
fn view(&mut self, content_id: &usize) -> bool{
|
struct Rulet{
rng: ThreadRng,
}
impl Rulet{
fn new() -> Rulet {
Rulet{
rng: rand::thread_rng(),
}
}
fn generate_features(&mut self, viewer: &Viewer) -> Matrix<f64> {
let features = Some(&viewer.gender)
.and_then(|gender|
if gender=="man" {Some(Matrix::new(1,2,vec![1.,0.]))}
else {Some(Matrix::new(1,2,vec![0.,1.]))}
).unwrap();
features
}
fn generate_content(&mut self) -> usize{
self.rng.gen_range(0, 10)
}
fn generate_gender(&mut self) -> String {
if self.rng.next_f32() > 0.5{
return "man".to_string();
} else{
return "women".to_string();
}
}
}
fn main() {
/*Context is for men and women only
Men are easy to click on ads with id 5 or less
Women are easy to click on ads with id 6 or higher
*/
let feature_dim = 2;
let num_of_views = 10000;
let mut rulet = Rulet::new();
let mut content_id: usize;
let mut features: Matrix<f64>;
let mut is_clicked: bool;
let mut arms: HashMap<usize, Arm> = HashMap::new();
for _ in 0..num_of_views {
let mut viewer = Viewer::new(rulet.generate_gender());
content_id = rulet.generate_content();
features = rulet.generate_features(&viewer);
is_clicked = viewer.view(&content_id);
let mut arm = generate_arm(&content_id, &arms, &feature_dim);
arm.update(features, is_clicked);
arms.remove(&content_id);
arms.insert(content_id, arm);
}
let man_mat: Matrix<f64> = Matrix::new(1,2, vec![1.,0.]);
let woman_mat: Matrix<f64> = Matrix::new(1,2, vec![0.,1.]);
println!("print result======");
for (_, arm) in arms.iter() {
&arm.print_result();
println!("Click rate when men browse: {}", &arm.predict(&man_mat) );
println!("Click rate when women browse: {}", &arm.predict(&woman_mat) );
}
}
|
if &self.gender=="man" {
// Men are easy to click on ads with id 5 or less
if *content_id < 6 {
return Some(self.rng.next_f32()).and_then(|n| if n>0.3 {Some(true)} else {Some(false)}).unwrap();
} else{
return Some(self.rng.next_f32()).and_then(|n| if n>0.7 {Some(true)} else {Some(false)}).unwrap();
}
} else {
// Women are easy to click on ads with id 6 or higher
if *content_id > 5{
return Some(self.rng.next_f32()).and_then(|n| if n>0.3 {Some(true)} else {Some(false)}).unwrap();
} else {
return Some(self.rng.next_f32()).and_then(|n| if n>0.7 {Some(true)} else {Some(false)}).unwrap();
}
}
}
}
|
identifier_body
|
main.rs
|
extern crate rusty_machine as rm;
extern crate rand;
use rm::linalg::{Matrix, BaseMatrix};
use rand::{Rng, ThreadRng};
use std::collections::HashMap;
fn generate_arm(content_id: &usize, arms: &HashMap<usize, Arm>, feature_dim: &usize) -> Arm {
let arm: Arm;
if!arms.contains_key(content_id) {
arm = Arm::new(*feature_dim, *content_id);
}else{
arm = arms.get(content_id).unwrap().clone();
};
arm
}
#[derive(Debug, Clone)]
struct Arm{
content_id : usize,
alpha : f64,
norm_mean : Matrix<f64>,
cov_matrix: Matrix<f64>,
win_rate : f64,
win : f64,
lose : f64,
}
impl Arm {
fn new(feature_dim: usize, content_id: usize) -> Arm {
Arm {
content_id: content_id,
norm_mean: Matrix::zeros(1, feature_dim),
cov_matrix: Matrix::ones(1, feature_dim),
alpha: 0.0001,
win_rate: 0.,
win: 0.,
lose: 0.,
}
}
fn update(&mut self, features: Matrix<f64>, is_click: bool) {
// error of diag
self.cov_matrix += Matrix::new(self.cov_matrix.rows(),
self.cov_matrix.cols(),
(&features.transpose()*&features).diag());
let cost_of_click = is_click as i8 as f64;
let feat_mul_cost_vec = features.iter().map(|x| cost_of_click*x).collect::<Vec<f64>>();
let feat_mul_cost = Matrix::new(features.rows(), features.cols(), feat_mul_cost_vec);
self.norm_mean += feat_mul_cost;
if is_click{
self.win += 1.;
} else{
self.lose += 1.;
}
self.win_rate = &self.win/(&self.win + &self.lose);
}
fn predict(&self, features: &Matrix<f64>) -> f64 {
let one_div_cov_vec = self.cov_matrix.iter().map(|x| 1.0/x).collect::<Vec<f64>>();
let one_div_cov = Matrix::new(self.cov_matrix.rows(),
self.cov_matrix.cols(), one_div_cov_vec);
// Since the covariance matrix preserves only the diagonal components,
// it suffices to take the inverse matrix
let theta = &one_div_cov.elemul(&self.norm_mean);
// Again, the inverse matrix of the covariance matrix
// is computed by taking reciprocal
let mut tmp: f64 = ((features.elemul(&one_div_cov))*features.transpose()).data()[0];
tmp = &tmp.sqrt()*&self.alpha;
(theta*features.transpose()).data()[0] + &tmp
}
fn print_result(&self) {
println!("content_id:{}, total_num:{}, win_rate:{}",
&self.content_id, &self.win+&self.lose, &self.win_rate);
}
}
struct Viewer{
gender: String,
rng: ThreadRng,
}
impl Viewer {
fn new(gender: String) -> Viewer{
Viewer{
gender: gender,
rng: rand::thread_rng(),
}
}
fn view(&mut self, content_id: &usize) -> bool{
if &self.gender=="man" {
// Men are easy to click on ads with id 5 or less
if *content_id < 6 {
return Some(self.rng.next_f32()).and_then(|n| if n>0.3 {Some(true)} else {Some(false)}).unwrap();
} else{
return Some(self.rng.next_f32()).and_then(|n| if n>0.7 {Some(true)} else {Some(false)}).unwrap();
}
} else {
// Women are easy to click on ads with id 6 or higher
if *content_id > 5{
return Some(self.rng.next_f32()).and_then(|n| if n>0.3 {Some(true)} else {Some(false)}).unwrap();
} else {
return Some(self.rng.next_f32()).and_then(|n| if n>0.7 {Some(true)} else {Some(false)}).unwrap();
}
}
}
}
struct Rulet{
rng: ThreadRng,
}
impl Rulet{
fn new() -> Rulet {
Rulet{
rng: rand::thread_rng(),
}
}
fn generate_features(&mut self, viewer: &Viewer) -> Matrix<f64> {
let features = Some(&viewer.gender)
.and_then(|gender|
if gender=="man" {Some(Matrix::new(1,2,vec![1.,0.]))}
else {Some(Matrix::new(1,2,vec![0.,1.]))}
).unwrap();
features
}
fn generate_content(&mut self) -> usize{
self.rng.gen_range(0, 10)
}
fn generate_gender(&mut self) -> String {
if self.rng.next_f32() > 0.5{
return "man".to_string();
} else{
|
}
}
fn main() {
/*Context is for men and women only
Men are easy to click on ads with id 5 or less
Women are easy to click on ads with id 6 or higher
*/
let feature_dim = 2;
let num_of_views = 10000;
let mut rulet = Rulet::new();
let mut content_id: usize;
let mut features: Matrix<f64>;
let mut is_clicked: bool;
let mut arms: HashMap<usize, Arm> = HashMap::new();
for _ in 0..num_of_views {
let mut viewer = Viewer::new(rulet.generate_gender());
content_id = rulet.generate_content();
features = rulet.generate_features(&viewer);
is_clicked = viewer.view(&content_id);
let mut arm = generate_arm(&content_id, &arms, &feature_dim);
arm.update(features, is_clicked);
arms.remove(&content_id);
arms.insert(content_id, arm);
}
let man_mat: Matrix<f64> = Matrix::new(1,2, vec![1.,0.]);
let woman_mat: Matrix<f64> = Matrix::new(1,2, vec![0.,1.]);
println!("print result======");
for (_, arm) in arms.iter() {
&arm.print_result();
println!("Click rate when men browse: {}", &arm.predict(&man_mat) );
println!("Click rate when women browse: {}", &arm.predict(&woman_mat) );
}
}
|
return "women".to_string();
}
|
conditional_block
|
main.rs
|
extern crate rusty_machine as rm;
extern crate rand;
use rm::linalg::{Matrix, BaseMatrix};
use rand::{Rng, ThreadRng};
use std::collections::HashMap;
fn generate_arm(content_id: &usize, arms: &HashMap<usize, Arm>, feature_dim: &usize) -> Arm {
let arm: Arm;
if!arms.contains_key(content_id) {
arm = Arm::new(*feature_dim, *content_id);
}else{
arm = arms.get(content_id).unwrap().clone();
};
arm
}
#[derive(Debug, Clone)]
struct Arm{
content_id : usize,
alpha : f64,
norm_mean : Matrix<f64>,
cov_matrix: Matrix<f64>,
win_rate : f64,
win : f64,
lose : f64,
}
impl Arm {
fn new(feature_dim: usize, content_id: usize) -> Arm {
Arm {
content_id: content_id,
norm_mean: Matrix::zeros(1, feature_dim),
cov_matrix: Matrix::ones(1, feature_dim),
alpha: 0.0001,
win_rate: 0.,
win: 0.,
lose: 0.,
}
}
fn update(&mut self, features: Matrix<f64>, is_click: bool) {
// error of diag
self.cov_matrix += Matrix::new(self.cov_matrix.rows(),
self.cov_matrix.cols(),
(&features.transpose()*&features).diag());
let cost_of_click = is_click as i8 as f64;
let feat_mul_cost_vec = features.iter().map(|x| cost_of_click*x).collect::<Vec<f64>>();
let feat_mul_cost = Matrix::new(features.rows(), features.cols(), feat_mul_cost_vec);
self.norm_mean += feat_mul_cost;
if is_click{
self.win += 1.;
} else{
self.lose += 1.;
}
self.win_rate = &self.win/(&self.win + &self.lose);
}
fn predict(&self, features: &Matrix<f64>) -> f64 {
let one_div_cov_vec = self.cov_matrix.iter().map(|x| 1.0/x).collect::<Vec<f64>>();
let one_div_cov = Matrix::new(self.cov_matrix.rows(),
|
let theta = &one_div_cov.elemul(&self.norm_mean);
// Again, the inverse matrix of the covariance matrix
// is computed by taking reciprocal
let mut tmp: f64 = ((features.elemul(&one_div_cov))*features.transpose()).data()[0];
tmp = &tmp.sqrt()*&self.alpha;
(theta*features.transpose()).data()[0] + &tmp
}
fn print_result(&self) {
println!("content_id:{}, total_num:{}, win_rate:{}",
&self.content_id, &self.win+&self.lose, &self.win_rate);
}
}
struct Viewer{
gender: String,
rng: ThreadRng,
}
impl Viewer {
fn new(gender: String) -> Viewer{
Viewer{
gender: gender,
rng: rand::thread_rng(),
}
}
fn view(&mut self, content_id: &usize) -> bool{
if &self.gender=="man" {
// Men are easy to click on ads with id 5 or less
if *content_id < 6 {
return Some(self.rng.next_f32()).and_then(|n| if n>0.3 {Some(true)} else {Some(false)}).unwrap();
} else{
return Some(self.rng.next_f32()).and_then(|n| if n>0.7 {Some(true)} else {Some(false)}).unwrap();
}
} else {
// Women are easy to click on ads with id 6 or higher
if *content_id > 5{
return Some(self.rng.next_f32()).and_then(|n| if n>0.3 {Some(true)} else {Some(false)}).unwrap();
} else {
return Some(self.rng.next_f32()).and_then(|n| if n>0.7 {Some(true)} else {Some(false)}).unwrap();
}
}
}
}
struct Rulet{
rng: ThreadRng,
}
impl Rulet{
fn new() -> Rulet {
Rulet{
rng: rand::thread_rng(),
}
}
fn generate_features(&mut self, viewer: &Viewer) -> Matrix<f64> {
let features = Some(&viewer.gender)
.and_then(|gender|
if gender=="man" {Some(Matrix::new(1,2,vec![1.,0.]))}
else {Some(Matrix::new(1,2,vec![0.,1.]))}
).unwrap();
features
}
fn generate_content(&mut self) -> usize{
self.rng.gen_range(0, 10)
}
fn generate_gender(&mut self) -> String {
if self.rng.next_f32() > 0.5{
return "man".to_string();
} else{
return "women".to_string();
}
}
}
fn main() {
/*Context is for men and women only
Men are easy to click on ads with id 5 or less
Women are easy to click on ads with id 6 or higher
*/
let feature_dim = 2;
let num_of_views = 10000;
let mut rulet = Rulet::new();
let mut content_id: usize;
let mut features: Matrix<f64>;
let mut is_clicked: bool;
let mut arms: HashMap<usize, Arm> = HashMap::new();
for _ in 0..num_of_views {
let mut viewer = Viewer::new(rulet.generate_gender());
content_id = rulet.generate_content();
features = rulet.generate_features(&viewer);
is_clicked = viewer.view(&content_id);
let mut arm = generate_arm(&content_id, &arms, &feature_dim);
arm.update(features, is_clicked);
arms.remove(&content_id);
arms.insert(content_id, arm);
}
let man_mat: Matrix<f64> = Matrix::new(1,2, vec![1.,0.]);
let woman_mat: Matrix<f64> = Matrix::new(1,2, vec![0.,1.]);
println!("print result======");
for (_, arm) in arms.iter() {
&arm.print_result();
println!("Click rate when men browse: {}", &arm.predict(&man_mat) );
println!("Click rate when women browse: {}", &arm.predict(&woman_mat) );
}
}
|
self.cov_matrix.cols(), one_div_cov_vec);
// Since the covariance matrix preserves only the diagonal components,
// it suffices to take the inverse matrix
|
random_line_split
|
main.rs
|
extern crate rusty_machine as rm;
extern crate rand;
use rm::linalg::{Matrix, BaseMatrix};
use rand::{Rng, ThreadRng};
use std::collections::HashMap;
fn generate_arm(content_id: &usize, arms: &HashMap<usize, Arm>, feature_dim: &usize) -> Arm {
let arm: Arm;
if!arms.contains_key(content_id) {
arm = Arm::new(*feature_dim, *content_id);
}else{
arm = arms.get(content_id).unwrap().clone();
};
arm
}
#[derive(Debug, Clone)]
struct Arm{
content_id : usize,
alpha : f64,
norm_mean : Matrix<f64>,
cov_matrix: Matrix<f64>,
win_rate : f64,
win : f64,
lose : f64,
}
impl Arm {
fn new(feature_dim: usize, content_id: usize) -> Arm {
Arm {
content_id: content_id,
norm_mean: Matrix::zeros(1, feature_dim),
cov_matrix: Matrix::ones(1, feature_dim),
alpha: 0.0001,
win_rate: 0.,
win: 0.,
lose: 0.,
}
}
fn update(&mut self, features: Matrix<f64>, is_click: bool) {
// error of diag
self.cov_matrix += Matrix::new(self.cov_matrix.rows(),
self.cov_matrix.cols(),
(&features.transpose()*&features).diag());
let cost_of_click = is_click as i8 as f64;
let feat_mul_cost_vec = features.iter().map(|x| cost_of_click*x).collect::<Vec<f64>>();
let feat_mul_cost = Matrix::new(features.rows(), features.cols(), feat_mul_cost_vec);
self.norm_mean += feat_mul_cost;
if is_click{
self.win += 1.;
} else{
self.lose += 1.;
}
self.win_rate = &self.win/(&self.win + &self.lose);
}
fn predict(&self, features: &Matrix<f64>) -> f64 {
let one_div_cov_vec = self.cov_matrix.iter().map(|x| 1.0/x).collect::<Vec<f64>>();
let one_div_cov = Matrix::new(self.cov_matrix.rows(),
self.cov_matrix.cols(), one_div_cov_vec);
// Since the covariance matrix preserves only the diagonal components,
// it suffices to take the inverse matrix
let theta = &one_div_cov.elemul(&self.norm_mean);
// Again, the inverse matrix of the covariance matrix
// is computed by taking reciprocal
let mut tmp: f64 = ((features.elemul(&one_div_cov))*features.transpose()).data()[0];
tmp = &tmp.sqrt()*&self.alpha;
(theta*features.transpose()).data()[0] + &tmp
}
fn print_result(&self) {
println!("content_id:{}, total_num:{}, win_rate:{}",
&self.content_id, &self.win+&self.lose, &self.win_rate);
}
}
struct Viewer{
gender: String,
rng: ThreadRng,
}
impl Viewer {
fn new(gender: String) -> Viewer{
Viewer{
gender: gender,
rng: rand::thread_rng(),
}
}
fn view(&mut self, content_id: &usize) -> bool{
if &self.gender=="man" {
// Men are easy to click on ads with id 5 or less
if *content_id < 6 {
return Some(self.rng.next_f32()).and_then(|n| if n>0.3 {Some(true)} else {Some(false)}).unwrap();
} else{
return Some(self.rng.next_f32()).and_then(|n| if n>0.7 {Some(true)} else {Some(false)}).unwrap();
}
} else {
// Women are easy to click on ads with id 6 or higher
if *content_id > 5{
return Some(self.rng.next_f32()).and_then(|n| if n>0.3 {Some(true)} else {Some(false)}).unwrap();
} else {
return Some(self.rng.next_f32()).and_then(|n| if n>0.7 {Some(true)} else {Some(false)}).unwrap();
}
}
}
}
struct Rule
|
rng: ThreadRng,
}
impl Rulet{
fn new() -> Rulet {
Rulet{
rng: rand::thread_rng(),
}
}
fn generate_features(&mut self, viewer: &Viewer) -> Matrix<f64> {
let features = Some(&viewer.gender)
.and_then(|gender|
if gender=="man" {Some(Matrix::new(1,2,vec![1.,0.]))}
else {Some(Matrix::new(1,2,vec![0.,1.]))}
).unwrap();
features
}
fn generate_content(&mut self) -> usize{
self.rng.gen_range(0, 10)
}
fn generate_gender(&mut self) -> String {
if self.rng.next_f32() > 0.5{
return "man".to_string();
} else{
return "women".to_string();
}
}
}
fn main() {
/*Context is for men and women only
Men are easy to click on ads with id 5 or less
Women are easy to click on ads with id 6 or higher
*/
let feature_dim = 2;
let num_of_views = 10000;
let mut rulet = Rulet::new();
let mut content_id: usize;
let mut features: Matrix<f64>;
let mut is_clicked: bool;
let mut arms: HashMap<usize, Arm> = HashMap::new();
for _ in 0..num_of_views {
let mut viewer = Viewer::new(rulet.generate_gender());
content_id = rulet.generate_content();
features = rulet.generate_features(&viewer);
is_clicked = viewer.view(&content_id);
let mut arm = generate_arm(&content_id, &arms, &feature_dim);
arm.update(features, is_clicked);
arms.remove(&content_id);
arms.insert(content_id, arm);
}
let man_mat: Matrix<f64> = Matrix::new(1,2, vec![1.,0.]);
let woman_mat: Matrix<f64> = Matrix::new(1,2, vec![0.,1.]);
println!("print result======");
for (_, arm) in arms.iter() {
&arm.print_result();
println!("Click rate when men browse: {}", &arm.predict(&man_mat) );
println!("Click rate when women browse: {}", &arm.predict(&woman_mat) );
}
}
|
t{
|
identifier_name
|
temporary_roles.rs
|
use crate::database::{
schema::*,
models::U64,
};
insertable! {
#[derive(Debug, Queryable, Identifiable, AsChangeset)]
pub struct TemporaryRole,
#[derive(Debug, Insertable)]
#[table_name = "temporary_roles"]
pub struct NewTemporaryRole {
pub guild_id: U64,
pub user_id: U64,
pub role_id: U64,
pub message_id: U64,
pub channel_id: Option<i64>,
pub messages: Option<i32>,
pub expires_on: Option<i64>,
}
}
impl NewTemporaryRole {
pub fn new(guild_id: u64, user_id: u64, role_id: u64, message_id: u64, channel_id: Option<u64>, messages: Option<i32>, expires_on: Option<i64>) -> Self
|
}
|
{
NewTemporaryRole {
guild_id: guild_id.into(),
user_id: user_id.into(),
role_id: role_id.into(),
message_id: message_id.into(),
channel_id: channel_id.map(|x| x as i64),
messages,
expires_on,
}
}
|
identifier_body
|
temporary_roles.rs
|
use crate::database::{
schema::*,
models::U64,
};
insertable! {
#[derive(Debug, Queryable, Identifiable, AsChangeset)]
pub struct TemporaryRole,
#[derive(Debug, Insertable)]
#[table_name = "temporary_roles"]
pub struct NewTemporaryRole {
pub guild_id: U64,
pub user_id: U64,
pub role_id: U64,
pub message_id: U64,
pub channel_id: Option<i64>,
pub messages: Option<i32>,
pub expires_on: Option<i64>,
}
}
impl NewTemporaryRole {
pub fn
|
(guild_id: u64, user_id: u64, role_id: u64, message_id: u64, channel_id: Option<u64>, messages: Option<i32>, expires_on: Option<i64>) -> Self {
NewTemporaryRole {
guild_id: guild_id.into(),
user_id: user_id.into(),
role_id: role_id.into(),
message_id: message_id.into(),
channel_id: channel_id.map(|x| x as i64),
messages,
expires_on,
}
}
}
|
new
|
identifier_name
|
temporary_roles.rs
|
use crate::database::{
schema::*,
models::U64,
};
insertable! {
#[derive(Debug, Queryable, Identifiable, AsChangeset)]
pub struct TemporaryRole,
#[derive(Debug, Insertable)]
#[table_name = "temporary_roles"]
pub struct NewTemporaryRole {
pub guild_id: U64,
pub user_id: U64,
pub role_id: U64,
pub message_id: U64,
pub channel_id: Option<i64>,
pub messages: Option<i32>,
pub expires_on: Option<i64>,
}
}
|
impl NewTemporaryRole {
pub fn new(guild_id: u64, user_id: u64, role_id: u64, message_id: u64, channel_id: Option<u64>, messages: Option<i32>, expires_on: Option<i64>) -> Self {
NewTemporaryRole {
guild_id: guild_id.into(),
user_id: user_id.into(),
role_id: role_id.into(),
message_id: message_id.into(),
channel_id: channel_id.map(|x| x as i64),
messages,
expires_on,
}
}
}
|
random_line_split
|
|
time.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use ipc_channel::ipc;
use profile::time;
use profile_traits::ipc as ProfiledIpc;
use profile_traits::time::{ProfilerCategory, ProfilerData, ProfilerMsg};
use servo_config::opts::OutputOptions;
use std::thread;
use std::time::Duration;
#[test]
fn time_profiler_smoke_test() {
let chan = time::Profiler::create(&None, None);
assert!(true, "Can create the profiler thread");
let (ipcchan, _ipcport) = ipc::channel().unwrap();
chan.send(ProfilerMsg::Exit(ipcchan));
assert!(true, "Can tell the profiler thread to exit");
}
#[test]
fn time_profiler_stats_test() {
let even_data = vec![1.234, 3.24567, 3.54578, 5.0, 5.324, 7.345,
9.2345, 10.2342345, 13.2599, 15.0];
let (even_mean, even_median, even_min, even_max) = time::Profiler::get_statistics(&even_data);
assert_eq!(7.34230845, even_mean);
assert_eq!(7.345, even_median);
assert_eq!(1.234, even_min);
assert_eq!(15.0, even_max);
let odd_data = vec![1.234, 3.24567, 3.54578, 5.0, 5.324, 7.345,
9.2345, 10.2342345, 13.2599];
let (odd_mean, odd_median, odd_min, odd_max) = time::Profiler::get_statistics(&odd_data);
assert_eq!(6.491453833333334, odd_mean);
assert_eq!(5.324, odd_median);
assert_eq!(1.234, odd_min);
assert_eq!(13.2599, odd_max);
}
#[test]
fn channel_profiler_test() {
let chan = time::Profiler::create(&Some(OutputOptions::Stdout(5.0)), None);
let (profiled_sender, profiled_receiver) = ProfiledIpc::channel(chan.clone()).unwrap();
thread::spawn(move || {
thread::sleep(Duration::from_secs(2));
profiled_sender.send(43).unwrap();
});
let val_profile_receiver = profiled_receiver.recv().unwrap();
assert_eq!(val_profile_receiver, 43);
let (sender, receiver) = ipc::channel().unwrap();
chan.send(ProfilerMsg::Get((ProfilerCategory::IpcReceiver, None), sender.clone()));
match receiver.recv().unwrap() {
// asserts that the time spent in the sleeping thread is more than 1500 milliseconds
ProfilerData::Record(time_data) => assert!(time_data[0] > 1.5e3),
ProfilerData::NoRecords => assert!(false),
};
}
#[test]
fn bytes_channel_profiler_test() {
let chan = time::Profiler::create(&Some(OutputOptions::Stdout(5.0)), None);
let (profiled_sender, profiled_receiver) = ProfiledIpc::bytes_channel(chan.clone()).unwrap();
thread::spawn(move || {
thread::sleep(Duration::from_secs(2));
profiled_sender.send(&[1, 2, 3]).unwrap();
});
let val_profile_receiver = profiled_receiver.recv().unwrap();
assert_eq!(val_profile_receiver, [1, 2, 3]);
let (sender, receiver) = ipc::channel().unwrap();
chan.send(ProfilerMsg::Get((ProfilerCategory::IpcBytesReceiver, None), sender.clone()));
match receiver.recv().unwrap() {
// asserts that the time spent in the sleeping thread is more than 1500 milliseconds
ProfilerData::Record(time_data) => assert!(time_data[0] > 1.5e3),
ProfilerData::NoRecords => assert!(false),
};
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
|
time::Profiler::get_statistics(&unsorted_data);
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn time_profiler_data_len_zero() {
let zero_data = vec![];
time::Profiler::get_statistics(&zero_data);
}
|
fn time_profiler_unsorted_stats_test() {
let unsorted_data = vec![5.0, 7.5, 1.0, 8.9];
|
random_line_split
|
time.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use ipc_channel::ipc;
use profile::time;
use profile_traits::ipc as ProfiledIpc;
use profile_traits::time::{ProfilerCategory, ProfilerData, ProfilerMsg};
use servo_config::opts::OutputOptions;
use std::thread;
use std::time::Duration;
#[test]
fn time_profiler_smoke_test() {
let chan = time::Profiler::create(&None, None);
assert!(true, "Can create the profiler thread");
let (ipcchan, _ipcport) = ipc::channel().unwrap();
chan.send(ProfilerMsg::Exit(ipcchan));
assert!(true, "Can tell the profiler thread to exit");
}
#[test]
fn time_profiler_stats_test()
|
#[test]
fn channel_profiler_test() {
let chan = time::Profiler::create(&Some(OutputOptions::Stdout(5.0)), None);
let (profiled_sender, profiled_receiver) = ProfiledIpc::channel(chan.clone()).unwrap();
thread::spawn(move || {
thread::sleep(Duration::from_secs(2));
profiled_sender.send(43).unwrap();
});
let val_profile_receiver = profiled_receiver.recv().unwrap();
assert_eq!(val_profile_receiver, 43);
let (sender, receiver) = ipc::channel().unwrap();
chan.send(ProfilerMsg::Get((ProfilerCategory::IpcReceiver, None), sender.clone()));
match receiver.recv().unwrap() {
// asserts that the time spent in the sleeping thread is more than 1500 milliseconds
ProfilerData::Record(time_data) => assert!(time_data[0] > 1.5e3),
ProfilerData::NoRecords => assert!(false),
};
}
#[test]
fn bytes_channel_profiler_test() {
let chan = time::Profiler::create(&Some(OutputOptions::Stdout(5.0)), None);
let (profiled_sender, profiled_receiver) = ProfiledIpc::bytes_channel(chan.clone()).unwrap();
thread::spawn(move || {
thread::sleep(Duration::from_secs(2));
profiled_sender.send(&[1, 2, 3]).unwrap();
});
let val_profile_receiver = profiled_receiver.recv().unwrap();
assert_eq!(val_profile_receiver, [1, 2, 3]);
let (sender, receiver) = ipc::channel().unwrap();
chan.send(ProfilerMsg::Get((ProfilerCategory::IpcBytesReceiver, None), sender.clone()));
match receiver.recv().unwrap() {
// asserts that the time spent in the sleeping thread is more than 1500 milliseconds
ProfilerData::Record(time_data) => assert!(time_data[0] > 1.5e3),
ProfilerData::NoRecords => assert!(false),
};
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn time_profiler_unsorted_stats_test() {
let unsorted_data = vec![5.0, 7.5, 1.0, 8.9];
time::Profiler::get_statistics(&unsorted_data);
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn time_profiler_data_len_zero() {
let zero_data = vec![];
time::Profiler::get_statistics(&zero_data);
}
|
{
let even_data = vec![1.234, 3.24567, 3.54578, 5.0, 5.324, 7.345,
9.2345, 10.2342345, 13.2599, 15.0];
let (even_mean, even_median, even_min, even_max) = time::Profiler::get_statistics(&even_data);
assert_eq!(7.34230845, even_mean);
assert_eq!(7.345, even_median);
assert_eq!(1.234, even_min);
assert_eq!(15.0, even_max);
let odd_data = vec![1.234, 3.24567, 3.54578, 5.0, 5.324, 7.345,
9.2345, 10.2342345, 13.2599];
let (odd_mean, odd_median, odd_min, odd_max) = time::Profiler::get_statistics(&odd_data);
assert_eq!(6.491453833333334, odd_mean);
assert_eq!(5.324, odd_median);
assert_eq!(1.234, odd_min);
assert_eq!(13.2599, odd_max);
}
|
identifier_body
|
time.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use ipc_channel::ipc;
use profile::time;
use profile_traits::ipc as ProfiledIpc;
use profile_traits::time::{ProfilerCategory, ProfilerData, ProfilerMsg};
use servo_config::opts::OutputOptions;
use std::thread;
use std::time::Duration;
#[test]
fn
|
() {
let chan = time::Profiler::create(&None, None);
assert!(true, "Can create the profiler thread");
let (ipcchan, _ipcport) = ipc::channel().unwrap();
chan.send(ProfilerMsg::Exit(ipcchan));
assert!(true, "Can tell the profiler thread to exit");
}
#[test]
fn time_profiler_stats_test() {
let even_data = vec![1.234, 3.24567, 3.54578, 5.0, 5.324, 7.345,
9.2345, 10.2342345, 13.2599, 15.0];
let (even_mean, even_median, even_min, even_max) = time::Profiler::get_statistics(&even_data);
assert_eq!(7.34230845, even_mean);
assert_eq!(7.345, even_median);
assert_eq!(1.234, even_min);
assert_eq!(15.0, even_max);
let odd_data = vec![1.234, 3.24567, 3.54578, 5.0, 5.324, 7.345,
9.2345, 10.2342345, 13.2599];
let (odd_mean, odd_median, odd_min, odd_max) = time::Profiler::get_statistics(&odd_data);
assert_eq!(6.491453833333334, odd_mean);
assert_eq!(5.324, odd_median);
assert_eq!(1.234, odd_min);
assert_eq!(13.2599, odd_max);
}
#[test]
fn channel_profiler_test() {
let chan = time::Profiler::create(&Some(OutputOptions::Stdout(5.0)), None);
let (profiled_sender, profiled_receiver) = ProfiledIpc::channel(chan.clone()).unwrap();
thread::spawn(move || {
thread::sleep(Duration::from_secs(2));
profiled_sender.send(43).unwrap();
});
let val_profile_receiver = profiled_receiver.recv().unwrap();
assert_eq!(val_profile_receiver, 43);
let (sender, receiver) = ipc::channel().unwrap();
chan.send(ProfilerMsg::Get((ProfilerCategory::IpcReceiver, None), sender.clone()));
match receiver.recv().unwrap() {
// asserts that the time spent in the sleeping thread is more than 1500 milliseconds
ProfilerData::Record(time_data) => assert!(time_data[0] > 1.5e3),
ProfilerData::NoRecords => assert!(false),
};
}
#[test]
fn bytes_channel_profiler_test() {
let chan = time::Profiler::create(&Some(OutputOptions::Stdout(5.0)), None);
let (profiled_sender, profiled_receiver) = ProfiledIpc::bytes_channel(chan.clone()).unwrap();
thread::spawn(move || {
thread::sleep(Duration::from_secs(2));
profiled_sender.send(&[1, 2, 3]).unwrap();
});
let val_profile_receiver = profiled_receiver.recv().unwrap();
assert_eq!(val_profile_receiver, [1, 2, 3]);
let (sender, receiver) = ipc::channel().unwrap();
chan.send(ProfilerMsg::Get((ProfilerCategory::IpcBytesReceiver, None), sender.clone()));
match receiver.recv().unwrap() {
// asserts that the time spent in the sleeping thread is more than 1500 milliseconds
ProfilerData::Record(time_data) => assert!(time_data[0] > 1.5e3),
ProfilerData::NoRecords => assert!(false),
};
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn time_profiler_unsorted_stats_test() {
let unsorted_data = vec![5.0, 7.5, 1.0, 8.9];
time::Profiler::get_statistics(&unsorted_data);
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn time_profiler_data_len_zero() {
let zero_data = vec![];
time::Profiler::get_statistics(&zero_data);
}
|
time_profiler_smoke_test
|
identifier_name
|
non_blocking.rs
|
//! A demonstration of constructing and using a non-blocking stream.
//!
//! Audio from the default input device is passed directly to the default output device in a duplex
//! stream, so beware of feedback!
extern crate portaudio;
use portaudio as pa;
const SAMPLE_RATE: f64 = 44_100.0;
const FRAMES: u32 = 256;
const CHANNELS: i32 = 2;
const INTERLEAVED: bool = true;
fn main() {
run().unwrap()
}
fn run() -> Result<(), pa::Error> {
let pa = try!(pa::PortAudio::new());
println!("PortAudio:");
println!("version: {}", pa.version());
println!("version text: {:?}", pa.version_text());
println!("host count: {}", try!(pa.host_api_count()));
let default_host = try!(pa.default_host_api());
println!("default host: {:#?}", pa.host_api_info(default_host));
let def_input = try!(pa.default_input_device());
let input_info = try!(pa.device_info(def_input));
println!("Default input device info: {:#?}", &input_info);
// Construct the input stream parameters.
let latency = input_info.default_low_input_latency;
let input_params = pa::StreamParameters::<f32>::new(def_input, CHANNELS, INTERLEAVED, latency);
let def_output = try!(pa.default_output_device());
let output_info = try!(pa.device_info(def_output));
println!("Default output device info: {:#?}", &output_info);
// Construct the output stream parameters.
let latency = output_info.default_low_output_latency;
let output_params = pa::StreamParameters::new(def_output, CHANNELS, INTERLEAVED, latency);
// Check that the stream format is supported.
try!(pa.is_duplex_format_supported(input_params, output_params, SAMPLE_RATE));
// Construct the settings with which we'll open our duplex stream.
let settings = pa::DuplexStreamSettings::new(input_params, output_params, SAMPLE_RATE, FRAMES);
// Once the countdown reaches 0 we'll close the stream.
let mut count_down = 3.0;
// Keep track of the last `current_time` so we can calculate the delta time.
let mut maybe_last_time = None;
// We'll use this channel to send the count_down to the main thread for fun.
let (sender, receiver) = ::std::sync::mpsc::channel();
// A callback to pass to the non-blocking stream.
let callback = move |pa::DuplexStreamCallbackArgs { in_buffer, out_buffer, frames, time,.. }| {
let current_time = time.current;
let prev_time = maybe_last_time.unwrap_or(current_time);
let dt = current_time - prev_time;
count_down -= dt;
maybe_last_time = Some(current_time);
assert!(frames == FRAMES as usize);
sender.send(count_down).ok();
// Pass the input straight to the output - BEWARE OF FEEDBACK!
for (output_sample, input_sample) in out_buffer.iter_mut().zip(in_buffer.iter()) {
*output_sample = *input_sample;
}
if count_down > 0.0 { pa::Continue } else
|
};
// Construct a stream with input and output sample types of f32.
let mut stream = try!(pa.open_non_blocking_stream(settings, callback));
try!(stream.start());
// Loop while the non-blocking stream is active.
while let true = try!(stream.is_active()) {
// Do some stuff!
while let Ok(count_down) = receiver.try_recv() {
println!("count_down: {:?}", count_down);
}
}
try!(stream.stop());
Ok(())
}
|
{ pa::Complete }
|
conditional_block
|
non_blocking.rs
|
//! A demonstration of constructing and using a non-blocking stream.
//!
//! Audio from the default input device is passed directly to the default output device in a duplex
//! stream, so beware of feedback!
extern crate portaudio;
use portaudio as pa;
const SAMPLE_RATE: f64 = 44_100.0;
const FRAMES: u32 = 256;
const CHANNELS: i32 = 2;
const INTERLEAVED: bool = true;
fn main() {
run().unwrap()
}
fn run() -> Result<(), pa::Error> {
let pa = try!(pa::PortAudio::new());
println!("PortAudio:");
println!("version: {}", pa.version());
println!("version text: {:?}", pa.version_text());
println!("host count: {}", try!(pa.host_api_count()));
let default_host = try!(pa.default_host_api());
println!("default host: {:#?}", pa.host_api_info(default_host));
let def_input = try!(pa.default_input_device());
let input_info = try!(pa.device_info(def_input));
println!("Default input device info: {:#?}", &input_info);
// Construct the input stream parameters.
let latency = input_info.default_low_input_latency;
let input_params = pa::StreamParameters::<f32>::new(def_input, CHANNELS, INTERLEAVED, latency);
let def_output = try!(pa.default_output_device());
|
let latency = output_info.default_low_output_latency;
let output_params = pa::StreamParameters::new(def_output, CHANNELS, INTERLEAVED, latency);
// Check that the stream format is supported.
try!(pa.is_duplex_format_supported(input_params, output_params, SAMPLE_RATE));
// Construct the settings with which we'll open our duplex stream.
let settings = pa::DuplexStreamSettings::new(input_params, output_params, SAMPLE_RATE, FRAMES);
// Once the countdown reaches 0 we'll close the stream.
let mut count_down = 3.0;
// Keep track of the last `current_time` so we can calculate the delta time.
let mut maybe_last_time = None;
// We'll use this channel to send the count_down to the main thread for fun.
let (sender, receiver) = ::std::sync::mpsc::channel();
// A callback to pass to the non-blocking stream.
let callback = move |pa::DuplexStreamCallbackArgs { in_buffer, out_buffer, frames, time,.. }| {
let current_time = time.current;
let prev_time = maybe_last_time.unwrap_or(current_time);
let dt = current_time - prev_time;
count_down -= dt;
maybe_last_time = Some(current_time);
assert!(frames == FRAMES as usize);
sender.send(count_down).ok();
// Pass the input straight to the output - BEWARE OF FEEDBACK!
for (output_sample, input_sample) in out_buffer.iter_mut().zip(in_buffer.iter()) {
*output_sample = *input_sample;
}
if count_down > 0.0 { pa::Continue } else { pa::Complete }
};
// Construct a stream with input and output sample types of f32.
let mut stream = try!(pa.open_non_blocking_stream(settings, callback));
try!(stream.start());
// Loop while the non-blocking stream is active.
while let true = try!(stream.is_active()) {
// Do some stuff!
while let Ok(count_down) = receiver.try_recv() {
println!("count_down: {:?}", count_down);
}
}
try!(stream.stop());
Ok(())
}
|
let output_info = try!(pa.device_info(def_output));
println!("Default output device info: {:#?}", &output_info);
// Construct the output stream parameters.
|
random_line_split
|
non_blocking.rs
|
//! A demonstration of constructing and using a non-blocking stream.
//!
//! Audio from the default input device is passed directly to the default output device in a duplex
//! stream, so beware of feedback!
extern crate portaudio;
use portaudio as pa;
const SAMPLE_RATE: f64 = 44_100.0;
const FRAMES: u32 = 256;
const CHANNELS: i32 = 2;
const INTERLEAVED: bool = true;
fn main()
|
fn run() -> Result<(), pa::Error> {
let pa = try!(pa::PortAudio::new());
println!("PortAudio:");
println!("version: {}", pa.version());
println!("version text: {:?}", pa.version_text());
println!("host count: {}", try!(pa.host_api_count()));
let default_host = try!(pa.default_host_api());
println!("default host: {:#?}", pa.host_api_info(default_host));
let def_input = try!(pa.default_input_device());
let input_info = try!(pa.device_info(def_input));
println!("Default input device info: {:#?}", &input_info);
// Construct the input stream parameters.
let latency = input_info.default_low_input_latency;
let input_params = pa::StreamParameters::<f32>::new(def_input, CHANNELS, INTERLEAVED, latency);
let def_output = try!(pa.default_output_device());
let output_info = try!(pa.device_info(def_output));
println!("Default output device info: {:#?}", &output_info);
// Construct the output stream parameters.
let latency = output_info.default_low_output_latency;
let output_params = pa::StreamParameters::new(def_output, CHANNELS, INTERLEAVED, latency);
// Check that the stream format is supported.
try!(pa.is_duplex_format_supported(input_params, output_params, SAMPLE_RATE));
// Construct the settings with which we'll open our duplex stream.
let settings = pa::DuplexStreamSettings::new(input_params, output_params, SAMPLE_RATE, FRAMES);
// Once the countdown reaches 0 we'll close the stream.
let mut count_down = 3.0;
// Keep track of the last `current_time` so we can calculate the delta time.
let mut maybe_last_time = None;
// We'll use this channel to send the count_down to the main thread for fun.
let (sender, receiver) = ::std::sync::mpsc::channel();
// A callback to pass to the non-blocking stream.
let callback = move |pa::DuplexStreamCallbackArgs { in_buffer, out_buffer, frames, time,.. }| {
let current_time = time.current;
let prev_time = maybe_last_time.unwrap_or(current_time);
let dt = current_time - prev_time;
count_down -= dt;
maybe_last_time = Some(current_time);
assert!(frames == FRAMES as usize);
sender.send(count_down).ok();
// Pass the input straight to the output - BEWARE OF FEEDBACK!
for (output_sample, input_sample) in out_buffer.iter_mut().zip(in_buffer.iter()) {
*output_sample = *input_sample;
}
if count_down > 0.0 { pa::Continue } else { pa::Complete }
};
// Construct a stream with input and output sample types of f32.
let mut stream = try!(pa.open_non_blocking_stream(settings, callback));
try!(stream.start());
// Loop while the non-blocking stream is active.
while let true = try!(stream.is_active()) {
// Do some stuff!
while let Ok(count_down) = receiver.try_recv() {
println!("count_down: {:?}", count_down);
}
}
try!(stream.stop());
Ok(())
}
|
{
run().unwrap()
}
|
identifier_body
|
non_blocking.rs
|
//! A demonstration of constructing and using a non-blocking stream.
//!
//! Audio from the default input device is passed directly to the default output device in a duplex
//! stream, so beware of feedback!
extern crate portaudio;
use portaudio as pa;
const SAMPLE_RATE: f64 = 44_100.0;
const FRAMES: u32 = 256;
const CHANNELS: i32 = 2;
const INTERLEAVED: bool = true;
fn main() {
run().unwrap()
}
fn
|
() -> Result<(), pa::Error> {
let pa = try!(pa::PortAudio::new());
println!("PortAudio:");
println!("version: {}", pa.version());
println!("version text: {:?}", pa.version_text());
println!("host count: {}", try!(pa.host_api_count()));
let default_host = try!(pa.default_host_api());
println!("default host: {:#?}", pa.host_api_info(default_host));
let def_input = try!(pa.default_input_device());
let input_info = try!(pa.device_info(def_input));
println!("Default input device info: {:#?}", &input_info);
// Construct the input stream parameters.
let latency = input_info.default_low_input_latency;
let input_params = pa::StreamParameters::<f32>::new(def_input, CHANNELS, INTERLEAVED, latency);
let def_output = try!(pa.default_output_device());
let output_info = try!(pa.device_info(def_output));
println!("Default output device info: {:#?}", &output_info);
// Construct the output stream parameters.
let latency = output_info.default_low_output_latency;
let output_params = pa::StreamParameters::new(def_output, CHANNELS, INTERLEAVED, latency);
// Check that the stream format is supported.
try!(pa.is_duplex_format_supported(input_params, output_params, SAMPLE_RATE));
// Construct the settings with which we'll open our duplex stream.
let settings = pa::DuplexStreamSettings::new(input_params, output_params, SAMPLE_RATE, FRAMES);
// Once the countdown reaches 0 we'll close the stream.
let mut count_down = 3.0;
// Keep track of the last `current_time` so we can calculate the delta time.
let mut maybe_last_time = None;
// We'll use this channel to send the count_down to the main thread for fun.
let (sender, receiver) = ::std::sync::mpsc::channel();
// A callback to pass to the non-blocking stream.
let callback = move |pa::DuplexStreamCallbackArgs { in_buffer, out_buffer, frames, time,.. }| {
let current_time = time.current;
let prev_time = maybe_last_time.unwrap_or(current_time);
let dt = current_time - prev_time;
count_down -= dt;
maybe_last_time = Some(current_time);
assert!(frames == FRAMES as usize);
sender.send(count_down).ok();
// Pass the input straight to the output - BEWARE OF FEEDBACK!
for (output_sample, input_sample) in out_buffer.iter_mut().zip(in_buffer.iter()) {
*output_sample = *input_sample;
}
if count_down > 0.0 { pa::Continue } else { pa::Complete }
};
// Construct a stream with input and output sample types of f32.
let mut stream = try!(pa.open_non_blocking_stream(settings, callback));
try!(stream.start());
// Loop while the non-blocking stream is active.
while let true = try!(stream.is_active()) {
// Do some stuff!
while let Ok(count_down) = receiver.try_recv() {
println!("count_down: {:?}", count_down);
}
}
try!(stream.stop());
Ok(())
}
|
run
|
identifier_name
|
segmentgraph.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::io::Write;
use clidispatch::errors;
use cliparser::define_flags;
use dag::render::render_segment_dag;
use super::Repo;
use super::Result;
use super::IO;
define_flags! {
pub struct GraphOpts {
/// segment level (0 is flat)
#[short('l')]
level: i64 = 0,
/// segment group (master|non_master)
#[short('g')]
group: String = "master",
}
}
pub fn run(opts: GraphOpts, io: &IO, repo: Repo) -> Result<u8> {
let group = match opts.group.as_ref() {
"master" => dag::Group::MASTER,
"non_master" => dag::Group::NON_MASTER,
_ => return Err(errors::Abort("invalid group".into()).into()),
};
let level: dag::Level = match opts.level.try_into() {
Ok(level) => level,
_ => return Err(errors::Abort("invalid level".into()).into()),
};
let mut out = io.output();
write!(out, "{}, Level: {}\n", group, level)?;
let dag = dag::Dag::open(repo.store_path().join("segments/v1"))?;
render_segment_dag(out, &dag, level, group)?;
Ok(0)
}
pub fn name() -> &'static str {
"debugsegmentgraph"
}
pub fn doc() -> &'static str
|
{
"display segment graph for a given group and level"
}
|
identifier_body
|
|
segmentgraph.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::io::Write;
use clidispatch::errors;
use cliparser::define_flags;
use dag::render::render_segment_dag;
use super::Repo;
use super::Result;
use super::IO;
define_flags! {
pub struct GraphOpts {
/// segment level (0 is flat)
#[short('l')]
level: i64 = 0,
/// segment group (master|non_master)
#[short('g')]
group: String = "master",
}
}
pub fn run(opts: GraphOpts, io: &IO, repo: Repo) -> Result<u8> {
let group = match opts.group.as_ref() {
"master" => dag::Group::MASTER,
"non_master" => dag::Group::NON_MASTER,
_ => return Err(errors::Abort("invalid group".into()).into()),
|
Ok(level) => level,
_ => return Err(errors::Abort("invalid level".into()).into()),
};
let mut out = io.output();
write!(out, "{}, Level: {}\n", group, level)?;
let dag = dag::Dag::open(repo.store_path().join("segments/v1"))?;
render_segment_dag(out, &dag, level, group)?;
Ok(0)
}
pub fn name() -> &'static str {
"debugsegmentgraph"
}
pub fn doc() -> &'static str {
"display segment graph for a given group and level"
}
|
};
let level: dag::Level = match opts.level.try_into() {
|
random_line_split
|
segmentgraph.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::io::Write;
use clidispatch::errors;
use cliparser::define_flags;
use dag::render::render_segment_dag;
use super::Repo;
use super::Result;
use super::IO;
define_flags! {
pub struct GraphOpts {
/// segment level (0 is flat)
#[short('l')]
level: i64 = 0,
/// segment group (master|non_master)
#[short('g')]
group: String = "master",
}
}
pub fn run(opts: GraphOpts, io: &IO, repo: Repo) -> Result<u8> {
let group = match opts.group.as_ref() {
"master" => dag::Group::MASTER,
"non_master" => dag::Group::NON_MASTER,
_ => return Err(errors::Abort("invalid group".into()).into()),
};
let level: dag::Level = match opts.level.try_into() {
Ok(level) => level,
_ => return Err(errors::Abort("invalid level".into()).into()),
};
let mut out = io.output();
write!(out, "{}, Level: {}\n", group, level)?;
let dag = dag::Dag::open(repo.store_path().join("segments/v1"))?;
render_segment_dag(out, &dag, level, group)?;
Ok(0)
}
pub fn
|
() -> &'static str {
"debugsegmentgraph"
}
pub fn doc() -> &'static str {
"display segment graph for a given group and level"
}
|
name
|
identifier_name
|
creader.rs
|
None => i.ident.to_string(),
};
Some(CrateInfo {
ident: i.ident.to_string(),
name: name,
id: i.id,
should_link: should_link(i),
})
}
_ => None
}
}
fn existing_match(&self, name: &str, hash: Option<&Svh>, kind: PathKind)
-> Option<ast::CrateNum> {
let mut ret = None;
self.sess.cstore.iter_crate_data(|cnum, data| {
if data.name!= name { return }
|
}
// When the hash is None we're dealing with a top-level dependency
// in which case we may have a specification on the command line for
// this library. Even though an upstream library may have loaded
// something of the same name, we have to make sure it was loaded
// from the exact same location as well.
//
// We're also sure to compare *paths*, not actual byte slices. The
// `source` stores paths which are normalized which may be different
// from the strings on the command line.
let source = self.sess.cstore.get_used_crate_source(cnum).unwrap();
if let Some(locs) = self.sess.opts.externs.get(name) {
let found = locs.iter().any(|l| {
let l = fs::canonicalize(l).ok();
source.dylib.as_ref().map(|p| &p.0) == l.as_ref() ||
source.rlib.as_ref().map(|p| &p.0) == l.as_ref()
});
if found {
ret = Some(cnum);
}
return
}
// Alright, so we've gotten this far which means that `data` has the
// right name, we don't have a hash, and we don't have a --extern
// pointing for ourselves. We're still not quite yet done because we
// have to make sure that this crate was found in the crate lookup
// path (this is a top-level dependency) as we don't want to
// implicitly load anything inside the dependency lookup path.
let prev_kind = source.dylib.as_ref().or(source.rlib.as_ref())
.unwrap().1;
if ret.is_none() && (prev_kind == kind || prev_kind == PathKind::All) {
ret = Some(cnum);
}
});
return ret;
}
fn register_crate(&mut self,
root: &Option<CratePaths>,
ident: &str,
name: &str,
span: Span,
lib: loader::Library)
-> (ast::CrateNum, Rc<cstore::crate_metadata>,
cstore::CrateSource) {
// Claim this crate number and cache it
let cnum = self.next_crate_num;
self.next_crate_num += 1;
// Stash paths for top-most crate locally if necessary.
let crate_paths = if root.is_none() {
Some(CratePaths {
ident: ident.to_string(),
dylib: lib.dylib.clone().map(|p| p.0),
rlib: lib.rlib.clone().map(|p| p.0),
})
} else {
None
};
// Maintain a reference to the top most crate.
let root = if root.is_some() { root } else { &crate_paths };
let loader::Library { dylib, rlib, metadata } = lib;
let cnum_map = self.resolve_crate_deps(root, metadata.as_slice(), span);
let staged_api = self.is_staged_api(metadata.as_slice());
let cmeta = Rc::new( cstore::crate_metadata {
name: name.to_string(),
local_path: RefCell::new(SmallVector::zero()),
data: metadata,
cnum_map: cnum_map,
cnum: cnum,
codemap_import_info: RefCell::new(vec![]),
span: span,
staged_api: staged_api
});
let source = cstore::CrateSource {
dylib: dylib,
rlib: rlib,
cnum: cnum,
};
self.sess.cstore.set_crate_data(cnum, cmeta.clone());
self.sess.cstore.add_used_crate_source(source.clone());
(cnum, cmeta, source)
}
fn is_staged_api(&self, data: &[u8]) -> bool {
let attrs = decoder::get_crate_attributes(data);
for attr in &attrs {
if &attr.name()[..] == "staged_api" {
match attr.node.value.node { ast::MetaWord(_) => return true, _ => (/*pass*/) }
}
}
return false;
}
fn resolve_crate(&mut self,
root: &Option<CratePaths>,
ident: &str,
name: &str,
hash: Option<&Svh>,
span: Span,
kind: PathKind)
-> (ast::CrateNum, Rc<cstore::crate_metadata>,
cstore::CrateSource) {
match self.existing_match(name, hash, kind) {
None => {
let mut load_ctxt = loader::Context {
sess: self.sess,
span: span,
ident: ident,
crate_name: name,
hash: hash.map(|a| &*a),
filesearch: self.sess.target_filesearch(kind),
target: &self.sess.target.target,
triple: &self.sess.opts.target_triple,
root: root,
rejected_via_hash: vec!(),
rejected_via_triple: vec!(),
rejected_via_kind: vec!(),
should_match_name: true,
};
let library = load_ctxt.load_library_crate();
self.register_crate(root, ident, name, span, library)
}
Some(cnum) => (cnum,
self.sess.cstore.get_crate_data(cnum),
self.sess.cstore.get_used_crate_source(cnum).unwrap())
}
}
// Go through the crate metadata and load any crates that it references
fn resolve_crate_deps(&mut self,
root: &Option<CratePaths>,
cdata: &[u8], span : Span)
-> cstore::cnum_map {
debug!("resolving deps of external crate");
// The map from crate numbers in the crate we're resolving to local crate
// numbers
decoder::get_crate_deps(cdata).iter().map(|dep| {
debug!("resolving dep crate {} hash: `{}`", dep.name, dep.hash);
let (local_cnum, _, _) = self.resolve_crate(root,
&dep.name,
&dep.name,
Some(&dep.hash),
span,
PathKind::Dependency);
(dep.cnum, local_cnum)
}).collect()
}
fn read_extension_crate(&mut self, span: Span, info: &CrateInfo) -> ExtensionCrate {
let target_triple = &self.sess.opts.target_triple[..];
let is_cross = target_triple!= config::host_triple();
let mut should_link = info.should_link &&!is_cross;
let mut target_only = false;
let ident = info.ident.clone();
let name = info.name.clone();
let mut load_ctxt = loader::Context {
sess: self.sess,
span: span,
ident: &ident[..],
crate_name: &name[..],
hash: None,
filesearch: self.sess.host_filesearch(PathKind::Crate),
target: &self.sess.host,
triple: config::host_triple(),
root: &None,
rejected_via_hash: vec!(),
rejected_via_triple: vec!(),
rejected_via_kind: vec!(),
should_match_name: true,
};
let library = match load_ctxt.maybe_load_library_crate() {
Some(l) => l,
None if is_cross => {
// Try loading from target crates. This will abort later if we
// try to load a plugin registrar function,
target_only = true;
should_link = info.should_link;
load_ctxt.target = &self.sess.target.target;
load_ctxt.triple = target_triple;
load_ctxt.filesearch = self.sess.target_filesearch(PathKind::Crate);
load_ctxt.load_library_crate()
}
None => { load_ctxt.report_load_errs(); unreachable!() },
};
let dylib = library.dylib.clone();
let register = should_link && self.existing_match(&info.name,
None,
PathKind::Crate).is_none();
let metadata = if register {
// Register crate now to avoid double-reading metadata
let (_, cmd, _) = self.register_crate(&None, &info.ident,
&info.name, span, library);
PMDSource::Registered(cmd)
} else {
// Not registering the crate; just hold on to the metadata
PMDSource::Owned(library.metadata)
};
ExtensionCrate {
metadata: metadata,
dylib: dylib.map(|p| p.0),
target_only: target_only,
}
}
/// Read exported macros.
pub fn read_exported_macros(&mut self, krate: &ast::Item) -> Vec<ast::MacroDef> {
let ci = self.extract_crate_info(krate).unwrap();
let ekrate = self.read_extension_crate(krate.span, &ci);
let source_name = format!("<{} macros>", krate.ident);
let mut macros = vec![];
decoder::each_exported_macro(ekrate.metadata.as_slice(),
&*self.sess.cstore.intr,
|name, attrs, body| {
// NB: Don't use parse::parse_tts_from_source_str because it parses with
// quote_depth > 0.
let mut p = parse::new_parser_from_source_str(&self.sess.parse_sess,
self.sess.opts.cfg.clone(),
source_name.clone(),
body);
let lo = p.span.lo;
let body = match p.parse_all_token_trees() {
Ok(body) => body,
Err(err) => panic!(err),
};
let span = mk_sp(lo, p.last_span.hi);
p.abort_if_errors();
macros.push(ast::MacroDef {
ident: name.ident(),
attrs: attrs,
id: ast::DUMMY_NODE_ID,
span: span,
imported_from: Some(krate.ident),
// overridden in plugin/load.rs
export: false,
use_locally: false,
allow_internal_unstable: false,
body: body,
});
true
}
);
macros
}
/// Look for a plugin registrar. Returns library path and symbol name.
pub fn find_plugin_registrar(&mut self, span: Span, name: &str)
-> Option<(PathBuf, String)> {
let ekrate = self.read_extension_crate(span, &CrateInfo {
name: name.to_string(),
ident: name.to_string(),
id: ast::DUMMY_NODE_ID,
should_link: false,
});
if ekrate.target_only {
// Need to abort before syntax expansion.
let message = format!("plugin `{}` is not available for triple `{}` \
(only found {})",
name,
config::host_triple(),
self.sess.opts.target_triple);
self.sess.span_err(span, &message[..]);
self.sess.abort_if_errors();
}
let registrar = decoder::get_plugin_registrar_fn(ekrate.metadata.as_slice())
.map(|id| decoder::get_symbol(ekrate.metadata.as_slice(), id));
match (ekrate.dylib.as_ref(), registrar) {
(Some(dylib), Some(reg)) => Some((dylib.to_path_buf(), reg)),
(None, Some(_)) => {
let message = format!("plugin `{}` only found in rlib format, \
but must be available in dylib format",
name);
self.sess.span_err(span, &message[..]);
// No need to abort because the loading code will just ignore this
// empty dylib.
None
}
_ => None,
}
}
}
impl<'a, 'b> LocalCrateReader<'a, 'b> {
pub fn new(sess: &'a Session, map: &'a ast_map::Map<'b>) -> LocalCrateReader<'a, 'b> {
LocalCrateReader {
sess: sess,
creader: CrateReader::new(sess),
ast_map: map,
}
}
// Traverses an AST, reading all the information about use'd crates and
// extern libraries necessary for later resolving, typechecking, linking,
// etc.
pub fn read_crates(&mut self, krate: &ast::Crate) {
self.process_crate(krate);
visit::walk_crate(self, krate);
if log_enabled!(log::DEBUG) {
dump_crates(&self.sess.cstore);
}
for &(ref name, kind) in &self.sess.opts.libs {
register_native_lib(self.sess, None, name.clone(), kind);
}
}
fn process_crate(&self, c: &ast::Crate) {
for a in c.attrs.iter().filter(|m| m.name() == "link_args") {
match a.value_str() {
Some(ref linkarg) => self.sess.cstore.add_used_link_args(&linkarg),
None => { /* fallthrough */ }
}
}
}
fn process_item(&mut self, i: &ast::Item) {
match i.node {
ast::ItemExternCrate(_) => {
if!should_link(i) {
return;
}
match self.creader.extract_crate_info(i) {
Some(info) => {
let (cnum, cmeta, _) = self.creader.resolve_crate(&None,
&info.ident,
&info.name,
None,
i.span,
|
match hash {
Some(hash) if *hash == data.hash() => { ret = Some(cnum); return }
Some(..) => return,
None => {}
|
random_line_split
|
creader.rs
|
}
None => { load_ctxt.report_load_errs(); unreachable!() },
};
let dylib = library.dylib.clone();
let register = should_link && self.existing_match(&info.name,
None,
PathKind::Crate).is_none();
let metadata = if register {
// Register crate now to avoid double-reading metadata
let (_, cmd, _) = self.register_crate(&None, &info.ident,
&info.name, span, library);
PMDSource::Registered(cmd)
} else {
// Not registering the crate; just hold on to the metadata
PMDSource::Owned(library.metadata)
};
ExtensionCrate {
metadata: metadata,
dylib: dylib.map(|p| p.0),
target_only: target_only,
}
}
/// Read exported macros.
pub fn read_exported_macros(&mut self, krate: &ast::Item) -> Vec<ast::MacroDef> {
let ci = self.extract_crate_info(krate).unwrap();
let ekrate = self.read_extension_crate(krate.span, &ci);
let source_name = format!("<{} macros>", krate.ident);
let mut macros = vec![];
decoder::each_exported_macro(ekrate.metadata.as_slice(),
&*self.sess.cstore.intr,
|name, attrs, body| {
// NB: Don't use parse::parse_tts_from_source_str because it parses with
// quote_depth > 0.
let mut p = parse::new_parser_from_source_str(&self.sess.parse_sess,
self.sess.opts.cfg.clone(),
source_name.clone(),
body);
let lo = p.span.lo;
let body = match p.parse_all_token_trees() {
Ok(body) => body,
Err(err) => panic!(err),
};
let span = mk_sp(lo, p.last_span.hi);
p.abort_if_errors();
macros.push(ast::MacroDef {
ident: name.ident(),
attrs: attrs,
id: ast::DUMMY_NODE_ID,
span: span,
imported_from: Some(krate.ident),
// overridden in plugin/load.rs
export: false,
use_locally: false,
allow_internal_unstable: false,
body: body,
});
true
}
);
macros
}
/// Look for a plugin registrar. Returns library path and symbol name.
pub fn find_plugin_registrar(&mut self, span: Span, name: &str)
-> Option<(PathBuf, String)> {
let ekrate = self.read_extension_crate(span, &CrateInfo {
name: name.to_string(),
ident: name.to_string(),
id: ast::DUMMY_NODE_ID,
should_link: false,
});
if ekrate.target_only {
// Need to abort before syntax expansion.
let message = format!("plugin `{}` is not available for triple `{}` \
(only found {})",
name,
config::host_triple(),
self.sess.opts.target_triple);
self.sess.span_err(span, &message[..]);
self.sess.abort_if_errors();
}
let registrar = decoder::get_plugin_registrar_fn(ekrate.metadata.as_slice())
.map(|id| decoder::get_symbol(ekrate.metadata.as_slice(), id));
match (ekrate.dylib.as_ref(), registrar) {
(Some(dylib), Some(reg)) => Some((dylib.to_path_buf(), reg)),
(None, Some(_)) => {
let message = format!("plugin `{}` only found in rlib format, \
but must be available in dylib format",
name);
self.sess.span_err(span, &message[..]);
// No need to abort because the loading code will just ignore this
// empty dylib.
None
}
_ => None,
}
}
}
impl<'a, 'b> LocalCrateReader<'a, 'b> {
pub fn new(sess: &'a Session, map: &'a ast_map::Map<'b>) -> LocalCrateReader<'a, 'b> {
LocalCrateReader {
sess: sess,
creader: CrateReader::new(sess),
ast_map: map,
}
}
// Traverses an AST, reading all the information about use'd crates and
// extern libraries necessary for later resolving, typechecking, linking,
// etc.
pub fn read_crates(&mut self, krate: &ast::Crate) {
self.process_crate(krate);
visit::walk_crate(self, krate);
if log_enabled!(log::DEBUG) {
dump_crates(&self.sess.cstore);
}
for &(ref name, kind) in &self.sess.opts.libs {
register_native_lib(self.sess, None, name.clone(), kind);
}
}
fn process_crate(&self, c: &ast::Crate) {
for a in c.attrs.iter().filter(|m| m.name() == "link_args") {
match a.value_str() {
Some(ref linkarg) => self.sess.cstore.add_used_link_args(&linkarg),
None => { /* fallthrough */ }
}
}
}
fn process_item(&mut self, i: &ast::Item) {
match i.node {
ast::ItemExternCrate(_) => {
if!should_link(i) {
return;
}
match self.creader.extract_crate_info(i) {
Some(info) => {
let (cnum, cmeta, _) = self.creader.resolve_crate(&None,
&info.ident,
&info.name,
None,
i.span,
PathKind::Crate);
self.ast_map.with_path(i.id, |path|
cmeta.update_local_path(path));
self.sess.cstore.add_extern_mod_stmt_cnum(info.id, cnum);
}
None => ()
}
}
ast::ItemForeignMod(ref fm) => {
if fm.abi == abi::Rust || fm.abi == abi::RustIntrinsic {
return;
}
// First, add all of the custom link_args attributes
let link_args = i.attrs.iter()
.filter_map(|at| if at.name() == "link_args" {
Some(at)
} else {
None
})
.collect::<Vec<&ast::Attribute>>();
for m in &link_args {
match m.value_str() {
Some(linkarg) => self.sess.cstore.add_used_link_args(&linkarg),
None => { /* fallthrough */ }
}
}
// Next, process all of the #[link(..)]-style arguments
let link_args = i.attrs.iter()
.filter_map(|at| if at.name() == "link" {
Some(at)
} else {
None
})
.collect::<Vec<&ast::Attribute>>();
for m in &link_args {
match m.meta_item_list() {
Some(items) => {
let kind = items.iter().find(|k| {
k.name() == "kind"
}).and_then(|a| a.value_str());
let kind = match kind {
Some(k) => {
if k == "static" {
cstore::NativeStatic
} else if self.sess.target.target.options.is_like_osx
&& k == "framework" {
cstore::NativeFramework
} else if k == "framework" {
cstore::NativeFramework
} else if k == "dylib" {
cstore::NativeUnknown
} else {
self.sess.span_err(m.span,
&format!("unknown kind: `{}`",
k));
cstore::NativeUnknown
}
}
None => cstore::NativeUnknown
};
let n = items.iter().find(|n| {
n.name() == "name"
}).and_then(|a| a.value_str());
let n = match n {
Some(n) => n,
None => {
self.sess.span_err(m.span,
"#[link(...)] specified without \
`name = \"foo\"`");
InternedString::new("foo")
}
};
register_native_lib(self.sess, Some(m.span),
n.to_string(), kind);
}
None => {}
}
}
}
_ => { }
}
}
}
/// Imports the codemap from an external crate into the codemap of the crate
/// currently being compiled (the "local crate").
///
/// The import algorithm works analogous to how AST items are inlined from an
/// external crate's metadata:
/// For every FileMap in the external codemap an 'inline' copy is created in the
/// local codemap. The correspondence relation between external and local
/// FileMaps is recorded in the `ImportedFileMap` objects returned from this
/// function. When an item from an external crate is later inlined into this
/// crate, this correspondence information is used to translate the span
/// information of the inlined item so that it refers the correct positions in
/// the local codemap (see `astencode::DecodeContext::tr_span()`).
///
/// The import algorithm in the function below will reuse FileMaps already
/// existing in the local codemap. For example, even if the FileMap of some
/// source file of libstd gets imported many times, there will only ever be
/// one FileMap object for the corresponding file in the local codemap.
///
/// Note that imported FileMaps do not actually contain the source code of the
/// file they represent, just information about length, line breaks, and
/// multibyte characters. This information is enough to generate valid debuginfo
/// for items inlined from other crates.
pub fn import_codemap(local_codemap: &codemap::CodeMap,
metadata: &MetadataBlob)
-> Vec<cstore::ImportedFileMap> {
let external_codemap = decoder::get_imported_filemaps(metadata.as_slice());
let imported_filemaps = external_codemap.into_iter().map(|filemap_to_import| {
// Try to find an existing FileMap that can be reused for the filemap to
// be imported. A FileMap is reusable if it is exactly the same, just
// positioned at a different offset within the codemap.
let reusable_filemap = {
local_codemap.files
.borrow()
.iter()
.find(|fm| are_equal_modulo_startpos(&fm, &filemap_to_import))
.map(|rc| rc.clone())
};
match reusable_filemap {
Some(fm) => {
cstore::ImportedFileMap {
original_start_pos: filemap_to_import.start_pos,
original_end_pos: filemap_to_import.end_pos,
translated_filemap: fm
}
}
None => {
// We can't reuse an existing FileMap, so allocate a new one
// containing the information we need.
let codemap::FileMap {
name,
start_pos,
end_pos,
lines,
multibyte_chars,
..
} = filemap_to_import;
let source_length = (end_pos - start_pos).to_usize();
// Translate line-start positions and multibyte character
// position into frame of reference local to file.
// `CodeMap::new_imported_filemap()` will then translate those
// coordinates to their new global frame of reference when the
// offset of the FileMap is known.
let mut lines = lines.into_inner();
for pos in &mut lines {
*pos = *pos - start_pos;
}
let mut multibyte_chars = multibyte_chars.into_inner();
for mbc in &mut multibyte_chars {
mbc.pos = mbc.pos - start_pos;
}
let local_version = local_codemap.new_imported_filemap(name,
source_length,
lines,
multibyte_chars);
cstore::ImportedFileMap {
original_start_pos: start_pos,
original_end_pos: end_pos,
translated_filemap: local_version
}
}
}
}).collect();
return imported_filemaps;
fn
|
are_equal_modulo_startpos
|
identifier_name
|
|
creader.rs
|
None => i.ident.to_string(),
};
Some(CrateInfo {
ident: i.ident.to_string(),
name: name,
id: i.id,
should_link: should_link(i),
})
}
_ => None
}
}
fn existing_match(&self, name: &str, hash: Option<&Svh>, kind: PathKind)
-> Option<ast::CrateNum> {
let mut ret = None;
self.sess.cstore.iter_crate_data(|cnum, data| {
if data.name!= name { return }
match hash {
Some(hash) if *hash == data.hash() => { ret = Some(cnum); return }
Some(..) => return,
None => {}
}
// When the hash is None we're dealing with a top-level dependency
// in which case we may have a specification on the command line for
// this library. Even though an upstream library may have loaded
// something of the same name, we have to make sure it was loaded
// from the exact same location as well.
//
// We're also sure to compare *paths*, not actual byte slices. The
// `source` stores paths which are normalized which may be different
// from the strings on the command line.
let source = self.sess.cstore.get_used_crate_source(cnum).unwrap();
if let Some(locs) = self.sess.opts.externs.get(name) {
let found = locs.iter().any(|l| {
let l = fs::canonicalize(l).ok();
source.dylib.as_ref().map(|p| &p.0) == l.as_ref() ||
source.rlib.as_ref().map(|p| &p.0) == l.as_ref()
});
if found {
ret = Some(cnum);
}
return
}
// Alright, so we've gotten this far which means that `data` has the
// right name, we don't have a hash, and we don't have a --extern
// pointing for ourselves. We're still not quite yet done because we
// have to make sure that this crate was found in the crate lookup
// path (this is a top-level dependency) as we don't want to
// implicitly load anything inside the dependency lookup path.
let prev_kind = source.dylib.as_ref().or(source.rlib.as_ref())
.unwrap().1;
if ret.is_none() && (prev_kind == kind || prev_kind == PathKind::All) {
ret = Some(cnum);
}
});
return ret;
}
fn register_crate(&mut self,
root: &Option<CratePaths>,
ident: &str,
name: &str,
span: Span,
lib: loader::Library)
-> (ast::CrateNum, Rc<cstore::crate_metadata>,
cstore::CrateSource) {
// Claim this crate number and cache it
let cnum = self.next_crate_num;
self.next_crate_num += 1;
// Stash paths for top-most crate locally if necessary.
let crate_paths = if root.is_none() {
Some(CratePaths {
ident: ident.to_string(),
dylib: lib.dylib.clone().map(|p| p.0),
rlib: lib.rlib.clone().map(|p| p.0),
})
} else {
None
};
// Maintain a reference to the top most crate.
let root = if root.is_some() { root } else { &crate_paths };
let loader::Library { dylib, rlib, metadata } = lib;
let cnum_map = self.resolve_crate_deps(root, metadata.as_slice(), span);
let staged_api = self.is_staged_api(metadata.as_slice());
let cmeta = Rc::new( cstore::crate_metadata {
name: name.to_string(),
local_path: RefCell::new(SmallVector::zero()),
data: metadata,
cnum_map: cnum_map,
cnum: cnum,
codemap_import_info: RefCell::new(vec![]),
span: span,
staged_api: staged_api
});
let source = cstore::CrateSource {
dylib: dylib,
rlib: rlib,
cnum: cnum,
};
self.sess.cstore.set_crate_data(cnum, cmeta.clone());
self.sess.cstore.add_used_crate_source(source.clone());
(cnum, cmeta, source)
}
fn is_staged_api(&self, data: &[u8]) -> bool {
let attrs = decoder::get_crate_attributes(data);
for attr in &attrs {
if &attr.name()[..] == "staged_api" {
match attr.node.value.node { ast::MetaWord(_) => return true, _ => (/*pass*/) }
}
}
return false;
}
fn resolve_crate(&mut self,
root: &Option<CratePaths>,
ident: &str,
name: &str,
hash: Option<&Svh>,
span: Span,
kind: PathKind)
-> (ast::CrateNum, Rc<cstore::crate_metadata>,
cstore::CrateSource) {
match self.existing_match(name, hash, kind) {
None => {
let mut load_ctxt = loader::Context {
sess: self.sess,
span: span,
ident: ident,
crate_name: name,
hash: hash.map(|a| &*a),
filesearch: self.sess.target_filesearch(kind),
target: &self.sess.target.target,
triple: &self.sess.opts.target_triple,
root: root,
rejected_via_hash: vec!(),
rejected_via_triple: vec!(),
rejected_via_kind: vec!(),
should_match_name: true,
};
let library = load_ctxt.load_library_crate();
self.register_crate(root, ident, name, span, library)
}
Some(cnum) => (cnum,
self.sess.cstore.get_crate_data(cnum),
self.sess.cstore.get_used_crate_source(cnum).unwrap())
}
}
// Go through the crate metadata and load any crates that it references
fn resolve_crate_deps(&mut self,
root: &Option<CratePaths>,
cdata: &[u8], span : Span)
-> cstore::cnum_map {
debug!("resolving deps of external crate");
// The map from crate numbers in the crate we're resolving to local crate
// numbers
decoder::get_crate_deps(cdata).iter().map(|dep| {
debug!("resolving dep crate {} hash: `{}`", dep.name, dep.hash);
let (local_cnum, _, _) = self.resolve_crate(root,
&dep.name,
&dep.name,
Some(&dep.hash),
span,
PathKind::Dependency);
(dep.cnum, local_cnum)
}).collect()
}
fn read_extension_crate(&mut self, span: Span, info: &CrateInfo) -> ExtensionCrate {
let target_triple = &self.sess.opts.target_triple[..];
let is_cross = target_triple!= config::host_triple();
let mut should_link = info.should_link &&!is_cross;
let mut target_only = false;
let ident = info.ident.clone();
let name = info.name.clone();
let mut load_ctxt = loader::Context {
sess: self.sess,
span: span,
ident: &ident[..],
crate_name: &name[..],
hash: None,
filesearch: self.sess.host_filesearch(PathKind::Crate),
target: &self.sess.host,
triple: config::host_triple(),
root: &None,
rejected_via_hash: vec!(),
rejected_via_triple: vec!(),
rejected_via_kind: vec!(),
should_match_name: true,
};
let library = match load_ctxt.maybe_load_library_crate() {
Some(l) => l,
None if is_cross => {
// Try loading from target crates. This will abort later if we
// try to load a plugin registrar function,
target_only = true;
should_link = info.should_link;
load_ctxt.target = &self.sess.target.target;
load_ctxt.triple = target_triple;
load_ctxt.filesearch = self.sess.target_filesearch(PathKind::Crate);
load_ctxt.load_library_crate()
}
None => { load_ctxt.report_load_errs(); unreachable!() },
};
let dylib = library.dylib.clone();
let register = should_link && self.existing_match(&info.name,
None,
PathKind::Crate).is_none();
let metadata = if register {
// Register crate now to avoid double-reading metadata
let (_, cmd, _) = self.register_crate(&None, &info.ident,
&info.name, span, library);
PMDSource::Registered(cmd)
} else {
// Not registering the crate; just hold on to the metadata
PMDSource::Owned(library.metadata)
};
ExtensionCrate {
metadata: metadata,
dylib: dylib.map(|p| p.0),
target_only: target_only,
}
}
/// Read exported macros.
pub fn read_exported_macros(&mut self, krate: &ast::Item) -> Vec<ast::MacroDef> {
let ci = self.extract_crate_info(krate).unwrap();
let ekrate = self.read_extension_crate(krate.span, &ci);
let source_name = format!("<{} macros>", krate.ident);
let mut macros = vec![];
decoder::each_exported_macro(ekrate.metadata.as_slice(),
&*self.sess.cstore.intr,
|name, attrs, body| {
// NB: Don't use parse::parse_tts_from_source_str because it parses with
// quote_depth > 0.
let mut p = parse::new_parser_from_source_str(&self.sess.parse_sess,
self.sess.opts.cfg.clone(),
source_name.clone(),
body);
let lo = p.span.lo;
let body = match p.parse_all_token_trees() {
Ok(body) => body,
Err(err) => panic!(err),
};
let span = mk_sp(lo, p.last_span.hi);
p.abort_if_errors();
macros.push(ast::MacroDef {
ident: name.ident(),
attrs: attrs,
id: ast::DUMMY_NODE_ID,
span: span,
imported_from: Some(krate.ident),
// overridden in plugin/load.rs
export: false,
use_locally: false,
allow_internal_unstable: false,
body: body,
});
true
}
);
macros
}
/// Look for a plugin registrar. Returns library path and symbol name.
pub fn find_plugin_registrar(&mut self, span: Span, name: &str)
-> Option<(PathBuf, String)> {
let ekrate = self.read_extension_crate(span, &CrateInfo {
name: name.to_string(),
ident: name.to_string(),
id: ast::DUMMY_NODE_ID,
should_link: false,
});
if ekrate.target_only {
// Need to abort before syntax expansion.
let message = format!("plugin `{}` is not available for triple `{}` \
(only found {})",
name,
config::host_triple(),
self.sess.opts.target_triple);
self.sess.span_err(span, &message[..]);
self.sess.abort_if_errors();
}
let registrar = decoder::get_plugin_registrar_fn(ekrate.metadata.as_slice())
.map(|id| decoder::get_symbol(ekrate.metadata.as_slice(), id));
match (ekrate.dylib.as_ref(), registrar) {
(Some(dylib), Some(reg)) => Some((dylib.to_path_buf(), reg)),
(None, Some(_)) => {
let message = format!("plugin `{}` only found in rlib format, \
but must be available in dylib format",
name);
self.sess.span_err(span, &message[..]);
// No need to abort because the loading code will just ignore this
// empty dylib.
None
}
_ => None,
}
}
}
impl<'a, 'b> LocalCrateReader<'a, 'b> {
pub fn new(sess: &'a Session, map: &'a ast_map::Map<'b>) -> LocalCrateReader<'a, 'b> {
LocalCrateReader {
sess: sess,
creader: CrateReader::new(sess),
ast_map: map,
}
}
// Traverses an AST, reading all the information about use'd crates and
// extern libraries necessary for later resolving, typechecking, linking,
// etc.
pub fn read_crates(&mut self, krate: &ast::Crate) {
    // Crate-level attributes first, then walk the whole AST (the visitor
    // impl picks up `extern crate` items and the like).
    self.process_crate(krate);
    visit::walk_crate(self, krate);
    if log_enabled!(log::DEBUG) {
        dump_crates(&self.sess.cstore);
    }
    // Native libraries requested on the command line are registered last.
    for &(ref name, kind) in &self.sess.opts.libs {
        register_native_lib(self.sess, None, name.clone(), kind);
    }
}
fn process_crate(&self, c: &ast::Crate) {
for a in c.attrs.iter().filter(|m| m.name() == "link_args") {
match a.value_str() {
Some(ref linkarg) => self.sess.cstore.add_used_link_args(&linkarg),
None =>
|
}
}
}
fn process_item(&mut self, i: &ast::Item) {
match i.node {
ast::ItemExternCrate(_) => {
if!should_link(i) {
return;
}
match self.creader.extract_crate_info(i) {
Some(info) => {
let (cnum, cmeta, _) = self.creader.resolve_crate(&None,
&info.ident,
&info.name,
None,
i.span,
|
{ /* fallthrough */ }
|
conditional_block
|
creader.rs
|
None => i.ident.to_string(),
};
Some(CrateInfo {
ident: i.ident.to_string(),
name: name,
id: i.id,
should_link: should_link(i),
})
}
_ => None
}
}
fn existing_match(&self, name: &str, hash: Option<&Svh>, kind: PathKind)
-> Option<ast::CrateNum>
|
let source = self.sess.cstore.get_used_crate_source(cnum).unwrap();
if let Some(locs) = self.sess.opts.externs.get(name) {
let found = locs.iter().any(|l| {
let l = fs::canonicalize(l).ok();
source.dylib.as_ref().map(|p| &p.0) == l.as_ref() ||
source.rlib.as_ref().map(|p| &p.0) == l.as_ref()
});
if found {
ret = Some(cnum);
}
return
}
// Alright, so we've gotten this far which means that `data` has the
// right name, we don't have a hash, and we don't have a --extern
// pointing for ourselves. We're still not quite yet done because we
// have to make sure that this crate was found in the crate lookup
// path (this is a top-level dependency) as we don't want to
// implicitly load anything inside the dependency lookup path.
let prev_kind = source.dylib.as_ref().or(source.rlib.as_ref())
.unwrap().1;
if ret.is_none() && (prev_kind == kind || prev_kind == PathKind::All) {
ret = Some(cnum);
}
});
return ret;
}
/// Assign a fresh crate number to `lib`, resolve its dependencies, and
/// record its metadata and source paths in the session's crate store.
/// Returns the new number together with the stored metadata and source.
fn register_crate(&mut self,
                  root: &Option<CratePaths>,
                  ident: &str,
                  name: &str,
                  span: Span,
                  lib: loader::Library)
                  -> (ast::CrateNum, Rc<cstore::crate_metadata>,
                      cstore::CrateSource) {
    // Claim this crate number and cache it
    let cnum = self.next_crate_num;
    self.next_crate_num += 1;
    // Stash paths for top-most crate locally if necessary.
    let crate_paths = if root.is_none() {
        Some(CratePaths {
            ident: ident.to_string(),
            dylib: lib.dylib.clone().map(|p| p.0),
            rlib: lib.rlib.clone().map(|p| p.0),
        })
    } else {
        None
    };
    // Maintain a reference to the top most crate.
    let root = if root.is_some() { root } else { &crate_paths };
    let loader::Library { dylib, rlib, metadata } = lib;
    // Recursively load everything this crate depends on before storing it.
    let cnum_map = self.resolve_crate_deps(root, metadata.as_slice(), span);
    let staged_api = self.is_staged_api(metadata.as_slice());
    let cmeta = Rc::new( cstore::crate_metadata {
        name: name.to_string(),
        local_path: RefCell::new(SmallVector::zero()),
        data: metadata,
        cnum_map: cnum_map,
        cnum: cnum,
        codemap_import_info: RefCell::new(vec![]),
        span: span,
        staged_api: staged_api
    });
    let source = cstore::CrateSource {
        dylib: dylib,
        rlib: rlib,
        cnum: cnum,
    };
    self.sess.cstore.set_crate_data(cnum, cmeta.clone());
    self.sess.cstore.add_used_crate_source(source.clone());
    (cnum, cmeta, source)
}
/// Whether the crate whose metadata is `data` carries a bare
/// `#[staged_api]` word attribute (a `MetaWord`, not a list or
/// name/value form).
fn is_staged_api(&self, data: &[u8]) -> bool {
    decoder::get_crate_attributes(data).iter().any(|attr| {
        &attr.name()[..] == "staged_api" &&
            match attr.node.value.node {
                ast::MetaWord(_) => true,
                _ => false,
            }
    })
}
/// Resolve `name` to a loaded crate: reuse an entry already in the crate
/// store when `existing_match` finds one; otherwise search the filesystem
/// via the loader and register what is found.
fn resolve_crate(&mut self,
                 root: &Option<CratePaths>,
                 ident: &str,
                 name: &str,
                 hash: Option<&Svh>,
                 span: Span,
                 kind: PathKind)
                 -> (ast::CrateNum, Rc<cstore::crate_metadata>,
                     cstore::CrateSource) {
    match self.existing_match(name, hash, kind) {
        None => {
            // Not loaded yet: set up a loader context and search for a
            // matching library on disk.
            let mut load_ctxt = loader::Context {
                sess: self.sess,
                span: span,
                ident: ident,
                crate_name: name,
                hash: hash.map(|a| &*a),
                filesearch: self.sess.target_filesearch(kind),
                target: &self.sess.target.target,
                triple: &self.sess.opts.target_triple,
                root: root,
                rejected_via_hash: vec!(),
                rejected_via_triple: vec!(),
                rejected_via_kind: vec!(),
                should_match_name: true,
            };
            let library = load_ctxt.load_library_crate();
            self.register_crate(root, ident, name, span, library)
        }
        // Already registered: return the cached metadata and source.
        Some(cnum) => (cnum,
                       self.sess.cstore.get_crate_data(cnum),
                       self.sess.cstore.get_used_crate_source(cnum).unwrap())
    }
}
// Go through the crate metadata and load any crates that it references
/// Returns a map from each dependency's crate number as recorded in
/// `cdata` to the crate number assigned in this session.
fn resolve_crate_deps(&mut self,
                      root: &Option<CratePaths>,
                      cdata: &[u8], span : Span)
                      -> cstore::cnum_map {
    debug!("resolving deps of external crate");
    // The map from crate numbers in the crate we're resolving to local crate
    // numbers
    decoder::get_crate_deps(cdata).iter().map(|dep| {
        debug!("resolving dep crate {} hash: `{}`", dep.name, dep.hash);
        // Resolve by exact hash so the precise build of each dependency is
        // loaded (or reused) recursively.
        let (local_cnum, _, _) = self.resolve_crate(root,
                                                    &dep.name,
                                                    &dep.name,
                                                    Some(&dep.hash),
                                                    span,
                                                    PathKind::Dependency);
        (dep.cnum, local_cnum)
    }).collect()
}
/// Load a crate for the compiler's own use (e.g. plugins or exported
/// macros). The crate is searched for as a *host* crate first and is only
/// registered in the crate store when it will also be linked.
fn read_extension_crate(&mut self, span: Span, info: &CrateInfo) -> ExtensionCrate {
    let target_triple = &self.sess.opts.target_triple[..];
    let is_cross = target_triple!= config::host_triple();
    // When cross-compiling, a host build of the crate must not be linked
    // into the target output (may be re-enabled by the fallback below).
    let mut should_link = info.should_link &&!is_cross;
    let mut target_only = false;
    let ident = info.ident.clone();
    let name = info.name.clone();
    let mut load_ctxt = loader::Context {
        sess: self.sess,
        span: span,
        ident: &ident[..],
        crate_name: &name[..],
        hash: None,
        filesearch: self.sess.host_filesearch(PathKind::Crate),
        target: &self.sess.host,
        triple: config::host_triple(),
        root: &None,
        rejected_via_hash: vec!(),
        rejected_via_triple: vec!(),
        rejected_via_kind: vec!(),
        should_match_name: true,
    };
    let library = match load_ctxt.maybe_load_library_crate() {
        Some(l) => l,
        None if is_cross => {
            // Try loading from target crates. This will abort later if we
            // try to load a plugin registrar function,
            target_only = true;
            should_link = info.should_link;
            load_ctxt.target = &self.sess.target.target;
            load_ctxt.triple = target_triple;
            load_ctxt.filesearch = self.sess.target_filesearch(PathKind::Crate);
            load_ctxt.load_library_crate()
        }
        None => { load_ctxt.report_load_errs(); unreachable!() },
    };
    let dylib = library.dylib.clone();
    // Only register when the crate will be linked and is not already in
    // the store.
    let register = should_link && self.existing_match(&info.name,
                                                      None,
                                                      PathKind::Crate).is_none();
    let metadata = if register {
        // Register crate now to avoid double-reading metadata
        let (_, cmd, _) = self.register_crate(&None, &info.ident,
                                              &info.name, span, library);
        PMDSource::Registered(cmd)
    } else {
        // Not registering the crate; just hold on to the metadata
        PMDSource::Owned(library.metadata)
    };
    ExtensionCrate {
        metadata: metadata,
        dylib: dylib.map(|p| p.0),
        target_only: target_only,
    }
}
/// Read exported macros.
///
/// Loads the crate referenced by `krate` (presumably an `extern crate`
/// item — confirm against callers) as an extension crate and re-parses
/// the body of each exported macro into token trees for this session.
pub fn read_exported_macros(&mut self, krate: &ast::Item) -> Vec<ast::MacroDef> {
    let ci = self.extract_crate_info(krate).unwrap();
    let ekrate = self.read_extension_crate(krate.span, &ci);
    // Synthetic source name used for spans of the re-parsed macro bodies.
    let source_name = format!("<{} macros>", krate.ident);
    let mut macros = vec![];
    decoder::each_exported_macro(ekrate.metadata.as_slice(),
                                 &*self.sess.cstore.intr,
                                 |name, attrs, body| {
        // NB: Don't use parse::parse_tts_from_source_str because it parses with
        // quote_depth > 0.
        let mut p = parse::new_parser_from_source_str(&self.sess.parse_sess,
                                                      self.sess.opts.cfg.clone(),
                                                      source_name.clone(),
                                                      body);
        let lo = p.span.lo;
        let body = match p.parse_all_token_trees() {
            Ok(body) => body,
            // NOTE(review): panics with a non-string payload on parse
            // failure; consider proper error reporting — left unchanged.
            Err(err) => panic!(err),
        };
        let span = mk_sp(lo, p.last_span.hi);
        p.abort_if_errors();
        macros.push(ast::MacroDef {
            ident: name.ident(),
            attrs: attrs,
            id: ast::DUMMY_NODE_ID,
            span: span,
            imported_from: Some(krate.ident),
            // overridden in plugin/load.rs
            export: false,
            use_locally: false,
            allow_internal_unstable: false,
            body: body,
        });
        // Returning true keeps the decoder iterating over remaining macros.
        true
    });
    macros
}
/// Look for a plugin registrar. Returns library path and symbol name.
pub fn find_plugin_registrar(&mut self, span: Span, name: &str)
-> Option<(PathBuf, String)> {
let ekrate = self.read_extension_crate(span, &CrateInfo {
name: name.to_string(),
ident: name.to_string(),
id: ast::DUMMY_NODE_ID,
should_link: false,
});
if ekrate.target_only {
// Need to abort before syntax expansion.
let message = format!("plugin `{}` is not available for triple `{}` \
(only found {})",
name,
config::host_triple(),
self.sess.opts.target_triple);
self.sess.span_err(span, &message[..]);
self.sess.abort_if_errors();
}
let registrar = decoder::get_plugin_registrar_fn(ekrate.metadata.as_slice())
.map(|id| decoder::get_symbol(ekrate.metadata.as_slice(), id));
match (ekrate.dylib.as_ref(), registrar) {
(Some(dylib), Some(reg)) => Some((dylib.to_path_buf(), reg)),
(None, Some(_)) => {
let message = format!("plugin `{}` only found in rlib format, \
but must be available in dylib format",
name);
self.sess.span_err(span, &message[..]);
// No need to abort because the loading code will just ignore this
// empty dylib.
None
}
_ => None,
}
}
}
impl<'a, 'b> LocalCrateReader<'a, 'b> {
pub fn new(sess: &'a Session, map: &'a ast_map::Map<'b>) -> LocalCrateReader<'a, 'b> {
LocalCrateReader {
sess: sess,
creader: CrateReader::new(sess),
ast_map: map,
}
}
// Traverses an AST, reading all the information about use'd crates and
// extern libraries necessary for later resolving, typechecking, linking,
// etc.
pub fn read_crates(&mut self, krate: &ast::Crate) {
self.process_crate(krate);
visit::walk_crate(self, krate);
if log_enabled!(log::DEBUG) {
dump_crates(&self.sess.cstore);
}
for &(ref name, kind) in &self.sess.opts.libs {
register_native_lib(self.sess, None, name.clone(), kind);
}
}
/// Record the payload of every crate-level `#[link_args = "..."]`
/// attribute with the crate store. Attributes whose value is not a
/// string are silently ignored.
fn process_crate(&self, c: &ast::Crate) {
    for attr in c.attrs.iter().filter(|a| a.name() == "link_args") {
        if let Some(ref linkarg) = attr.value_str() {
            self.sess.cstore.add_used_link_args(&linkarg);
        }
    }
}
fn process_item(&mut self, i: &ast::Item) {
match i.node {
ast::ItemExternCrate(_) => {
if!should_link(i) {
return;
}
match self.creader.extract_crate_info(i) {
Some(info) => {
let (cnum, cmeta, _) = self.creader.resolve_crate(&None,
&info.ident,
&info.name,
None,
i.span,
|
{
let mut ret = None;
self.sess.cstore.iter_crate_data(|cnum, data| {
if data.name != name { return }
match hash {
Some(hash) if *hash == data.hash() => { ret = Some(cnum); return }
Some(..) => return,
None => {}
}
// When the hash is None we're dealing with a top-level dependency
// in which case we may have a specification on the command line for
// this library. Even though an upstream library may have loaded
// something of the same name, we have to make sure it was loaded
// from the exact same location as well.
//
// We're also sure to compare *paths*, not actual byte slices. The
// `source` stores paths which are normalized which may be different
// from the strings on the command line.
|
identifier_body
|
async_utils.rs
|
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Virtio device async helper functions.
use std::cell::RefCell;
use std::rc::Rc;
use anyhow::{Context, Result};
use base::Event;
use cros_async::{EventAsync, Executor};
use super::{Interrupt, SignalableInterrupt};
/// Async task that waits for a signal from `event`. Once this event is readable, exit. Exiting
/// this future will cause the main loop to break and the worker thread to exit.
pub async fn await_and_exit(ex: &Executor, event: Event) -> Result<()> {
    let event_async = EventAsync::new(event.0, ex).context("failed to create EventAsync")?;
    // The value read is irrelevant (and a read error is ignored); being
    // signalled at all is the exit condition.
    let _ = event_async.next_val().await;
    Ok(())
}
/// Async task that resamples the status of the interrupt when the guest sends a request by
/// signalling the resample event associated with the interrupt.
pub async fn handle_irq_resample(ex: &Executor, interrupt: Rc<RefCell<Interrupt>>) -> Result<()> {
// Clone resample_evt if interrupt has one.
// This is a separate block so that we do not hold a RefCell borrow across await.
let resample_evt = if let Some(resample_evt) = interrupt.borrow().get_resample_evt() {
let resample_evt = resample_evt
.try_clone()
.context("resample_evt.try_clone() failed")?;
Some(EventAsync::new(resample_evt.0, ex).context("failed to create async resample event")?)
} else {
None
};
if let Some(resample_evt) = resample_evt {
loop {
let _ = resample_evt
.next_val()
.await
.context("failed to read resample event")?;
interrupt.borrow().do_interrupt_resample();
}
} else
|
Ok(())
}
|
{
// No resample event; park the future.
let () = futures::future::pending().await;
}
|
conditional_block
|
async_utils.rs
|
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Virtio device async helper functions.
use std::cell::RefCell;
use std::rc::Rc;
use anyhow::{Context, Result};
use base::Event;
use cros_async::{EventAsync, Executor};
use super::{Interrupt, SignalableInterrupt};
/// Wait until `event` is signalled, then resolve with `Ok(())`.
/// Callers use completion of this future to break out of their main loop
/// so the worker thread can exit.
pub async fn await_and_exit(ex: &Executor, event: Event) -> Result<()> {
    let kill_evt = EventAsync::new(event.0, ex).context("failed to create EventAsync")?;
    // Ignore the payload (and any read error); any signal means "exit".
    let _ = kill_evt.next_val().await;
    Ok(())
}
/// Async task that resamples the status of the interrupt when the guest sends a request by
/// signalling the resample event associated with the interrupt.
pub async fn handle_irq_resample(ex: &Executor, interrupt: Rc<RefCell<Interrupt>>) -> Result<()> {
// Clone resample_evt if interrupt has one.
// This is a separate block so that we do not hold a RefCell borrow across await.
let resample_evt = if let Some(resample_evt) = interrupt.borrow().get_resample_evt() {
let resample_evt = resample_evt
.try_clone()
.context("resample_evt.try_clone() failed")?;
Some(EventAsync::new(resample_evt.0, ex).context("failed to create async resample event")?)
} else {
None
};
if let Some(resample_evt) = resample_evt {
loop {
let _ = resample_evt
.next_val()
.await
.context("failed to read resample event")?;
interrupt.borrow().do_interrupt_resample();
}
} else {
|
}
Ok(())
}
|
// No resample event; park the future.
let () = futures::future::pending().await;
|
random_line_split
|
async_utils.rs
|
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Virtio device async helper functions.
use std::cell::RefCell;
use std::rc::Rc;
use anyhow::{Context, Result};
use base::Event;
use cros_async::{EventAsync, Executor};
use super::{Interrupt, SignalableInterrupt};
/// Async task that waits for a signal from `event`. Once this event is readable, exit. Exiting
/// this future will cause the main loop to break and the worker thread to exit.
pub async fn await_and_exit(ex: &Executor, event: Event) -> Result<()>
|
/// Async task that resamples the status of the interrupt when the guest sends a request by
/// signalling the resample event associated with the interrupt.
pub async fn handle_irq_resample(ex: &Executor, interrupt: Rc<RefCell<Interrupt>>) -> Result<()> {
    // Clone resample_evt if interrupt has one.
    // This is a separate block so that we do not hold a RefCell borrow across await.
    let resample_evt = if let Some(resample_evt) = interrupt.borrow().get_resample_evt() {
        let resample_evt = resample_evt
            .try_clone()
            .context("resample_evt.try_clone() failed")?;
        Some(EventAsync::new(resample_evt.0, ex).context("failed to create async resample event")?)
    } else {
        None
    };
    if let Some(resample_evt) = resample_evt {
        loop {
            // Each signal from the guest triggers exactly one resample; the
            // RefCell is borrowed only between awaits, never across one.
            let _ = resample_evt
                .next_val()
                .await
                .context("failed to read resample event")?;
            interrupt.borrow().do_interrupt_resample();
        }
    } else {
        // No resample event; park the future.
        let () = futures::future::pending().await;
    }
    // Both branches above run forever (the loop has no break; `pending`
    // never resolves), so this is only formally reachable — errors exit
    // early via `?`.
    Ok(())
}
|
{
let event_async = EventAsync::new(event.0, ex).context("failed to create EventAsync")?;
let _ = event_async.next_val().await;
Ok(())
}
|
identifier_body
|
async_utils.rs
|
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Virtio device async helper functions.
use std::cell::RefCell;
use std::rc::Rc;
use anyhow::{Context, Result};
use base::Event;
use cros_async::{EventAsync, Executor};
use super::{Interrupt, SignalableInterrupt};
/// Async task that waits for a signal from `event`. Once this event is readable, exit. Exiting
/// this future will cause the main loop to break and the worker thread to exit.
pub async fn await_and_exit(ex: &Executor, event: Event) -> Result<()> {
    let event_async = EventAsync::new(event.0, ex).context("failed to create EventAsync")?;
    // The value read is irrelevant (and a read error is ignored); being
    // signalled at all is the exit condition.
    let _ = event_async.next_val().await;
    Ok(())
}
/// Async task that resamples the status of the interrupt when the guest sends a request by
/// signalling the resample event associated with the interrupt.
pub async fn
|
(ex: &Executor, interrupt: Rc<RefCell<Interrupt>>) -> Result<()> {
// Clone resample_evt if interrupt has one.
// This is a separate block so that we do not hold a RefCell borrow across await.
let resample_evt = if let Some(resample_evt) = interrupt.borrow().get_resample_evt() {
let resample_evt = resample_evt
.try_clone()
.context("resample_evt.try_clone() failed")?;
Some(EventAsync::new(resample_evt.0, ex).context("failed to create async resample event")?)
} else {
None
};
if let Some(resample_evt) = resample_evt {
loop {
let _ = resample_evt
.next_val()
.await
.context("failed to read resample event")?;
interrupt.borrow().do_interrupt_resample();
}
} else {
// No resample event; park the future.
let () = futures::future::pending().await;
}
Ok(())
}
|
handle_irq_resample
|
identifier_name
|
profiling_support.rs
|
use measureme::{StringComponent, StringId};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfiler;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::definitions::DefPathData;
use rustc_middle::ty::{TyCtxt, WithOptConstParam};
use rustc_query_system::query::{QueryCache, QueryCacheStore};
use std::fmt::Debug;
use std::io::Write;
/// Memoizes the profiler `StringId` computed for each `DefId`, so that
/// repeated query keys do not rebuild their definition-path strings.
struct QueryKeyStringCache {
    def_id_cache: FxHashMap<DefId, StringId>,
}
impl QueryKeyStringCache {
fn new() -> QueryKeyStringCache {
QueryKeyStringCache { def_id_cache: Default::default() }
}
}
/// Builds profiler string representations of query keys, interning
/// `DefId` paths through `string_cache`.
struct QueryKeyStringBuilder<'p, 'c, 'tcx> {
    profiler: &'p SelfProfiler,
    tcx: TyCtxt<'tcx>,
    string_cache: &'c mut QueryKeyStringCache,
}
impl<'p, 'c, 'tcx> QueryKeyStringBuilder<'p, 'c, 'tcx> {
/// Bundle a profiler, `tcx`, and string cache for building key strings.
fn new(
    profiler: &'p SelfProfiler,
    tcx: TyCtxt<'tcx>,
    string_cache: &'c mut QueryKeyStringCache,
) -> QueryKeyStringBuilder<'p, 'c, 'tcx> {
    QueryKeyStringBuilder { profiler, tcx, string_cache }
}
// The current implementation is rather crude. In the future it might be a
// good idea to base this on `ty::print` in order to get nicer and more
// efficient query keys.
fn def_id_to_string_id(&mut self, def_id: DefId) -> StringId
|
let dis;
let end_index;
match def_key.disambiguated_data.data {
DefPathData::CrateRoot => {
crate_name = self.tcx.crate_name(def_id.krate).as_str();
name = &*crate_name;
dis = "";
end_index = 3;
}
other => {
other_name = other.to_string();
name = other_name.as_str();
if def_key.disambiguated_data.disambiguator == 0 {
dis = "";
end_index = 3;
} else {
write!(&mut dis_buffer[..], "[{}]", def_key.disambiguated_data.disambiguator)
.unwrap();
let end_of_dis = dis_buffer.iter().position(|&c| c == b']').unwrap();
dis = std::str::from_utf8(&dis_buffer[..end_of_dis + 1]).unwrap();
end_index = 4;
}
}
}
let components = [
StringComponent::Ref(parent_string_id),
StringComponent::Value("::"),
StringComponent::Value(name),
StringComponent::Value(dis),
];
let string_id = self.profiler.alloc_string(&components[start_index..end_index]);
self.string_cache.def_id_cache.insert(def_id, string_id);
string_id
}
}
/// Converts a query key into a profiler `StringId` via a
/// `QueryKeyStringBuilder`.
trait IntoSelfProfilingString {
    fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId;
}
// The default implementation of `IntoSelfProfilingString` just uses `Debug`
// which is slow and causes lots of duplication of string data.
// The specialized impls below take care of making the `DefId` case more
// efficient.
impl<T: Debug> IntoSelfProfilingString for T {
    default fn to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId {
        // Fallback path: intern the key's `Debug` rendering verbatim.
        let text = format!("{:?}", self);
        builder.profiler.alloc_string(text.as_str())
    }
}
// Specialization override: keys implementing `SpecIntoSelfProfilingString`
// bypass the slow `Debug`-based default above.
impl<T: SpecIntoSelfProfilingString> IntoSelfProfilingString for T {
    fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId {
        self.spec_to_self_profile_string(builder)
    }
}
/// Trait for key types with an efficient, `DefId`-aware string rendering,
/// selected through specialization.
#[rustc_specialization_trait]
trait SpecIntoSelfProfilingString: Debug {
    fn spec_to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId;
}
// A `DefId` renders as its full definition path (cached in the builder).
impl SpecIntoSelfProfilingString for DefId {
    fn spec_to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId {
        builder.def_id_to_string_id(*self)
    }
}
// A crate number renders as the `DefId` of that crate's root.
impl SpecIntoSelfProfilingString for CrateNum {
    fn spec_to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId {
        builder.def_id_to_string_id(DefId { krate: *self, index: CRATE_DEF_INDEX })
    }
}
// A bare `DefIndex` is rendered as a `DefId` in the local crate.
impl SpecIntoSelfProfilingString for DefIndex {
    fn spec_to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId {
        builder.def_id_to_string_id(DefId { krate: LOCAL_CRATE, index: *self })
    }
}
// A `LocalDefId` is by construction in the local crate.
impl SpecIntoSelfProfilingString for LocalDefId {
    fn spec_to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId {
        builder.def_id_to_string_id(DefId { krate: LOCAL_CRATE, index: self.local_def_index })
    }
}
impl<T: SpecIntoSelfProfilingString> SpecIntoSelfProfilingString for WithOptConstParam<T> {
    fn spec_to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId {
        // Render `WithOptConstParam` compactly as a tuple, keeping all the
        // information:
        //   { did: foo::bar, const_param_did: Some(foo::baz) } -> "(foo::bar, foo::baz)"
        //   { did: foo::bar, const_param_did: None }           -> "(foo::bar, _)"
        let did = StringComponent::Ref(self.did.to_self_profile_string(builder));
        let const_param_did = match self.const_param_did {
            Some(id) => StringComponent::Ref(builder.def_id_to_string_id(id)),
            None => StringComponent::Value("_"),
        };
        builder.profiler.alloc_string(
            &[
                StringComponent::Value("("),
                did,
                StringComponent::Value(", "),
                const_param_did,
                StringComponent::Value(")"),
            ][..],
        )
    }
}
impl<T0, T1> SpecIntoSelfProfilingString for (T0, T1)
where
    T0: SpecIntoSelfProfilingString,
    T1: SpecIntoSelfProfilingString,
{
    fn spec_to_self_profile_string(
        &self,
        builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
    ) -> StringId {
        // Render the pair as "(a,b)", referencing the already-interned
        // component strings rather than copying them.
        let first = self.0.to_self_profile_string(builder);
        let second = self.1.to_self_profile_string(builder);
        builder.profiler.alloc_string(
            &[
                StringComponent::Value("("),
                StringComponent::Ref(first),
                StringComponent::Value(","),
                StringComponent::Ref(second),
                StringComponent::Value(")"),
            ][..],
        )
    }
}
/// Allocate the self-profiling query strings for a single query cache. This
/// method is called from `alloc_self_profile_query_strings` which knows all
/// the queries via macro magic.
fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
    tcx: TyCtxt<'tcx>,
    query_name: &'static str,
    query_cache: &QueryCacheStore<C>,
    string_cache: &mut QueryKeyStringCache,
) where
    C: QueryCache,
    C::Key: Debug + Clone,
{
    tcx.prof.with_profiler(|profiler| {
        let event_id_builder = profiler.event_id_builder();
        // Walk the entire query cache and allocate the appropriate
        // string representations. Each cache entry is uniquely
        // identified by its dep_node_index.
        if profiler.query_key_recording_enabled() {
            let mut query_string_builder = QueryKeyStringBuilder::new(profiler, tcx, string_cache);
            let query_name = profiler.get_or_alloc_cached_string(query_name);
            // Since building the string representation of query keys might
            // need to invoke queries itself, we cannot keep the query caches
            // locked while doing so. Instead we copy out the
            // `(query_key, dep_node_index)` pairs and release the lock again.
            let mut query_keys_and_indices = Vec::new();
            query_cache.iter_results(&mut |k, _, i| query_keys_and_indices.push((k.clone(), i)));
            // Now actually allocate the strings. If allocating the strings
            // generates new entries in the query cache, we'll miss them but
            // we don't actually care.
            for (query_key, dep_node_index) in query_keys_and_indices {
                // Translate the DepNodeIndex into a QueryInvocationId
                let query_invocation_id = dep_node_index.into();
                // Create the string version of the query-key
                let query_key = query_key.to_self_profile_string(&mut query_string_builder);
                let event_id = event_id_builder.from_label_and_arg(query_name, query_key);
                // Doing this in bulk might be a good idea:
                profiler.map_query_invocation_id_to_string(
                    query_invocation_id,
                    event_id.to_string_id(),
                );
            }
        } else {
            // In this branch we don't allocate query keys; every invocation
            // of this query maps to one shared string: the query's name.
            let query_name = profiler.get_or_alloc_cached_string(query_name);
            let event_id = event_id_builder.from_label(query_name).to_string_id();
            let mut query_invocation_ids = Vec::new();
            query_cache.iter_results(&mut |_, _, i| {
                query_invocation_ids.push(i.into());
            });
            profiler.bulk_map_query_invocation_id_to_single_string(
                query_invocation_ids.into_iter(),
                event_id,
            );
        }
    });
}
/// All self-profiling events generated by the query engine use
/// virtual `StringId`s for their `event_id`. This method makes all
/// those virtual `StringId`s point to actual strings.
///
/// If we are recording only summary data, the ids will point to
/// just the query names. If we are recording query keys too, we
/// allocate the corresponding strings here.
pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'tcx>) {
    if!tcx.prof.enabled() {
        return;
    }
    let mut string_cache = QueryKeyStringCache::new();
    // `rustc_query_append!` below expands `alloc_once!` with the full list
    // of queries, producing one
    // `alloc_self_profile_query_strings_for_query_cache` call per query.
    macro_rules! alloc_once {
        (<$tcx:tt>
            $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident($K:ty) -> $V:ty,)*
        ) => {
            $({
                alloc_self_profile_query_strings_for_query_cache(
                    tcx,
                    stringify!($name),
                    &tcx.query_caches.$name,
                    &mut string_cache,
                );
            })*
        }
    }
    rustc_query_append! { [alloc_once!][<'tcx>] }
}
|
{
if let Some(&string_id) = self.string_cache.def_id_cache.get(&def_id) {
return string_id;
}
let def_key = self.tcx.def_key(def_id);
let (parent_string_id, start_index) = match def_key.parent {
Some(parent_index) => {
let parent_def_id = DefId { index: parent_index, krate: def_id.krate };
(self.def_id_to_string_id(parent_def_id), 0)
}
None => (StringId::INVALID, 2),
};
let dis_buffer = &mut [0u8; 16];
let crate_name;
let other_name;
let name;
|
identifier_body
|
profiling_support.rs
|
use measureme::{StringComponent, StringId};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfiler;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::definitions::DefPathData;
use rustc_middle::ty::{TyCtxt, WithOptConstParam};
use rustc_query_system::query::{QueryCache, QueryCacheStore};
use std::fmt::Debug;
use std::io::Write;
struct QueryKeyStringCache {
def_id_cache: FxHashMap<DefId, StringId>,
}
impl QueryKeyStringCache {
fn new() -> QueryKeyStringCache {
QueryKeyStringCache { def_id_cache: Default::default() }
}
}
struct QueryKeyStringBuilder<'p, 'c, 'tcx> {
profiler: &'p SelfProfiler,
tcx: TyCtxt<'tcx>,
string_cache: &'c mut QueryKeyStringCache,
}
impl<'p, 'c, 'tcx> QueryKeyStringBuilder<'p, 'c, 'tcx> {
fn new(
profiler: &'p SelfProfiler,
tcx: TyCtxt<'tcx>,
string_cache: &'c mut QueryKeyStringCache,
) -> QueryKeyStringBuilder<'p, 'c, 'tcx> {
QueryKeyStringBuilder { profiler, tcx, string_cache }
|
// efficient query keys.
/// Intern the definition-path string for `def_id` with the profiler,
/// memoizing the result in `string_cache`.
fn def_id_to_string_id(&mut self, def_id: DefId) -> StringId {
    // Fast path: this DefId was rendered before.
    if let Some(&string_id) = self.string_cache.def_id_cache.get(&def_id) {
        return string_id;
    }
    let def_key = self.tcx.def_key(def_id);
    // Render the parent path first (recursively). `start_index` picks the
    // first entry of `components` below to emit: 0 keeps "<parent>::",
    // while 2 (no parent, i.e. a crate root) drops the parent ref and "::".
    let (parent_string_id, start_index) = match def_key.parent {
        Some(parent_index) => {
            let parent_def_id = DefId { index: parent_index, krate: def_id.krate };
            (self.def_id_to_string_id(parent_def_id), 0)
        }
        None => (StringId::INVALID, 2),
    };
    // Stack scratch space for formatting a "[N]" disambiguator suffix
    // without heap-allocating.
    let dis_buffer = &mut [0u8; 16];
    let crate_name;
    let other_name;
    let name;
    let dis;
    let end_index;
    match def_key.disambiguated_data.data {
        DefPathData::CrateRoot => {
            crate_name = self.tcx.crate_name(def_id.krate).as_str();
            name = &*crate_name;
            dis = "";
            end_index = 3;
        }
        other => {
            other_name = other.to_string();
            name = other_name.as_str();
            if def_key.disambiguated_data.disambiguator == 0 {
                dis = "";
                end_index = 3;
            } else {
                // end_index = 4 additionally emits the "[N]" suffix.
                write!(&mut dis_buffer[..], "[{}]", def_key.disambiguated_data.disambiguator)
                    .unwrap();
                let end_of_dis = dis_buffer.iter().position(|&c| c == b']').unwrap();
                dis = std::str::from_utf8(&dis_buffer[..end_of_dis + 1]).unwrap();
                end_index = 4;
            }
        }
    }
    let components = [
        StringComponent::Ref(parent_string_id),
        StringComponent::Value("::"),
        StringComponent::Value(name),
        StringComponent::Value(dis),
    ];
    // Only the `[start_index..end_index]` window of `components` is used.
    let string_id = self.profiler.alloc_string(&components[start_index..end_index]);
    self.string_cache.def_id_cache.insert(def_id, string_id);
    string_id
}
}
trait IntoSelfProfilingString {
fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId;
}
// The default implementation of `IntoSelfProfilingString` just uses `Debug`
// which is slow and causes lots of duplication of string data.
// The specialized impls below take care of making the `DefId` case more
// efficient.
impl<T: Debug> IntoSelfProfilingString for T {
default fn to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
let s = format!("{:?}", self);
builder.profiler.alloc_string(&s[..])
}
}
impl<T: SpecIntoSelfProfilingString> IntoSelfProfilingString for T {
fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId {
self.spec_to_self_profile_string(builder)
}
}
#[rustc_specialization_trait]
trait SpecIntoSelfProfilingString: Debug {
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId;
}
impl SpecIntoSelfProfilingString for DefId {
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
builder.def_id_to_string_id(*self)
}
}
impl SpecIntoSelfProfilingString for CrateNum {
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
builder.def_id_to_string_id(DefId { krate: *self, index: CRATE_DEF_INDEX })
}
}
impl SpecIntoSelfProfilingString for DefIndex {
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
builder.def_id_to_string_id(DefId { krate: LOCAL_CRATE, index: *self })
}
}
impl SpecIntoSelfProfilingString for LocalDefId {
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
builder.def_id_to_string_id(DefId { krate: LOCAL_CRATE, index: self.local_def_index })
}
}
impl<T: SpecIntoSelfProfilingString> SpecIntoSelfProfilingString for WithOptConstParam<T> {
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
// We print `WithOptConstParam` values as tuples to make them shorter
// and more readable, without losing information:
//
// "WithOptConstParam { did: foo::bar, const_param_did: Some(foo::baz) }"
// becomes "(foo::bar, foo::baz)" and
// "WithOptConstParam { did: foo::bar, const_param_did: None }"
// becomes "(foo::bar, _)".
let did = StringComponent::Ref(self.did.to_self_profile_string(builder));
let const_param_did = if let Some(const_param_did) = self.const_param_did {
let const_param_did = builder.def_id_to_string_id(const_param_did);
StringComponent::Ref(const_param_did)
} else {
StringComponent::Value("_")
};
let components = [
StringComponent::Value("("),
did,
StringComponent::Value(", "),
const_param_did,
StringComponent::Value(")"),
];
builder.profiler.alloc_string(&components[..])
}
}
impl<T0, T1> SpecIntoSelfProfilingString for (T0, T1)
where
T0: SpecIntoSelfProfilingString,
T1: SpecIntoSelfProfilingString,
{
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
let val0 = self.0.to_self_profile_string(builder);
let val1 = self.1.to_self_profile_string(builder);
let components = &[
StringComponent::Value("("),
StringComponent::Ref(val0),
StringComponent::Value(","),
StringComponent::Ref(val1),
StringComponent::Value(")"),
];
builder.profiler.alloc_string(components)
}
}
/// Allocate the self-profiling query strings for a single query cache. This
/// method is called from `alloc_self_profile_query_strings` which knows all
/// the queries via macro magic.
fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
tcx: TyCtxt<'tcx>,
query_name: &'static str,
query_cache: &QueryCacheStore<C>,
string_cache: &mut QueryKeyStringCache,
) where
C: QueryCache,
C::Key: Debug + Clone,
{
tcx.prof.with_profiler(|profiler| {
let event_id_builder = profiler.event_id_builder();
// Walk the entire query cache and allocate the appropriate
// string representations. Each cache entry is uniquely
// identified by its dep_node_index.
if profiler.query_key_recording_enabled() {
let mut query_string_builder = QueryKeyStringBuilder::new(profiler, tcx, string_cache);
let query_name = profiler.get_or_alloc_cached_string(query_name);
// Since building the string representation of query keys might
// need to invoke queries itself, we cannot keep the query caches
// locked while doing so. Instead we copy out the
// `(query_key, dep_node_index)` pairs and release the lock again.
let mut query_keys_and_indices = Vec::new();
query_cache.iter_results(&mut |k, _, i| query_keys_and_indices.push((k.clone(), i)));
// Now actually allocate the strings. If allocating the strings
// generates new entries in the query cache, we'll miss them but
// we don't actually care.
for (query_key, dep_node_index) in query_keys_and_indices {
// Translate the DepNodeIndex into a QueryInvocationId
let query_invocation_id = dep_node_index.into();
// Create the string version of the query-key
let query_key = query_key.to_self_profile_string(&mut query_string_builder);
let event_id = event_id_builder.from_label_and_arg(query_name, query_key);
// Doing this in bulk might be a good idea:
profiler.map_query_invocation_id_to_string(
query_invocation_id,
event_id.to_string_id(),
);
}
} else {
// In this branch we don't allocate query keys
let query_name = profiler.get_or_alloc_cached_string(query_name);
let event_id = event_id_builder.from_label(query_name).to_string_id();
let mut query_invocation_ids = Vec::new();
query_cache.iter_results(&mut |_, _, i| {
query_invocation_ids.push(i.into());
});
profiler.bulk_map_query_invocation_id_to_single_string(
query_invocation_ids.into_iter(),
event_id,
);
}
});
}
/// All self-profiling events generated by the query engine use
/// virtual `StringId`s for their `event_id`. This method makes all
/// those virtual `StringId`s point to actual strings.
///
/// If we are recording only summary data, the ids will point to
/// just the query names. If we are recording query keys too, we
/// allocate the corresponding strings here.
pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'tcx>) {
if!tcx.prof.enabled() {
return;
}
let mut string_cache = QueryKeyStringCache::new();
macro_rules! alloc_once {
(<$tcx:tt>
$($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident($K:ty) -> $V:ty,)*
) => {
$({
alloc_self_profile_query_strings_for_query_cache(
tcx,
stringify!($name),
&tcx.query_caches.$name,
&mut string_cache,
);
})*
}
}
rustc_query_append! { [alloc_once!][<'tcx>] }
}
|
}
// The current implementation is rather crude. In the future it might be a
// good idea to base this on `ty::print` in order to get nicer and more
|
random_line_split
|
profiling_support.rs
|
use measureme::{StringComponent, StringId};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfiler;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::definitions::DefPathData;
use rustc_middle::ty::{TyCtxt, WithOptConstParam};
use rustc_query_system::query::{QueryCache, QueryCacheStore};
use std::fmt::Debug;
use std::io::Write;
struct QueryKeyStringCache {
def_id_cache: FxHashMap<DefId, StringId>,
}
impl QueryKeyStringCache {
fn new() -> QueryKeyStringCache {
QueryKeyStringCache { def_id_cache: Default::default() }
}
}
struct QueryKeyStringBuilder<'p, 'c, 'tcx> {
profiler: &'p SelfProfiler,
tcx: TyCtxt<'tcx>,
string_cache: &'c mut QueryKeyStringCache,
}
impl<'p, 'c, 'tcx> QueryKeyStringBuilder<'p, 'c, 'tcx> {
fn new(
profiler: &'p SelfProfiler,
tcx: TyCtxt<'tcx>,
string_cache: &'c mut QueryKeyStringCache,
) -> QueryKeyStringBuilder<'p, 'c, 'tcx> {
QueryKeyStringBuilder { profiler, tcx, string_cache }
}
// The current implementation is rather crude. In the future it might be a
// good idea to base this on `ty::print` in order to get nicer and more
// efficient query keys.
fn def_id_to_string_id(&mut self, def_id: DefId) -> StringId {
if let Some(&string_id) = self.string_cache.def_id_cache.get(&def_id) {
return string_id;
}
let def_key = self.tcx.def_key(def_id);
let (parent_string_id, start_index) = match def_key.parent {
Some(parent_index) => {
let parent_def_id = DefId { index: parent_index, krate: def_id.krate };
(self.def_id_to_string_id(parent_def_id), 0)
}
None => (StringId::INVALID, 2),
};
let dis_buffer = &mut [0u8; 16];
let crate_name;
let other_name;
let name;
let dis;
let end_index;
match def_key.disambiguated_data.data {
DefPathData::CrateRoot => {
crate_name = self.tcx.crate_name(def_id.krate).as_str();
name = &*crate_name;
dis = "";
end_index = 3;
}
other => {
other_name = other.to_string();
name = other_name.as_str();
if def_key.disambiguated_data.disambiguator == 0 {
dis = "";
end_index = 3;
} else {
write!(&mut dis_buffer[..], "[{}]", def_key.disambiguated_data.disambiguator)
.unwrap();
let end_of_dis = dis_buffer.iter().position(|&c| c == b']').unwrap();
dis = std::str::from_utf8(&dis_buffer[..end_of_dis + 1]).unwrap();
end_index = 4;
}
}
}
let components = [
StringComponent::Ref(parent_string_id),
StringComponent::Value("::"),
StringComponent::Value(name),
StringComponent::Value(dis),
];
let string_id = self.profiler.alloc_string(&components[start_index..end_index]);
self.string_cache.def_id_cache.insert(def_id, string_id);
string_id
}
}
trait IntoSelfProfilingString {
fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId;
}
// The default implementation of `IntoSelfProfilingString` just uses `Debug`
// which is slow and causes lots of duplication of string data.
// The specialized impls below take care of making the `DefId` case more
// efficient.
impl<T: Debug> IntoSelfProfilingString for T {
default fn to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
let s = format!("{:?}", self);
builder.profiler.alloc_string(&s[..])
}
}
impl<T: SpecIntoSelfProfilingString> IntoSelfProfilingString for T {
fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId {
self.spec_to_self_profile_string(builder)
}
}
#[rustc_specialization_trait]
trait SpecIntoSelfProfilingString: Debug {
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId;
}
impl SpecIntoSelfProfilingString for DefId {
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
builder.def_id_to_string_id(*self)
}
}
impl SpecIntoSelfProfilingString for CrateNum {
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
builder.def_id_to_string_id(DefId { krate: *self, index: CRATE_DEF_INDEX })
}
}
impl SpecIntoSelfProfilingString for DefIndex {
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
builder.def_id_to_string_id(DefId { krate: LOCAL_CRATE, index: *self })
}
}
impl SpecIntoSelfProfilingString for LocalDefId {
fn
|
(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
builder.def_id_to_string_id(DefId { krate: LOCAL_CRATE, index: self.local_def_index })
}
}
impl<T: SpecIntoSelfProfilingString> SpecIntoSelfProfilingString for WithOptConstParam<T> {
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
// We print `WithOptConstParam` values as tuples to make them shorter
// and more readable, without losing information:
//
// "WithOptConstParam { did: foo::bar, const_param_did: Some(foo::baz) }"
// becomes "(foo::bar, foo::baz)" and
// "WithOptConstParam { did: foo::bar, const_param_did: None }"
// becomes "(foo::bar, _)".
let did = StringComponent::Ref(self.did.to_self_profile_string(builder));
let const_param_did = if let Some(const_param_did) = self.const_param_did {
let const_param_did = builder.def_id_to_string_id(const_param_did);
StringComponent::Ref(const_param_did)
} else {
StringComponent::Value("_")
};
let components = [
StringComponent::Value("("),
did,
StringComponent::Value(", "),
const_param_did,
StringComponent::Value(")"),
];
builder.profiler.alloc_string(&components[..])
}
}
impl<T0, T1> SpecIntoSelfProfilingString for (T0, T1)
where
T0: SpecIntoSelfProfilingString,
T1: SpecIntoSelfProfilingString,
{
fn spec_to_self_profile_string(
&self,
builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
) -> StringId {
let val0 = self.0.to_self_profile_string(builder);
let val1 = self.1.to_self_profile_string(builder);
let components = &[
StringComponent::Value("("),
StringComponent::Ref(val0),
StringComponent::Value(","),
StringComponent::Ref(val1),
StringComponent::Value(")"),
];
builder.profiler.alloc_string(components)
}
}
/// Allocate the self-profiling query strings for a single query cache. This
/// method is called from `alloc_self_profile_query_strings` which knows all
/// the queries via macro magic.
fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
tcx: TyCtxt<'tcx>,
query_name: &'static str,
query_cache: &QueryCacheStore<C>,
string_cache: &mut QueryKeyStringCache,
) where
C: QueryCache,
C::Key: Debug + Clone,
{
tcx.prof.with_profiler(|profiler| {
let event_id_builder = profiler.event_id_builder();
// Walk the entire query cache and allocate the appropriate
// string representations. Each cache entry is uniquely
// identified by its dep_node_index.
if profiler.query_key_recording_enabled() {
let mut query_string_builder = QueryKeyStringBuilder::new(profiler, tcx, string_cache);
let query_name = profiler.get_or_alloc_cached_string(query_name);
// Since building the string representation of query keys might
// need to invoke queries itself, we cannot keep the query caches
// locked while doing so. Instead we copy out the
// `(query_key, dep_node_index)` pairs and release the lock again.
let mut query_keys_and_indices = Vec::new();
query_cache.iter_results(&mut |k, _, i| query_keys_and_indices.push((k.clone(), i)));
// Now actually allocate the strings. If allocating the strings
// generates new entries in the query cache, we'll miss them but
// we don't actually care.
for (query_key, dep_node_index) in query_keys_and_indices {
// Translate the DepNodeIndex into a QueryInvocationId
let query_invocation_id = dep_node_index.into();
// Create the string version of the query-key
let query_key = query_key.to_self_profile_string(&mut query_string_builder);
let event_id = event_id_builder.from_label_and_arg(query_name, query_key);
// Doing this in bulk might be a good idea:
profiler.map_query_invocation_id_to_string(
query_invocation_id,
event_id.to_string_id(),
);
}
} else {
// In this branch we don't allocate query keys
let query_name = profiler.get_or_alloc_cached_string(query_name);
let event_id = event_id_builder.from_label(query_name).to_string_id();
let mut query_invocation_ids = Vec::new();
query_cache.iter_results(&mut |_, _, i| {
query_invocation_ids.push(i.into());
});
profiler.bulk_map_query_invocation_id_to_single_string(
query_invocation_ids.into_iter(),
event_id,
);
}
});
}
/// All self-profiling events generated by the query engine use
/// virtual `StringId`s for their `event_id`. This method makes all
/// those virtual `StringId`s point to actual strings.
///
/// If we are recording only summary data, the ids will point to
/// just the query names. If we are recording query keys too, we
/// allocate the corresponding strings here.
pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'tcx>) {
if!tcx.prof.enabled() {
return;
}
let mut string_cache = QueryKeyStringCache::new();
macro_rules! alloc_once {
(<$tcx:tt>
$($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident($K:ty) -> $V:ty,)*
) => {
$({
alloc_self_profile_query_strings_for_query_cache(
tcx,
stringify!($name),
&tcx.query_caches.$name,
&mut string_cache,
);
})*
}
}
rustc_query_append! { [alloc_once!][<'tcx>] }
}
|
spec_to_self_profile_string
|
identifier_name
|
permission.rs
|
use std::{error::Error as StdError, fmt, io::Write, str::FromStr};
use diesel::{backend::Backend, deserialize, serialize, sql_types::Text};
#[derive(AsExpression, Clone, Copy, Debug, Eq, FromSqlRow, Hash, PartialEq)]
#[sql_type = "Text"]
pub enum Permission {
MakePost,
MakeMediaPost,
MakeComment,
FollowUser,
MakePersona,
SwitchPersona,
DeletePersona,
ManageFollowRequest,
ConfigureInstance,
BanUser,
BlockInstance,
GrantRole,
RevokeRole,
}
impl fmt::Display for Permission {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Permission::MakePost => write!(f, "make-post"),
Permission::MakeMediaPost => write!(f, "make-media-post"),
Permission::MakeComment => write!(f, "make-comment"),
Permission::FollowUser => write!(f, "follow-user"),
Permission::MakePersona => write!(f, "make-persona"),
Permission::SwitchPersona => write!(f, "switch-persona"),
Permission::DeletePersona => write!(f, "delete-persona"),
Permission::ManageFollowRequest => write!(f, "manage-follow-request"),
Permission::ConfigureInstance => write!(f, "configure-instance"),
Permission::BanUser => write!(f, "ban-user"),
Permission::BlockInstance => write!(f, "block-instance"),
Permission::GrantRole => write!(f, "grant-role"),
Permission::RevokeRole => write!(f, "revoke-role"),
}
}
}
impl FromStr for Permission {
type Err = PermissionParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"make-post" => Ok(Permission::MakePost),
"make-media-post" => Ok(Permission::MakeMediaPost),
"make-comment" => Ok(Permission::MakeComment),
"follow-user" => Ok(Permission::FollowUser),
"make-persona" => Ok(Permission::MakePersona),
"switch-persona" => Ok(Permission::SwitchPersona),
"delete-persona" => Ok(Permission::DeletePersona),
"manage-follow-request" => Ok(Permission::ManageFollowRequest),
"configure-instance" => Ok(Permission::ConfigureInstance),
"ban-user" => Ok(Permission::BanUser),
"block-instance" => Ok(Permission::BlockInstance),
"grant-role" => Ok(Permission::GrantRole),
"revoke-role" => Ok(Permission::RevokeRole),
_ => Err(PermissionParseError),
}
}
}
impl<DB> serialize::ToSql<Text, DB> for Permission
where
DB: Backend,
{
fn to_sql<W: Write>(&self, out: &mut serialize::Output<W, DB>) -> serialize::Result {
serialize::ToSql::<Text, DB>::to_sql(&format!("{}", self), out)
}
}
impl<DB> deserialize::FromSql<Text, DB> for Permission
where
DB: Backend<RawValue = [u8]>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
deserialize::FromSql::<Text, DB>::from_sql(bytes).and_then(|string: String| {
string
.parse::<Permission>()
.map_err(|e| Box::new(e) as Box<StdError + Send + Sync>)
})
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct PermissionParseError;
impl fmt::Display for PermissionParseError {
fn
|
(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Failed to parse Permission")
}
}
impl StdError for PermissionParseError {
fn description(&self) -> &str {
"Failed to parse Permission"
}
fn cause(&self) -> Option<&StdError> {
None
}
}
|
fmt
|
identifier_name
|
permission.rs
|
use std::{error::Error as StdError, fmt, io::Write, str::FromStr};
use diesel::{backend::Backend, deserialize, serialize, sql_types::Text};
#[derive(AsExpression, Clone, Copy, Debug, Eq, FromSqlRow, Hash, PartialEq)]
#[sql_type = "Text"]
pub enum Permission {
MakePost,
MakeMediaPost,
MakeComment,
FollowUser,
MakePersona,
SwitchPersona,
DeletePersona,
ManageFollowRequest,
ConfigureInstance,
BanUser,
|
impl fmt::Display for Permission {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Permission::MakePost => write!(f, "make-post"),
Permission::MakeMediaPost => write!(f, "make-media-post"),
Permission::MakeComment => write!(f, "make-comment"),
Permission::FollowUser => write!(f, "follow-user"),
Permission::MakePersona => write!(f, "make-persona"),
Permission::SwitchPersona => write!(f, "switch-persona"),
Permission::DeletePersona => write!(f, "delete-persona"),
Permission::ManageFollowRequest => write!(f, "manage-follow-request"),
Permission::ConfigureInstance => write!(f, "configure-instance"),
Permission::BanUser => write!(f, "ban-user"),
Permission::BlockInstance => write!(f, "block-instance"),
Permission::GrantRole => write!(f, "grant-role"),
Permission::RevokeRole => write!(f, "revoke-role"),
}
}
}
impl FromStr for Permission {
type Err = PermissionParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"make-post" => Ok(Permission::MakePost),
"make-media-post" => Ok(Permission::MakeMediaPost),
"make-comment" => Ok(Permission::MakeComment),
"follow-user" => Ok(Permission::FollowUser),
"make-persona" => Ok(Permission::MakePersona),
"switch-persona" => Ok(Permission::SwitchPersona),
"delete-persona" => Ok(Permission::DeletePersona),
"manage-follow-request" => Ok(Permission::ManageFollowRequest),
"configure-instance" => Ok(Permission::ConfigureInstance),
"ban-user" => Ok(Permission::BanUser),
"block-instance" => Ok(Permission::BlockInstance),
"grant-role" => Ok(Permission::GrantRole),
"revoke-role" => Ok(Permission::RevokeRole),
_ => Err(PermissionParseError),
}
}
}
impl<DB> serialize::ToSql<Text, DB> for Permission
where
DB: Backend,
{
fn to_sql<W: Write>(&self, out: &mut serialize::Output<W, DB>) -> serialize::Result {
serialize::ToSql::<Text, DB>::to_sql(&format!("{}", self), out)
}
}
impl<DB> deserialize::FromSql<Text, DB> for Permission
where
DB: Backend<RawValue = [u8]>,
{
fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result<Self> {
deserialize::FromSql::<Text, DB>::from_sql(bytes).and_then(|string: String| {
string
.parse::<Permission>()
.map_err(|e| Box::new(e) as Box<StdError + Send + Sync>)
})
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct PermissionParseError;
impl fmt::Display for PermissionParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Failed to parse Permission")
}
}
impl StdError for PermissionParseError {
fn description(&self) -> &str {
"Failed to parse Permission"
}
fn cause(&self) -> Option<&StdError> {
None
}
}
|
BlockInstance,
GrantRole,
RevokeRole,
}
|
random_line_split
|
spawn.rs
|
//extern crate scoped_threadpool;
extern crate threadpool;
extern crate thread_local;
extern crate hwloc;
use matrix::{Scalar,Mat};
use core::marker::{PhantomData};
use thread_comm::{ThreadComm,ThreadInfo};
use composables::{GemmNode,AlgorithmStep};
use std::sync::{Arc,Mutex};
use std::cell::{RefCell};
use self::threadpool::ThreadPool;
use self::thread_local::ThreadLocal;
use self::hwloc::{Topology, ObjectType, CPUBIND_THREAD, CpuSet};
use libc;
fn cpuset_for_core(topology: &Topology, idx: usize) -> CpuSet {
let cores = (*topology).objects_with_type(&ObjectType::Core).unwrap();
match cores.get(idx) {
Some(val) => val.cpuset().unwrap(),
None => panic!("No Core found with id {}", idx)
}
}
pub struct SpawnThreads<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, S: GemmNode<T, At, Bt, Ct>>
where S: Send, T:'static, S:'static, At:'static, Bt:'static, Ct:'static {
n_threads: usize,
pool: ThreadPool,
cntl_cache: Arc<ThreadLocal<RefCell<S>>>,
_t: PhantomData<T>,
_at: PhantomData<At>,
_bt: PhantomData<Bt>,
_ct: PhantomData<Ct>,
}
impl<T: Scalar,At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, S: GemmNode<T, At, Bt, Ct>>
SpawnThreads <T,At,Bt,Ct,S>
where S: Send {
pub fn
|
(&mut self, n_threads: usize){
//Create new thread pool
self.n_threads = n_threads;
if n_threads > 1 {
self.pool = ThreadPool::new(n_threads-1);
} else {
self.pool = ThreadPool::new(1);
}
//Clear the control tree cache
Arc::get_mut(&mut self.cntl_cache).expect("").clear();
//Bind threads to cores
self.bind_threads();
}
fn bind_threads(&mut self) {
//Get topology
let topo = Arc::new(Mutex::new(Topology::new()));
let comm : Arc<ThreadComm<T>> = Arc::new(ThreadComm::new(self.n_threads));
//Bind workers to cores.
for id in 1..self.n_threads {
let my_topo = topo.clone();
let my_comm = comm.clone();
self.pool.execute(move || {
let tid = unsafe { libc::pthread_self() };
{
let mut locked_topo = my_topo.lock().unwrap();
//let before = locked_topo.get_cpubind_for_thread(tid, CPUBIND_THREAD);
let bind_to = cpuset_for_core(&*locked_topo, id);
let _ = locked_topo.set_cpubind_for_thread(tid, bind_to, CPUBIND_THREAD);
//let after = locked_topo.get_cpubind_for_thread(tid, CPUBIND_THREAD);
}
//Barrier to make sure therad binding is done.
let thr = ThreadInfo::new(id, my_comm);
thr.barrier()
});
}
//Bind parent to a core.
let tid = unsafe { libc::pthread_self() };
{
let mut locked_topo = topo.lock().unwrap();
let bind_to = cpuset_for_core(&*locked_topo, 0);
let _ = locked_topo.set_cpubind_for_thread(tid, bind_to, CPUBIND_THREAD);
}
let thr = ThreadInfo::new(0, comm);
thr.barrier();
}
}
impl<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, S: GemmNode<T, At, Bt, Ct>>
GemmNode<T, At, Bt, Ct> for SpawnThreads<T, At, Bt, Ct, S>
where S: Send {
#[inline(always)]
unsafe fn run(&mut self, a: &mut At, b: &mut Bt, c:&mut Ct, _thr: &ThreadInfo<T>) -> () {
//Create global thread communicator
let comm : Arc<ThreadComm<T>> = Arc::new(ThreadComm::new(self.n_threads));
//Make some shallow copies here to pass into the scoped,
//because self.pool borrows self as mutable
//let cache = self.cntl_cache.clone();
//Spawn n-1 workers since head thread will do work too.
for id in 1..self.n_threads {
//Make some shallow copies because of borrow rules
let mut my_a = a.make_alias();
let mut my_b = b.make_alias();
let mut my_c = c.make_alias();
let my_comm = comm.clone();
let my_cache = self.cntl_cache.clone();
self.pool.execute(move || {
//Make this thread's communicator holder
let thr = ThreadInfo::new(id, my_comm);
//Read this thread's cached control tree
let cntl_tree_cell = my_cache.get_or(|| Box::new(RefCell::new(S::new())));
//Run subproblem
cntl_tree_cell.borrow_mut().run(&mut my_a, &mut my_b, &mut my_c, &thr);
thr.barrier();
});
}
//Do parent thread's work
let thr = ThreadInfo::new(0, comm);
let cntl_tree_cell = self.cntl_cache.get_or(|| Box::new(RefCell::new(S::new())));
cntl_tree_cell.borrow_mut().run(a, b, c, &thr);
thr.barrier();
}
fn new() -> Self {
SpawnThreads{ n_threads : 1, pool: ThreadPool::new(1),
cntl_cache: Arc::new(ThreadLocal::new()),
_t: PhantomData, _at:PhantomData, _bt: PhantomData, _ct: PhantomData }
}
fn hierarchy_description() -> Vec<AlgorithmStep> {
S::hierarchy_description()
}
}
|
set_n_threads
|
identifier_name
|
spawn.rs
|
//extern crate scoped_threadpool;
extern crate threadpool;
extern crate thread_local;
extern crate hwloc;
use matrix::{Scalar,Mat};
use core::marker::{PhantomData};
use thread_comm::{ThreadComm,ThreadInfo};
use composables::{GemmNode,AlgorithmStep};
use std::sync::{Arc,Mutex};
use std::cell::{RefCell};
use self::threadpool::ThreadPool;
use self::thread_local::ThreadLocal;
use self::hwloc::{Topology, ObjectType, CPUBIND_THREAD, CpuSet};
use libc;
fn cpuset_for_core(topology: &Topology, idx: usize) -> CpuSet {
let cores = (*topology).objects_with_type(&ObjectType::Core).unwrap();
match cores.get(idx) {
Some(val) => val.cpuset().unwrap(),
None => panic!("No Core found with id {}", idx)
}
}
pub struct SpawnThreads<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, S: GemmNode<T, At, Bt, Ct>>
where S: Send, T:'static, S:'static, At:'static, Bt:'static, Ct:'static {
n_threads: usize,
pool: ThreadPool,
cntl_cache: Arc<ThreadLocal<RefCell<S>>>,
_t: PhantomData<T>,
_at: PhantomData<At>,
_bt: PhantomData<Bt>,
_ct: PhantomData<Ct>,
}
impl<T: Scalar,At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, S: GemmNode<T, At, Bt, Ct>>
SpawnThreads <T,At,Bt,Ct,S>
where S: Send {
pub fn set_n_threads(&mut self, n_threads: usize){
//Create new thread pool
self.n_threads = n_threads;
if n_threads > 1 {
self.pool = ThreadPool::new(n_threads-1);
} else {
self.pool = ThreadPool::new(1);
}
//Clear the control tree cache
Arc::get_mut(&mut self.cntl_cache).expect("").clear();
//Bind threads to cores
self.bind_threads();
}
fn bind_threads(&mut self) {
//Get topology
let topo = Arc::new(Mutex::new(Topology::new()));
let comm : Arc<ThreadComm<T>> = Arc::new(ThreadComm::new(self.n_threads));
//Bind workers to cores.
for id in 1..self.n_threads {
let my_topo = topo.clone();
let my_comm = comm.clone();
self.pool.execute(move || {
let tid = unsafe { libc::pthread_self() };
{
let mut locked_topo = my_topo.lock().unwrap();
//let before = locked_topo.get_cpubind_for_thread(tid, CPUBIND_THREAD);
let bind_to = cpuset_for_core(&*locked_topo, id);
let _ = locked_topo.set_cpubind_for_thread(tid, bind_to, CPUBIND_THREAD);
//let after = locked_topo.get_cpubind_for_thread(tid, CPUBIND_THREAD);
}
//Barrier to make sure therad binding is done.
let thr = ThreadInfo::new(id, my_comm);
thr.barrier()
});
}
//Bind parent to a core.
let tid = unsafe { libc::pthread_self() };
{
let mut locked_topo = topo.lock().unwrap();
let bind_to = cpuset_for_core(&*locked_topo, 0);
let _ = locked_topo.set_cpubind_for_thread(tid, bind_to, CPUBIND_THREAD);
}
let thr = ThreadInfo::new(0, comm);
thr.barrier();
}
}
impl<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, S: GemmNode<T, At, Bt, Ct>>
GemmNode<T, At, Bt, Ct> for SpawnThreads<T, At, Bt, Ct, S>
where S: Send {
#[inline(always)]
unsafe fn run(&mut self, a: &mut At, b: &mut Bt, c:&mut Ct, _thr: &ThreadInfo<T>) -> () {
//Create global thread communicator
let comm : Arc<ThreadComm<T>> = Arc::new(ThreadComm::new(self.n_threads));
//Make some shallow copies here to pass into the scoped,
//because self.pool borrows self as mutable
//let cache = self.cntl_cache.clone();
//Spawn n-1 workers since head thread will do work too.
for id in 1..self.n_threads {
//Make some shallow copies because of borrow rules
let mut my_a = a.make_alias();
let mut my_b = b.make_alias();
let mut my_c = c.make_alias();
let my_comm = comm.clone();
let my_cache = self.cntl_cache.clone();
self.pool.execute(move || {
//Make this thread's communicator holder
let thr = ThreadInfo::new(id, my_comm);
//Read this thread's cached control tree
let cntl_tree_cell = my_cache.get_or(|| Box::new(RefCell::new(S::new())));
//Run subproblem
cntl_tree_cell.borrow_mut().run(&mut my_a, &mut my_b, &mut my_c, &thr);
thr.barrier();
});
}
//Do parent thread's work
let thr = ThreadInfo::new(0, comm);
let cntl_tree_cell = self.cntl_cache.get_or(|| Box::new(RefCell::new(S::new())));
cntl_tree_cell.borrow_mut().run(a, b, c, &thr);
thr.barrier();
}
fn new() -> Self {
SpawnThreads{ n_threads : 1, pool: ThreadPool::new(1),
cntl_cache: Arc::new(ThreadLocal::new()),
_t: PhantomData, _at:PhantomData, _bt: PhantomData, _ct: PhantomData }
}
fn hierarchy_description() -> Vec<AlgorithmStep>
|
}
|
{
S::hierarchy_description()
}
|
identifier_body
|
spawn.rs
|
//extern crate scoped_threadpool;
extern crate threadpool;
extern crate thread_local;
extern crate hwloc;
use matrix::{Scalar,Mat};
use core::marker::{PhantomData};
use thread_comm::{ThreadComm,ThreadInfo};
use composables::{GemmNode,AlgorithmStep};
use std::sync::{Arc,Mutex};
use std::cell::{RefCell};
use self::threadpool::ThreadPool;
use self::thread_local::ThreadLocal;
use self::hwloc::{Topology, ObjectType, CPUBIND_THREAD, CpuSet};
use libc;
fn cpuset_for_core(topology: &Topology, idx: usize) -> CpuSet {
let cores = (*topology).objects_with_type(&ObjectType::Core).unwrap();
match cores.get(idx) {
Some(val) => val.cpuset().unwrap(),
None => panic!("No Core found with id {}", idx)
}
}
pub struct SpawnThreads<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, S: GemmNode<T, At, Bt, Ct>>
where S: Send, T:'static, S:'static, At:'static, Bt:'static, Ct:'static {
n_threads: usize,
pool: ThreadPool,
cntl_cache: Arc<ThreadLocal<RefCell<S>>>,
_t: PhantomData<T>,
_at: PhantomData<At>,
_bt: PhantomData<Bt>,
_ct: PhantomData<Ct>,
}
impl<T: Scalar,At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, S: GemmNode<T, At, Bt, Ct>>
SpawnThreads <T,At,Bt,Ct,S>
where S: Send {
pub fn set_n_threads(&mut self, n_threads: usize){
//Create new thread pool
self.n_threads = n_threads;
|
//Clear the control tree cache
Arc::get_mut(&mut self.cntl_cache).expect("").clear();
//Bind threads to cores
self.bind_threads();
}
fn bind_threads(&mut self) {
//Get topology
let topo = Arc::new(Mutex::new(Topology::new()));
let comm : Arc<ThreadComm<T>> = Arc::new(ThreadComm::new(self.n_threads));
//Bind workers to cores.
for id in 1..self.n_threads {
let my_topo = topo.clone();
let my_comm = comm.clone();
self.pool.execute(move || {
let tid = unsafe { libc::pthread_self() };
{
let mut locked_topo = my_topo.lock().unwrap();
//let before = locked_topo.get_cpubind_for_thread(tid, CPUBIND_THREAD);
let bind_to = cpuset_for_core(&*locked_topo, id);
let _ = locked_topo.set_cpubind_for_thread(tid, bind_to, CPUBIND_THREAD);
//let after = locked_topo.get_cpubind_for_thread(tid, CPUBIND_THREAD);
}
//Barrier to make sure therad binding is done.
let thr = ThreadInfo::new(id, my_comm);
thr.barrier()
});
}
//Bind parent to a core.
let tid = unsafe { libc::pthread_self() };
{
let mut locked_topo = topo.lock().unwrap();
let bind_to = cpuset_for_core(&*locked_topo, 0);
let _ = locked_topo.set_cpubind_for_thread(tid, bind_to, CPUBIND_THREAD);
}
let thr = ThreadInfo::new(0, comm);
thr.barrier();
}
}
impl<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, S: GemmNode<T, At, Bt, Ct>>
GemmNode<T, At, Bt, Ct> for SpawnThreads<T, At, Bt, Ct, S>
where S: Send {
#[inline(always)]
unsafe fn run(&mut self, a: &mut At, b: &mut Bt, c:&mut Ct, _thr: &ThreadInfo<T>) -> () {
//Create global thread communicator
let comm : Arc<ThreadComm<T>> = Arc::new(ThreadComm::new(self.n_threads));
//Make some shallow copies here to pass into the scoped,
//because self.pool borrows self as mutable
//let cache = self.cntl_cache.clone();
//Spawn n-1 workers since head thread will do work too.
for id in 1..self.n_threads {
//Make some shallow copies because of borrow rules
let mut my_a = a.make_alias();
let mut my_b = b.make_alias();
let mut my_c = c.make_alias();
let my_comm = comm.clone();
let my_cache = self.cntl_cache.clone();
self.pool.execute(move || {
//Make this thread's communicator holder
let thr = ThreadInfo::new(id, my_comm);
//Read this thread's cached control tree
let cntl_tree_cell = my_cache.get_or(|| Box::new(RefCell::new(S::new())));
//Run subproblem
cntl_tree_cell.borrow_mut().run(&mut my_a, &mut my_b, &mut my_c, &thr);
thr.barrier();
});
}
//Do parent thread's work
let thr = ThreadInfo::new(0, comm);
let cntl_tree_cell = self.cntl_cache.get_or(|| Box::new(RefCell::new(S::new())));
cntl_tree_cell.borrow_mut().run(a, b, c, &thr);
thr.barrier();
}
fn new() -> Self {
SpawnThreads{ n_threads : 1, pool: ThreadPool::new(1),
cntl_cache: Arc::new(ThreadLocal::new()),
_t: PhantomData, _at:PhantomData, _bt: PhantomData, _ct: PhantomData }
}
fn hierarchy_description() -> Vec<AlgorithmStep> {
S::hierarchy_description()
}
}
|
if n_threads > 1 {
self.pool = ThreadPool::new(n_threads-1);
} else {
self.pool = ThreadPool::new(1);
}
|
random_line_split
|
method-on-enum.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// min-lldb-version: 310
// ignore-test // Test temporarily ignored due to debuginfo tests being disabled, see PR 47155
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// STACK BY REF
// gdb-command:print *self
// gdbg-check:$1 = {{RUST$ENUM$DISR = Variant2, [...]}, {RUST$ENUM$DISR = Variant2, __0 = 117901063}}
// gdbr-check:$1 = method_on_enum::Enum::Variant2(117901063)
// gdb-command:print arg1
// gdb-check:$2 = -1
// gdb-command:print arg2
// gdb-check:$3 = -2
// gdb-command:continue
// STACK BY VAL
// gdb-command:print self
// gdbg-check:$4 = {{RUST$ENUM$DISR = Variant2, [...]}, {RUST$ENUM$DISR = Variant2, __0 = 117901063}}
// gdbr-check:$4 = method_on_enum::Enum::Variant2(117901063)
// gdb-command:print arg1
// gdb-check:$5 = -3
// gdb-command:print arg2
// gdb-check:$6 = -4
// gdb-command:continue
// OWNED BY REF
// gdb-command:print *self
// gdbg-check:$7 = {{RUST$ENUM$DISR = Variant1, x = 1799, y = 1799}, {RUST$ENUM$DISR = Variant1, [...]}}
// gdbr-check:$7 = method_on_enum::Enum::Variant1{x: 1799, y: 1799}
// gdb-command:print arg1
// gdb-check:$8 = -5
// gdb-command:print arg2
// gdb-check:$9 = -6
// gdb-command:continue
// OWNED BY VAL
// gdb-command:print self
// gdbg-check:$10 = {{RUST$ENUM$DISR = Variant1, x = 1799, y = 1799}, {RUST$ENUM$DISR = Variant1, [...]}}
// gdbr-check:$10 = method_on_enum::Enum::Variant1{x: 1799, y: 1799}
// gdb-command:print arg1
// gdb-check:$11 = -7
// gdb-command:print arg2
// gdb-check:$12 = -8
// gdb-command:continue
// OWNED MOVED
// gdb-command:print *self
// gdbg-check:$13 = {{RUST$ENUM$DISR = Variant1, x = 1799, y = 1799}, {RUST$ENUM$DISR = Variant1, [...]}}
// gdbr-check:$13 = method_on_enum::Enum::Variant1{x: 1799, y: 1799}
// gdb-command:print arg1
// gdb-check:$14 = -9
// gdb-command:print arg2
// gdb-check:$15 = -10
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// STACK BY REF
// lldb-command:print *self
// lldb-check:[...]$0 = Variant2(117901063)
// lldb-command:print arg1
// lldb-check:[...]$1 = -1
// lldb-command:print arg2
// lldb-check:[...]$2 = -2
// lldb-command:continue
// STACK BY VAL
// lldb-command:print self
// lldb-check:[...]$3 = Variant2(117901063)
// lldb-command:print arg1
// lldb-check:[...]$4 = -3
// lldb-command:print arg2
// lldb-check:[...]$5 = -4
// lldb-command:continue
// OWNED BY REF
// lldb-command:print *self
// lldb-check:[...]$6 = Variant1 { x: 1799, y: 1799 }
// lldb-command:print arg1
// lldb-check:[...]$7 = -5
// lldb-command:print arg2
// lldb-check:[...]$8 = -6
// lldb-command:continue
// OWNED BY VAL
// lldb-command:print self
// lldb-check:[...]$9 = Variant1 { x: 1799, y: 1799 }
// lldb-command:print arg1
// lldb-check:[...]$10 = -7
// lldb-command:print arg2
// lldb-check:[...]$11 = -8
// lldb-command:continue
// OWNED MOVED
// lldb-command:print *self
// lldb-check:[...]$12 = Variant1 { x: 1799, y: 1799 }
// lldb-command:print arg1
// lldb-check:[...]$13 = -9
// lldb-command:print arg2
// lldb-check:[...]$14 = -10
// lldb-command:continue
#![feature(box_syntax)]
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
#[derive(Copy, Clone)]
enum Enum {
Variant1 { x: u16, y: u16 },
Variant2 (u32)
}
impl Enum {
fn self_by_ref(&self, arg1: isize, arg2: isize) -> isize {
zzz(); // #break
arg1 + arg2
}
fn self_by_val(self, arg1: isize, arg2: isize) -> isize {
zzz(); // #break
arg1 + arg2
}
fn self_owned(self: Box<Enum>, arg1: isize, arg2: isize) -> isize {
zzz(); // #break
arg1 + arg2
}
}
fn
|
() {
let stack = Enum::Variant2(117901063);
let _ = stack.self_by_ref(-1, -2);
let _ = stack.self_by_val(-3, -4);
let owned: Box<_> = box Enum::Variant1{ x: 1799, y: 1799 };
let _ = owned.self_by_ref(-5, -6);
let _ = owned.self_by_val(-7, -8);
let _ = owned.self_owned(-9, -10);
}
fn zzz() {()}
|
main
|
identifier_name
|
method-on-enum.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// min-lldb-version: 310
// ignore-test // Test temporarily ignored due to debuginfo tests being disabled, see PR 47155
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// STACK BY REF
// gdb-command:print *self
// gdbg-check:$1 = {{RUST$ENUM$DISR = Variant2, [...]}, {RUST$ENUM$DISR = Variant2, __0 = 117901063}}
// gdbr-check:$1 = method_on_enum::Enum::Variant2(117901063)
// gdb-command:print arg1
// gdb-check:$2 = -1
// gdb-command:print arg2
// gdb-check:$3 = -2
// gdb-command:continue
// STACK BY VAL
// gdb-command:print self
// gdbg-check:$4 = {{RUST$ENUM$DISR = Variant2, [...]}, {RUST$ENUM$DISR = Variant2, __0 = 117901063}}
// gdbr-check:$4 = method_on_enum::Enum::Variant2(117901063)
// gdb-command:print arg1
// gdb-check:$5 = -3
// gdb-command:print arg2
// gdb-check:$6 = -4
// gdb-command:continue
// OWNED BY REF
// gdb-command:print *self
// gdbg-check:$7 = {{RUST$ENUM$DISR = Variant1, x = 1799, y = 1799}, {RUST$ENUM$DISR = Variant1, [...]}}
// gdbr-check:$7 = method_on_enum::Enum::Variant1{x: 1799, y: 1799}
// gdb-command:print arg1
// gdb-check:$8 = -5
// gdb-command:print arg2
// gdb-check:$9 = -6
// gdb-command:continue
// OWNED BY VAL
// gdb-command:print self
// gdbg-check:$10 = {{RUST$ENUM$DISR = Variant1, x = 1799, y = 1799}, {RUST$ENUM$DISR = Variant1, [...]}}
// gdbr-check:$10 = method_on_enum::Enum::Variant1{x: 1799, y: 1799}
// gdb-command:print arg1
// gdb-check:$11 = -7
// gdb-command:print arg2
// gdb-check:$12 = -8
// gdb-command:continue
// OWNED MOVED
// gdb-command:print *self
// gdbg-check:$13 = {{RUST$ENUM$DISR = Variant1, x = 1799, y = 1799}, {RUST$ENUM$DISR = Variant1, [...]}}
// gdbr-check:$13 = method_on_enum::Enum::Variant1{x: 1799, y: 1799}
// gdb-command:print arg1
// gdb-check:$14 = -9
// gdb-command:print arg2
// gdb-check:$15 = -10
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// STACK BY REF
// lldb-command:print *self
// lldb-check:[...]$0 = Variant2(117901063)
// lldb-command:print arg1
// lldb-check:[...]$1 = -1
// lldb-command:print arg2
// lldb-check:[...]$2 = -2
// lldb-command:continue
// STACK BY VAL
// lldb-command:print self
// lldb-check:[...]$3 = Variant2(117901063)
// lldb-command:print arg1
// lldb-check:[...]$4 = -3
// lldb-command:print arg2
// lldb-check:[...]$5 = -4
// lldb-command:continue
// OWNED BY REF
// lldb-command:print *self
// lldb-check:[...]$6 = Variant1 { x: 1799, y: 1799 }
// lldb-command:print arg1
// lldb-check:[...]$7 = -5
// lldb-command:print arg2
// lldb-check:[...]$8 = -6
// lldb-command:continue
// OWNED BY VAL
// lldb-command:print self
// lldb-check:[...]$9 = Variant1 { x: 1799, y: 1799 }
// lldb-command:print arg1
// lldb-check:[...]$10 = -7
// lldb-command:print arg2
// lldb-check:[...]$11 = -8
// lldb-command:continue
// OWNED MOVED
// lldb-command:print *self
// lldb-check:[...]$12 = Variant1 { x: 1799, y: 1799 }
// lldb-command:print arg1
// lldb-check:[...]$13 = -9
// lldb-command:print arg2
// lldb-check:[...]$14 = -10
// lldb-command:continue
#![feature(box_syntax)]
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
#[derive(Copy, Clone)]
enum Enum {
Variant1 { x: u16, y: u16 },
|
impl Enum {
fn self_by_ref(&self, arg1: isize, arg2: isize) -> isize {
zzz(); // #break
arg1 + arg2
}
fn self_by_val(self, arg1: isize, arg2: isize) -> isize {
zzz(); // #break
arg1 + arg2
}
fn self_owned(self: Box<Enum>, arg1: isize, arg2: isize) -> isize {
zzz(); // #break
arg1 + arg2
}
}
fn main() {
let stack = Enum::Variant2(117901063);
let _ = stack.self_by_ref(-1, -2);
let _ = stack.self_by_val(-3, -4);
let owned: Box<_> = box Enum::Variant1{ x: 1799, y: 1799 };
let _ = owned.self_by_ref(-5, -6);
let _ = owned.self_by_val(-7, -8);
let _ = owned.self_owned(-9, -10);
}
fn zzz() {()}
|
Variant2 (u32)
}
|
random_line_split
|
method-on-enum.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// min-lldb-version: 310
// ignore-test // Test temporarily ignored due to debuginfo tests being disabled, see PR 47155
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// STACK BY REF
// gdb-command:print *self
// gdbg-check:$1 = {{RUST$ENUM$DISR = Variant2, [...]}, {RUST$ENUM$DISR = Variant2, __0 = 117901063}}
// gdbr-check:$1 = method_on_enum::Enum::Variant2(117901063)
// gdb-command:print arg1
// gdb-check:$2 = -1
// gdb-command:print arg2
// gdb-check:$3 = -2
// gdb-command:continue
// STACK BY VAL
// gdb-command:print self
// gdbg-check:$4 = {{RUST$ENUM$DISR = Variant2, [...]}, {RUST$ENUM$DISR = Variant2, __0 = 117901063}}
// gdbr-check:$4 = method_on_enum::Enum::Variant2(117901063)
// gdb-command:print arg1
// gdb-check:$5 = -3
// gdb-command:print arg2
// gdb-check:$6 = -4
// gdb-command:continue
// OWNED BY REF
// gdb-command:print *self
// gdbg-check:$7 = {{RUST$ENUM$DISR = Variant1, x = 1799, y = 1799}, {RUST$ENUM$DISR = Variant1, [...]}}
// gdbr-check:$7 = method_on_enum::Enum::Variant1{x: 1799, y: 1799}
// gdb-command:print arg1
// gdb-check:$8 = -5
// gdb-command:print arg2
// gdb-check:$9 = -6
// gdb-command:continue
// OWNED BY VAL
// gdb-command:print self
// gdbg-check:$10 = {{RUST$ENUM$DISR = Variant1, x = 1799, y = 1799}, {RUST$ENUM$DISR = Variant1, [...]}}
// gdbr-check:$10 = method_on_enum::Enum::Variant1{x: 1799, y: 1799}
// gdb-command:print arg1
// gdb-check:$11 = -7
// gdb-command:print arg2
// gdb-check:$12 = -8
// gdb-command:continue
// OWNED MOVED
// gdb-command:print *self
// gdbg-check:$13 = {{RUST$ENUM$DISR = Variant1, x = 1799, y = 1799}, {RUST$ENUM$DISR = Variant1, [...]}}
// gdbr-check:$13 = method_on_enum::Enum::Variant1{x: 1799, y: 1799}
// gdb-command:print arg1
// gdb-check:$14 = -9
// gdb-command:print arg2
// gdb-check:$15 = -10
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// STACK BY REF
// lldb-command:print *self
// lldb-check:[...]$0 = Variant2(117901063)
// lldb-command:print arg1
// lldb-check:[...]$1 = -1
// lldb-command:print arg2
// lldb-check:[...]$2 = -2
// lldb-command:continue
// STACK BY VAL
// lldb-command:print self
// lldb-check:[...]$3 = Variant2(117901063)
// lldb-command:print arg1
// lldb-check:[...]$4 = -3
// lldb-command:print arg2
// lldb-check:[...]$5 = -4
// lldb-command:continue
// OWNED BY REF
// lldb-command:print *self
// lldb-check:[...]$6 = Variant1 { x: 1799, y: 1799 }
// lldb-command:print arg1
// lldb-check:[...]$7 = -5
// lldb-command:print arg2
// lldb-check:[...]$8 = -6
// lldb-command:continue
// OWNED BY VAL
// lldb-command:print self
// lldb-check:[...]$9 = Variant1 { x: 1799, y: 1799 }
// lldb-command:print arg1
// lldb-check:[...]$10 = -7
// lldb-command:print arg2
// lldb-check:[...]$11 = -8
// lldb-command:continue
// OWNED MOVED
// lldb-command:print *self
// lldb-check:[...]$12 = Variant1 { x: 1799, y: 1799 }
// lldb-command:print arg1
// lldb-check:[...]$13 = -9
// lldb-command:print arg2
// lldb-check:[...]$14 = -10
// lldb-command:continue
#![feature(box_syntax)]
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
#[derive(Copy, Clone)]
enum Enum {
Variant1 { x: u16, y: u16 },
Variant2 (u32)
}
impl Enum {
fn self_by_ref(&self, arg1: isize, arg2: isize) -> isize {
zzz(); // #break
arg1 + arg2
}
fn self_by_val(self, arg1: isize, arg2: isize) -> isize {
zzz(); // #break
arg1 + arg2
}
fn self_owned(self: Box<Enum>, arg1: isize, arg2: isize) -> isize
|
}
fn main() {
let stack = Enum::Variant2(117901063);
let _ = stack.self_by_ref(-1, -2);
let _ = stack.self_by_val(-3, -4);
let owned: Box<_> = box Enum::Variant1{ x: 1799, y: 1799 };
let _ = owned.self_by_ref(-5, -6);
let _ = owned.self_by_val(-7, -8);
let _ = owned.self_owned(-9, -10);
}
fn zzz() {()}
|
{
zzz(); // #break
arg1 + arg2
}
|
identifier_body
|
react_native_xcode.rs
|
//! Implements a command for uploading react-native projects.
use std::env;
use std::fs;
use std::path::PathBuf;
use std::process;
use chrono::Duration;
use clap::{App, Arg, ArgMatches};
use failure::{bail, Error};
use if_chain::if_chain;
use log::info;
use serde::{Deserialize, Serialize};
use crate::api::{Api, NewRelease};
use crate::config::Config;
use crate::utils::args::ArgExt;
use crate::utils::file_search::ReleaseFileSearch;
use crate::utils::file_upload::UploadContext;
use crate::utils::fs::TempFile;
use crate::utils::sourcemaps::SourceMapProcessor;
use crate::utils::system::propagate_exit_status;
use crate::utils::xcode::{InfoPlist, MayDetach};
#[derive(Serialize, Deserialize, Default, Debug)]
struct SourceMapReport {
bundle_path: Option<PathBuf>,
sourcemap_path: Option<PathBuf>,
}
pub fn make_app<'a, 'b: 'a>(app: App<'a, 'b>) -> App<'a, 'b> {
app.about("Upload react-native projects in a Xcode build step.")
.org_project_args()
// legacy parameter
.arg(
Arg::with_name("verbose")
.long("verbose")
.short("v")
.hidden(true),
)
.arg(Arg::with_name("force").long("force").short("f").help(
"Force the script to run, even in debug configuration.{n}This rarely \
does what you want because the default build script does not actually \
produce any information that the sentry build tool could pick up on.",
))
.arg(Arg::with_name("allow_fetch").long("allow-fetch").help(
"Enable sourcemap fetching from the packager.{n}If this is enabled \
the react native packager needs to run and sourcemaps are downloade \
from it if the simulator platform is detected.",
))
.arg(
Arg::with_name("fetch_from")
.long("fetch-from")
.value_name("URL")
.help(
"Set the URL to fetch sourcemaps from.{n}\
The default is http://127.0.0.1:8081/, where the react-native \
packager runs by default.",
),
)
.arg(
Arg::with_name("force_foreground")
.long("force-foreground")
.help(
"Wait for the process to finish.{n}\
By default part of the build process will when triggered from Xcode \
detach and continue in the background. When an error happens, \
a dialog is shown. If this parameter is passed, Xcode will wait \
for the process to finish before the build finishes and output \
will be shown in the Xcode build output.",
),
)
.arg(
Arg::with_name("build_script")
.value_name("BUILD_SCRIPT")
.index(1)
.help(
"Optional path to the build script.{n}\
This is the path to the `react-native-xcode.sh` script you want \
to use. By default the bundled build script is used.",
),
)
.arg(
Arg::with_name("dist")
.long("dist")
.value_name("DISTRIBUTION")
.multiple(true)
.number_of_values(1)
.help("The names of the distributions to publish. Can be supplied multiple times."),
)
.arg(
Arg::with_name("args")
.value_name("ARGS")
.multiple(true)
.last(true)
.help("Optional arguments to pass to the build script."),
)
.arg(
Arg::with_name("wait")
.long("wait")
.help("Wait for the server to fully process uploaded files."),
)
}
fn find_node() -> String {
if let Ok(path) = env::var("NODE_BINARY") {
if!path.is_empty() {
return path;
}
}
"node".into()
}
pub fn
|
(matches: &ArgMatches<'_>) -> Result<(), Error> {
let config = Config::current();
let (org, project) = config.get_org_and_project(matches)?;
let should_wrap = matches.is_present("force")
|| match env::var("CONFIGURATION") {
Ok(config) =>!&config.contains("Debug"),
Err(_) => bail!("Need to run this from Xcode"),
};
let base = env::current_dir()?;
let script = if let Some(path) = matches.value_of("build_script") {
base.join(path)
} else {
base.join("../node_modules/react-native/scripts/react-native-xcode.sh")
}
.canonicalize()?;
info!(
"Issuing a command for Organization: {} Project: {}",
org, project
);
// if we allow fetching and we detect a simulator run, then we need to switch
// to simulator mode.
let fetch_url;
if_chain! {
if matches.is_present("allow_fetch");
if let Ok(val) = env::var("PLATFORM_NAME");
if val.ends_with("simulator");
then {
let url = matches.value_of("fetch_from").unwrap_or("http://127.0.0.1:8081/");
info!("Fetching sourcemaps from {}", url);
fetch_url = Some(url);
} else {
info!("Using react-native build script at {}", base.display());
fetch_url = None;
}
}
// in case we are in debug mode we directly dispatch to the script
// and exit out early.
if!should_wrap && fetch_url.is_none() {
info!("Running in debug mode, skipping script wrapping.");
let rv = process::Command::new(&script).spawn()?.wait()?;
propagate_exit_status(rv);
return Ok(());
}
info!("Parsing Info.plist");
let plist = match InfoPlist::discover_from_env()? {
Some(plist) => plist,
None => bail!("Could not find info.plist"),
};
info!("Parse result from Info.plist: {:?}", &plist);
let report_file = TempFile::create()?;
let node = find_node();
info!("Using node interpreter '{}'", &node);
MayDetach::wrap("React native symbol handling", |md| {
let bundle_path;
let sourcemap_path;
let bundle_url;
let sourcemap_url;
let bundle_file;
let sourcemap_file;
// If we have a fetch URL we need to fetch them from there now. In that
// case we do indeed fetch it right from the running packager and then
// store it in temporary files for later consumption.
if let Some(url) = fetch_url {
if!matches.is_present("force_foreground") {
md.may_detach()?;
}
let api = Api::current();
let url = url.trim_end_matches('/');
bundle_file = TempFile::create()?;
bundle_path = bundle_file.path().to_path_buf();
bundle_url = "~/index.ios.bundle".to_string();
sourcemap_file = TempFile::create()?;
sourcemap_path = sourcemap_file.path().to_path_buf();
sourcemap_url = "~/index.ios.map".to_string();
// wait up to 10 seconds for the server to be up.
if!api.wait_until_available(url, Duration::seconds(10))? {
bail!("Error: react-native packager did not respond in time");
}
api.download(
&format!("{}/index.ios.bundle?platform=ios&dev=true", url),
&mut bundle_file.open()?,
)?;
api.download(
&format!("{}/index.ios.map?platform=ios&dev=true", url),
&mut sourcemap_file.open()?,
)?;
// This is the case where we need to hook into the release process to
// collect sourcemaps when they are generated.
//
// this invokes via an indirection of sentry-cli our wrap_call() below.
// What is happening behind the scenes is that we switch out NODE_BINARY
// for ourselves which is what the react-native build script normally
// invokes. Because we export __SENTRY_RN_WRAP_XCODE_CALL=1, the main
// sentry-cli script will invoke our wrap_call() function below.
//
// That will then attempt to figure out that a react-native bundle is
// happening to the build script, parse out the arguments, add additional
// arguments if needed and then report the parsed arguments to a temporary
// JSON file we load back below.
//
// With that we we then have all the information we need to invoke the
// upload process.
} else {
let rv = process::Command::new(&script)
.env("NODE_BINARY", env::current_exe()?.to_str().unwrap())
.env("SENTRY_RN_REAL_NODE_BINARY", &node)
.env(
"SENTRY_RN_SOURCEMAP_REPORT",
report_file.path().to_str().unwrap(),
)
.env("__SENTRY_RN_WRAP_XCODE_CALL", "1")
.spawn()?
.wait()?;
propagate_exit_status(rv);
if!matches.is_present("force_foreground") {
md.may_detach()?;
}
let mut f = fs::File::open(report_file.path())?;
let report: SourceMapReport = serde_json::from_reader(&mut f).unwrap_or_else(|_| {
let err_msg = format!(
"File {} doesn't contain a valid JSON data.",
report_file.path().display()
);
panic!("{}", err_msg);
});
if report.bundle_path.is_none() || report.sourcemap_path.is_none() {
println!("Warning: build produced no sourcemaps.");
return Ok(());
}
bundle_path = report.bundle_path.unwrap();
bundle_url = format!("~/{}", bundle_path.file_name().unwrap().to_string_lossy());
sourcemap_path = report.sourcemap_path.unwrap();
sourcemap_url = format!(
"~/{}",
sourcemap_path.file_name().unwrap().to_string_lossy()
);
}
// now that we have all the data, we can now process and upload the
// sourcemaps.
println!("Processing react-native sourcemaps for Sentry upload.");
info!(" bundle path: {}", bundle_path.display());
info!(" sourcemap path: {}", sourcemap_path.display());
let mut processor = SourceMapProcessor::new();
processor.add(&bundle_url, ReleaseFileSearch::collect_file(bundle_path)?)?;
processor.add(
&sourcemap_url,
ReleaseFileSearch::collect_file(sourcemap_path)?,
)?;
processor.rewrite(&[base.parent().unwrap().to_str().unwrap()])?;
processor.add_sourcemap_references()?;
let dist = env::var("SENTRY_DIST").unwrap_or_else(|_| plist.build().to_string());
let release_name = env::var("SENTRY_RELEASE").unwrap_or(format!(
"{}@{}+{}",
plist.bundle_id(),
plist.version(),
dist
));
let api = Api::current();
let release = api.new_release(
&org,
&NewRelease {
version: release_name,
projects: vec![project.to_string()],
..Default::default()
},
)?;
match matches.values_of("dist") {
None => {
processor.upload(&UploadContext {
org: &org,
project: Some(&project),
release: &release.version,
dist: Some(&dist),
wait: matches.is_present("wait"),
})?;
}
Some(dists) => {
for dist in dists {
processor.upload(&UploadContext {
org: &org,
project: Some(&project),
release: &release.version,
dist: Some(dist),
wait: matches.is_present("wait"),
})?;
}
}
}
Ok(())
})
}
pub fn wrap_call() -> Result<(), Error> {
let mut args: Vec<_> = env::args().skip(1).collect();
let mut bundle_path = None;
let mut sourcemap_path = None;
if args.len() > 1 && (args[1] == "bundle" || args[1] == "ram-bundle") {
let mut iter = args.iter().fuse();
while let Some(item) = iter.next() {
if item == "--sourcemap-output" {
sourcemap_path = iter.next().cloned();
} else if let Some(rest) = item.strip_prefix("--sourcemap-output=") {
sourcemap_path = Some(rest.to_string());
} else if item == "--bundle-output" {
bundle_path = iter.next().cloned();
} else if let Some(rest) = item.strip_prefix("--bundle-output=") {
bundle_path = Some(rest.to_string());
}
}
}
let mut sourcemap_report = SourceMapReport::default();
if sourcemap_path.is_none() && bundle_path.is_some() {
let mut path = env::temp_dir();
let mut map_path = PathBuf::from(bundle_path.clone().unwrap());
map_path.set_extension("jsbundle.map");
path.push(map_path.file_name().unwrap());
sourcemap_report.sourcemap_path = Some(PathBuf::from(&path));
args.push("--sourcemap-output".into());
args.push(path.into_os_string().into_string().unwrap());
} else if let Some(path) = sourcemap_path {
sourcemap_report.sourcemap_path = Some(PathBuf::from(path));
}
sourcemap_report.bundle_path = bundle_path.map(PathBuf::from);
let rv = process::Command::new(env::var("SENTRY_RN_REAL_NODE_BINARY").unwrap())
.args(&args)
.spawn()?
.wait()?;
propagate_exit_status(rv);
let mut f = fs::File::create(env::var("SENTRY_RN_SOURCEMAP_REPORT").unwrap())?;
serde_json::to_writer(&mut f, &sourcemap_report)?;
Ok(())
}
|
execute
|
identifier_name
|
react_native_xcode.rs
|
//! Implements a command for uploading react-native projects.
use std::env;
use std::fs;
use std::path::PathBuf;
use std::process;
use chrono::Duration;
use clap::{App, Arg, ArgMatches};
use failure::{bail, Error};
use if_chain::if_chain;
use log::info;
use serde::{Deserialize, Serialize};
use crate::api::{Api, NewRelease};
use crate::config::Config;
use crate::utils::args::ArgExt;
use crate::utils::file_search::ReleaseFileSearch;
use crate::utils::file_upload::UploadContext;
use crate::utils::fs::TempFile;
use crate::utils::sourcemaps::SourceMapProcessor;
use crate::utils::system::propagate_exit_status;
use crate::utils::xcode::{InfoPlist, MayDetach};
#[derive(Serialize, Deserialize, Default, Debug)]
struct SourceMapReport {
bundle_path: Option<PathBuf>,
sourcemap_path: Option<PathBuf>,
}
pub fn make_app<'a, 'b: 'a>(app: App<'a, 'b>) -> App<'a, 'b> {
app.about("Upload react-native projects in a Xcode build step.")
.org_project_args()
// legacy parameter
.arg(
Arg::with_name("verbose")
.long("verbose")
.short("v")
.hidden(true),
)
.arg(Arg::with_name("force").long("force").short("f").help(
"Force the script to run, even in debug configuration.{n}This rarely \
does what you want because the default build script does not actually \
produce any information that the sentry build tool could pick up on.",
))
.arg(Arg::with_name("allow_fetch").long("allow-fetch").help(
"Enable sourcemap fetching from the packager.{n}If this is enabled \
the react native packager needs to run and sourcemaps are downloade \
from it if the simulator platform is detected.",
))
.arg(
Arg::with_name("fetch_from")
.long("fetch-from")
.value_name("URL")
.help(
"Set the URL to fetch sourcemaps from.{n}\
The default is http://127.0.0.1:8081/, where the react-native \
packager runs by default.",
),
)
.arg(
Arg::with_name("force_foreground")
.long("force-foreground")
.help(
"Wait for the process to finish.{n}\
By default part of the build process will when triggered from Xcode \
detach and continue in the background. When an error happens, \
a dialog is shown. If this parameter is passed, Xcode will wait \
for the process to finish before the build finishes and output \
will be shown in the Xcode build output.",
),
)
.arg(
Arg::with_name("build_script")
.value_name("BUILD_SCRIPT")
.index(1)
.help(
"Optional path to the build script.{n}\
This is the path to the `react-native-xcode.sh` script you want \
to use. By default the bundled build script is used.",
),
)
.arg(
Arg::with_name("dist")
.long("dist")
.value_name("DISTRIBUTION")
.multiple(true)
.number_of_values(1)
.help("The names of the distributions to publish. Can be supplied multiple times."),
)
.arg(
Arg::with_name("args")
.value_name("ARGS")
.multiple(true)
.last(true)
.help("Optional arguments to pass to the build script."),
)
.arg(
Arg::with_name("wait")
.long("wait")
.help("Wait for the server to fully process uploaded files."),
)
}
fn find_node() -> String {
if let Ok(path) = env::var("NODE_BINARY") {
if!path.is_empty() {
return path;
}
}
"node".into()
}
pub fn execute(matches: &ArgMatches<'_>) -> Result<(), Error> {
let config = Config::current();
let (org, project) = config.get_org_and_project(matches)?;
let should_wrap = matches.is_present("force")
|| match env::var("CONFIGURATION") {
Ok(config) =>!&config.contains("Debug"),
Err(_) => bail!("Need to run this from Xcode"),
};
let base = env::current_dir()?;
let script = if let Some(path) = matches.value_of("build_script") {
base.join(path)
} else {
base.join("../node_modules/react-native/scripts/react-native-xcode.sh")
}
.canonicalize()?;
info!(
"Issuing a command for Organization: {} Project: {}",
org, project
);
// if we allow fetching and we detect a simulator run, then we need to switch
// to simulator mode.
let fetch_url;
if_chain! {
if matches.is_present("allow_fetch");
if let Ok(val) = env::var("PLATFORM_NAME");
if val.ends_with("simulator");
then {
let url = matches.value_of("fetch_from").unwrap_or("http://127.0.0.1:8081/");
info!("Fetching sourcemaps from {}", url);
fetch_url = Some(url);
} else {
info!("Using react-native build script at {}", base.display());
fetch_url = None;
}
}
// in case we are in debug mode we directly dispatch to the script
// and exit out early.
if!should_wrap && fetch_url.is_none() {
info!("Running in debug mode, skipping script wrapping.");
let rv = process::Command::new(&script).spawn()?.wait()?;
propagate_exit_status(rv);
return Ok(());
}
info!("Parsing Info.plist");
let plist = match InfoPlist::discover_from_env()? {
Some(plist) => plist,
None => bail!("Could not find info.plist"),
};
info!("Parse result from Info.plist: {:?}", &plist);
let report_file = TempFile::create()?;
let node = find_node();
info!("Using node interpreter '{}'", &node);
MayDetach::wrap("React native symbol handling", |md| {
let bundle_path;
let sourcemap_path;
let bundle_url;
let sourcemap_url;
let bundle_file;
let sourcemap_file;
// If we have a fetch URL we need to fetch them from there now. In that
// case we do indeed fetch it right from the running packager and then
// store it in temporary files for later consumption.
if let Some(url) = fetch_url {
if!matches.is_present("force_foreground") {
md.may_detach()?;
}
let api = Api::current();
let url = url.trim_end_matches('/');
bundle_file = TempFile::create()?;
bundle_path = bundle_file.path().to_path_buf();
bundle_url = "~/index.ios.bundle".to_string();
sourcemap_file = TempFile::create()?;
sourcemap_path = sourcemap_file.path().to_path_buf();
sourcemap_url = "~/index.ios.map".to_string();
// wait up to 10 seconds for the server to be up.
if!api.wait_until_available(url, Duration::seconds(10))? {
bail!("Error: react-native packager did not respond in time");
}
api.download(
&format!("{}/index.ios.bundle?platform=ios&dev=true", url),
&mut bundle_file.open()?,
)?;
api.download(
&format!("{}/index.ios.map?platform=ios&dev=true", url),
&mut sourcemap_file.open()?,
)?;
// This is the case where we need to hook into the release process to
// collect sourcemaps when they are generated.
//
// this invokes via an indirection of sentry-cli our wrap_call() below.
// What is happening behind the scenes is that we switch out NODE_BINARY
// for ourselves which is what the react-native build script normally
// invokes. Because we export __SENTRY_RN_WRAP_XCODE_CALL=1, the main
// sentry-cli script will invoke our wrap_call() function below.
//
// That will then attempt to figure out that a react-native bundle is
// happening to the build script, parse out the arguments, add additional
// arguments if needed and then report the parsed arguments to a temporary
// JSON file we load back below.
//
// With that we we then have all the information we need to invoke the
// upload process.
} else {
let rv = process::Command::new(&script)
.env("NODE_BINARY", env::current_exe()?.to_str().unwrap())
.env("SENTRY_RN_REAL_NODE_BINARY", &node)
.env(
"SENTRY_RN_SOURCEMAP_REPORT",
report_file.path().to_str().unwrap(),
)
.env("__SENTRY_RN_WRAP_XCODE_CALL", "1")
.spawn()?
.wait()?;
propagate_exit_status(rv);
if!matches.is_present("force_foreground") {
md.may_detach()?;
}
let mut f = fs::File::open(report_file.path())?;
let report: SourceMapReport = serde_json::from_reader(&mut f).unwrap_or_else(|_| {
let err_msg = format!(
"File {} doesn't contain a valid JSON data.",
report_file.path().display()
);
panic!("{}", err_msg);
});
if report.bundle_path.is_none() || report.sourcemap_path.is_none() {
println!("Warning: build produced no sourcemaps.");
return Ok(());
|
sourcemap_url = format!(
"~/{}",
sourcemap_path.file_name().unwrap().to_string_lossy()
);
}
// now that we have all the data, we can now process and upload the
// sourcemaps.
println!("Processing react-native sourcemaps for Sentry upload.");
info!(" bundle path: {}", bundle_path.display());
info!(" sourcemap path: {}", sourcemap_path.display());
let mut processor = SourceMapProcessor::new();
processor.add(&bundle_url, ReleaseFileSearch::collect_file(bundle_path)?)?;
processor.add(
&sourcemap_url,
ReleaseFileSearch::collect_file(sourcemap_path)?,
)?;
processor.rewrite(&[base.parent().unwrap().to_str().unwrap()])?;
processor.add_sourcemap_references()?;
let dist = env::var("SENTRY_DIST").unwrap_or_else(|_| plist.build().to_string());
let release_name = env::var("SENTRY_RELEASE").unwrap_or(format!(
"{}@{}+{}",
plist.bundle_id(),
plist.version(),
dist
));
let api = Api::current();
let release = api.new_release(
&org,
&NewRelease {
version: release_name,
projects: vec![project.to_string()],
..Default::default()
},
)?;
match matches.values_of("dist") {
None => {
processor.upload(&UploadContext {
org: &org,
project: Some(&project),
release: &release.version,
dist: Some(&dist),
wait: matches.is_present("wait"),
})?;
}
Some(dists) => {
for dist in dists {
processor.upload(&UploadContext {
org: &org,
project: Some(&project),
release: &release.version,
dist: Some(dist),
wait: matches.is_present("wait"),
})?;
}
}
}
Ok(())
})
}
pub fn wrap_call() -> Result<(), Error> {
let mut args: Vec<_> = env::args().skip(1).collect();
let mut bundle_path = None;
let mut sourcemap_path = None;
if args.len() > 1 && (args[1] == "bundle" || args[1] == "ram-bundle") {
let mut iter = args.iter().fuse();
while let Some(item) = iter.next() {
if item == "--sourcemap-output" {
sourcemap_path = iter.next().cloned();
} else if let Some(rest) = item.strip_prefix("--sourcemap-output=") {
sourcemap_path = Some(rest.to_string());
} else if item == "--bundle-output" {
bundle_path = iter.next().cloned();
} else if let Some(rest) = item.strip_prefix("--bundle-output=") {
bundle_path = Some(rest.to_string());
}
}
}
let mut sourcemap_report = SourceMapReport::default();
if sourcemap_path.is_none() && bundle_path.is_some() {
let mut path = env::temp_dir();
let mut map_path = PathBuf::from(bundle_path.clone().unwrap());
map_path.set_extension("jsbundle.map");
path.push(map_path.file_name().unwrap());
sourcemap_report.sourcemap_path = Some(PathBuf::from(&path));
args.push("--sourcemap-output".into());
args.push(path.into_os_string().into_string().unwrap());
} else if let Some(path) = sourcemap_path {
sourcemap_report.sourcemap_path = Some(PathBuf::from(path));
}
sourcemap_report.bundle_path = bundle_path.map(PathBuf::from);
let rv = process::Command::new(env::var("SENTRY_RN_REAL_NODE_BINARY").unwrap())
.args(&args)
.spawn()?
.wait()?;
propagate_exit_status(rv);
let mut f = fs::File::create(env::var("SENTRY_RN_SOURCEMAP_REPORT").unwrap())?;
serde_json::to_writer(&mut f, &sourcemap_report)?;
Ok(())
}
|
}
bundle_path = report.bundle_path.unwrap();
bundle_url = format!("~/{}", bundle_path.file_name().unwrap().to_string_lossy());
sourcemap_path = report.sourcemap_path.unwrap();
|
random_line_split
|
issue-46604.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// revisions: ast mir
//[mir]compile-flags: -Z borrowck=mir
static buf: &mut [u8] = &mut [1u8,2,3,4,5,7]; //[ast]~ ERROR E0017
//[mir]~^ ERROR E0017
fn write<T: AsRef<[u8]>>(buffer: T) { }
fn main()
|
{
write(&buf);
buf[0]=2; //[ast]~ ERROR E0389
//[mir]~^ ERROR E0594
}
|
identifier_body
|
|
issue-46604.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
static buf: &mut [u8] = &mut [1u8,2,3,4,5,7]; //[ast]~ ERROR E0017
//[mir]~^ ERROR E0017
fn write<T: AsRef<[u8]>>(buffer: T) { }
fn main() {
write(&buf);
buf[0]=2; //[ast]~ ERROR E0389
//[mir]~^ ERROR E0594
}
|
// revisions: ast mir
//[mir]compile-flags: -Z borrowck=mir
|
random_line_split
|
issue-46604.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// revisions: ast mir
//[mir]compile-flags: -Z borrowck=mir
static buf: &mut [u8] = &mut [1u8,2,3,4,5,7]; //[ast]~ ERROR E0017
//[mir]~^ ERROR E0017
fn
|
<T: AsRef<[u8]>>(buffer: T) { }
fn main() {
write(&buf);
buf[0]=2; //[ast]~ ERROR E0389
//[mir]~^ ERROR E0594
}
|
write
|
identifier_name
|
main.rs
|
extern crate futures;
extern crate tokio_proto;
extern crate tokio_service;
extern crate hyper;
#[macro_use]
|
extern crate serde_json;
extern crate num_cpus;
extern crate mime;
use tokio_proto::TcpServer;
use futures::future;
use hyper::Method::Get;
use hyper::header::{ContentLength, ContentType, Server};
use hyper::StatusCode::NotFound;
use hyper::server::{Http, Service, Request, Response};
use std::net::SocketAddr;
static HELLOWORLD: &'static [u8] = b"Hello, world!";
#[derive(Serialize)]
struct JsonResponse<'a> {
message: &'a str,
}
struct TechEmpower;
impl Service for TechEmpower {
type Request = Request;
type Response = Response;
type Error = hyper::Error;
type Future = ::futures::Finished<Response, hyper::Error>;
fn call(&self, req: Request) -> Self::Future {
let response = match (req.method(), req.path()) {
(&Get, "/plaintext") => {
Response::new()
.with_header(ContentLength(HELLOWORLD.len() as u64))
.with_header(ContentType(mime::TEXT_PLAIN))
.with_body(HELLOWORLD)
}
(&Get, "/json") => {
let rep = JsonResponse { message: "Hello, world!" };
let rep_body = serde_json::to_vec(&rep).unwrap();
Response::new()
.with_header(ContentLength(rep_body.len() as u64))
.with_header(ContentType(mime::APPLICATION_JSON))
.with_body(rep_body)
}
_ => Response::new().with_status(NotFound),
};
future::ok(response.with_header(Server::new("Hyper")))
}
}
fn main() {
let addr: SocketAddr = "0.0.0.0:8080".parse().unwrap();
let mut srv = TcpServer::new(Http::new(), addr);
println!("Listening on http://{} using {} threads",
addr,
num_cpus::get());
srv.threads(num_cpus::get());
srv.serve(move || Ok(TechEmpower))
}
|
extern crate serde_derive;
|
random_line_split
|
main.rs
|
extern crate futures;
extern crate tokio_proto;
extern crate tokio_service;
extern crate hyper;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate num_cpus;
extern crate mime;
use tokio_proto::TcpServer;
use futures::future;
use hyper::Method::Get;
use hyper::header::{ContentLength, ContentType, Server};
use hyper::StatusCode::NotFound;
use hyper::server::{Http, Service, Request, Response};
use std::net::SocketAddr;
static HELLOWORLD: &'static [u8] = b"Hello, world!";
#[derive(Serialize)]
struct JsonResponse<'a> {
message: &'a str,
}
struct TechEmpower;
impl Service for TechEmpower {
type Request = Request;
type Response = Response;
type Error = hyper::Error;
type Future = ::futures::Finished<Response, hyper::Error>;
fn
|
(&self, req: Request) -> Self::Future {
let response = match (req.method(), req.path()) {
(&Get, "/plaintext") => {
Response::new()
.with_header(ContentLength(HELLOWORLD.len() as u64))
.with_header(ContentType(mime::TEXT_PLAIN))
.with_body(HELLOWORLD)
}
(&Get, "/json") => {
let rep = JsonResponse { message: "Hello, world!" };
let rep_body = serde_json::to_vec(&rep).unwrap();
Response::new()
.with_header(ContentLength(rep_body.len() as u64))
.with_header(ContentType(mime::APPLICATION_JSON))
.with_body(rep_body)
}
_ => Response::new().with_status(NotFound),
};
future::ok(response.with_header(Server::new("Hyper")))
}
}
fn main() {
let addr: SocketAddr = "0.0.0.0:8080".parse().unwrap();
let mut srv = TcpServer::new(Http::new(), addr);
println!("Listening on http://{} using {} threads",
addr,
num_cpus::get());
srv.threads(num_cpus::get());
srv.serve(move || Ok(TechEmpower))
}
|
call
|
identifier_name
|
main.rs
|
extern crate futures;
extern crate tokio_proto;
extern crate tokio_service;
extern crate hyper;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate num_cpus;
extern crate mime;
use tokio_proto::TcpServer;
use futures::future;
use hyper::Method::Get;
use hyper::header::{ContentLength, ContentType, Server};
use hyper::StatusCode::NotFound;
use hyper::server::{Http, Service, Request, Response};
use std::net::SocketAddr;
static HELLOWORLD: &'static [u8] = b"Hello, world!";
#[derive(Serialize)]
struct JsonResponse<'a> {
message: &'a str,
}
struct TechEmpower;
impl Service for TechEmpower {
type Request = Request;
type Response = Response;
type Error = hyper::Error;
type Future = ::futures::Finished<Response, hyper::Error>;
fn call(&self, req: Request) -> Self::Future {
let response = match (req.method(), req.path()) {
(&Get, "/plaintext") => {
Response::new()
.with_header(ContentLength(HELLOWORLD.len() as u64))
.with_header(ContentType(mime::TEXT_PLAIN))
.with_body(HELLOWORLD)
}
(&Get, "/json") => {
let rep = JsonResponse { message: "Hello, world!" };
let rep_body = serde_json::to_vec(&rep).unwrap();
Response::new()
.with_header(ContentLength(rep_body.len() as u64))
.with_header(ContentType(mime::APPLICATION_JSON))
.with_body(rep_body)
}
_ => Response::new().with_status(NotFound),
};
future::ok(response.with_header(Server::new("Hyper")))
}
}
fn main()
|
{
let addr: SocketAddr = "0.0.0.0:8080".parse().unwrap();
let mut srv = TcpServer::new(Http::new(), addr);
println!("Listening on http://{} using {} threads",
addr,
num_cpus::get());
srv.threads(num_cpus::get());
srv.serve(move || Ok(TechEmpower))
}
|
identifier_body
|
|
input_process.rs
|
extern crate glutin;
extern crate state;
use state::{GraphicsState, CoreState};
use glutin::Event::{Closed, KeyboardInput};
use glutin::ElementState::{Pressed};
use glutin::VirtualKeyCode as VK;
pub fn execute(graphics_state: &mut GraphicsState, core_state: &mut CoreState) -> () {
for event in graphics_state.display.poll_events() {
match event {
Closed => {
core_state.quit = true
}
KeyboardInput(Pressed, _, Some(VK::Escape)) => {
core_state.quit = true
|
KeyboardInput(Pressed, _, Some(VK::F3)) => {
graphics_state.reload_shaders = true;
}
KeyboardInput(Pressed, _, Some(VK::F4)) => {
core_state.reload = true
}
KeyboardInput(Pressed, _, Some(VK::F5)) => {
core_state.reset = true
}
_ => ()
}
}
}
|
}
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.