file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
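Each row below splits one Rust source file into a `prefix`, a `middle`, and a `suffix`, and labels the split with one of four `fim_type` classes (`conditional_block`, `identifier_body`, `identifier_name`, `random_line_split`). As a rough, non-authoritative illustration of how such a row is commonly assembled into a fill-in-the-middle training sample, here is a minimal sketch; the sentinel token names and the prefix-suffix-middle ordering are assumptions, not something this dataset defines.

```rust
/// Minimal sketch of turning one dataset row into a fill-in-the-middle
/// training string. The `<|fim_*|>` sentinels are illustrative placeholders;
/// a real tokenizer would define its own special tokens.
fn to_fim_sample(prefix: &str, middle: &str, suffix: &str) -> String {
    format!("<|fim_prefix|>{prefix}<|fim_suffix|>{suffix}<|fim_middle|>{middle}")
}

fn main() {
    // A toy row, standing in for the real `prefix`/`middle`/`suffix` cells below.
    let sample = to_fim_sample("fn main() -> Result<()> {\n", "    Ok(())\n", "}\n");
    println!("{sample}");
}
```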
traffic.rs
|
use std::env;
use tokio::runtime::Runtime;
use hubcaps::traffic::TimeUnit;
use hubcaps::{Credentials, Github, Result};
fn main() -> Result<()> {
pretty_env_logger::init();
match env::var("GITHUB_TOKEN").ok() {
Some(token) =>
|
let views = rt.block_on(github.repo(owner, repo).traffic().views(TimeUnit::Day))?;
println!("{:#?}", views);
println!("Clones per day");
let clones = rt.block_on(github.repo(owner, repo).traffic().clones(TimeUnit::Day))?;
println!("{:#?}", clones);
Ok(())
}
_ => Err("example missing GITHUB_TOKEN".into()),
}
}
|
{
let mut rt = Runtime::new()?;
let github = Github::new(
concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION")),
Credentials::Token(token),
)?;
let owner = "softprops";
let repo = "hubcaps";
println!("Top 10 referrers");
for referrer in rt.block_on(github.repo(owner, repo).traffic().referrers())? {
println!("{:#?}", referrer)
}
println!("Top 10 paths");
for path in rt.block_on(github.repo(owner, repo).traffic().paths())? {
println!("{:#?}", path)
}
println!("Views per day");
|
conditional_block
|
traffic.rs
|
use std::env;
use tokio::runtime::Runtime;
use hubcaps::traffic::TimeUnit;
use hubcaps::{Credentials, Github, Result};
fn main() -> Result<()>
|
}
println!("Views per day");
let views = rt.block_on(github.repo(owner, repo).traffic().views(TimeUnit::Day))?;
println!("{:#?}", views);
println!("Clones per day");
let clones = rt.block_on(github.repo(owner, repo).traffic().clones(TimeUnit::Day))?;
println!("{:#?}", clones);
Ok(())
}
_ => Err("example missing GITHUB_TOKEN".into()),
}
}
|
{
pretty_env_logger::init();
match env::var("GITHUB_TOKEN").ok() {
Some(token) => {
let mut rt = Runtime::new()?;
let github = Github::new(
concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION")),
Credentials::Token(token),
)?;
let owner = "softprops";
let repo = "hubcaps";
println!("Top 10 referrers");
for referrer in rt.block_on(github.repo(owner, repo).traffic().referrers())? {
println!("{:#?}", referrer)
}
println!("Top 10 paths");
for path in rt.block_on(github.repo(owner, repo).traffic().paths())? {
println!("{:#?}", path)
|
identifier_body
|
traffic.rs
|
use std::env;
use tokio::runtime::Runtime;
use hubcaps::traffic::TimeUnit;
use hubcaps::{Credentials, Github, Result};
fn main() -> Result<()> {
pretty_env_logger::init();
|
Credentials::Token(token),
)?;
let owner = "softprops";
let repo = "hubcaps";
println!("Top 10 referrers");
for referrer in rt.block_on(github.repo(owner, repo).traffic().referrers())? {
println!("{:#?}", referrer)
}
println!("Top 10 paths");
for path in rt.block_on(github.repo(owner, repo).traffic().paths())? {
println!("{:#?}", path)
}
println!("Views per day");
let views = rt.block_on(github.repo(owner, repo).traffic().views(TimeUnit::Day))?;
println!("{:#?}", views);
println!("Clones per day");
let clones = rt.block_on(github.repo(owner, repo).traffic().clones(TimeUnit::Day))?;
println!("{:#?}", clones);
Ok(())
}
_ => Err("example missing GITHUB_TOKEN".into()),
}
}
|
match env::var("GITHUB_TOKEN").ok() {
Some(token) => {
let mut rt = Runtime::new()?;
let github = Github::new(
concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION")),
|
random_line_split
|
diagnostic.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::Location;
use std::error::Error;
use std::fmt;
use std::fmt::Write;
pub type DiagnosticsResult<T> = Result<T, Vec<Diagnostic>>;
#[derive(fmt::Debug)]
pub struct WithDiagnostics<T> {
pub item: T,
pub errors: Vec<Diagnostic>,
}
impl<T> Into<Result<T, Vec<Diagnostic>>> for WithDiagnostics<T> {
fn into(self) -> Result<T, Vec<Diagnostic>> {
if self.errors.is_empty() {
Ok(self.item)
} else {
Err(self.errors)
}
}
}
pub fn diagnostics_result<T>(result: T, diagnostics: Vec<Diagnostic>) -> DiagnosticsResult<T> {
if diagnostics.is_empty() {
Ok(result)
} else {
Err(diagnostics)
}
}
/// Convert a list of DiagnosticsResult<T> into a DiagnosticsResult<Vec<T>>.
/// This is similar to Result::from_iter except that in the case of an error
/// result, Result::from_iter returns the first error in the list. Whereas
/// this function concatenates all the Vec<Diagnostic> into one flat list.
pub fn combined_result<T, I>(results: I) -> DiagnosticsResult<Vec<T>>
where
T: std::fmt::Debug,
I: Iterator<Item = DiagnosticsResult<T>>,
{
let (oks, errs): (Vec<_>, Vec<_>) = results.partition(Result::is_ok);
diagnostics_result(
oks.into_iter().map(Result::unwrap).collect(),
errs.into_iter().map(Result::unwrap_err).flatten().collect(),
)
}
/// A diagnostic message as a result of validating some code. This struct is
/// modeled after the LSP Diagnostic type:
/// https://microsoft.github.io/language-server-protocol/specification#diagnostic
///
/// Changes from LSP:
/// - `location` is different from LSP in that it's a file + span instead of
/// just a span.
/// - Unused fields are omitted.
#[derive(fmt::Debug)]
pub struct Diagnostic(Box<DiagnosticData>);
impl Diagnostic {
/// Creates a new error Diagnostic.
/// Additional locations can be added with the `.annotate()` function.
pub fn error<T: 'static + DiagnosticDisplay>(message: T, location: Location) -> Self {
Self(Box::new(DiagnosticData {
message: Box::new(message),
location,
related_information: Vec::new(),
}))
}
/// Annotates this error with an additional location and associated message.
pub fn annotate<T: 'static + DiagnosticDisplay>(
mut self,
message: T,
location: Location,
) -> Self {
self.0
.related_information
.push(DiagnosticRelatedInformation {
message: Box::new(message),
location,
});
self
}
pub fn message(&self) -> &impl DiagnosticDisplay {
&self.0.message
}
pub fn location(&self) -> Location {
self.0.location
}
pub fn related_information(&self) -> &[DiagnosticRelatedInformation] {
&self.0.related_information
}
pub fn print_without_source(&self) -> String {
let mut result = String::new();
writeln!(
result,
"{message}:{location:?}",
message = &self.0.message,
location = self.0.location
)
.unwrap();
if !self.0.related_information.is_empty() {
for (ix, related) in self.0.related_information.iter().enumerate() {
writeln!(
result,
"[related {ix}] {message}:{location:?}",
ix = ix + 1,
message = related.message,
location = related.location
)
.unwrap();
}
};
result
}
}
impl fmt::Display for Diagnostic {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.print_without_source())
}
}
impl Error for Diagnostic {}
// statically verify that the Diagnostic type is thread safe
fn _assert_diagnostic_constraints()
where
Diagnostic: Send + Sync,
{
}
#[derive(fmt::Debug)]
struct DiagnosticData {
/// Human readable error message.
message: Box<dyn DiagnosticDisplay>,
/// The primary location of this diagnostic.
location: Location,
/// Related diagnostic information, such as other definitions in the case of
/// a duplicate definition error.
related_information: Vec<DiagnosticRelatedInformation>,
}
/// Secondary locations attached to a diagnostic.
#[derive(fmt::Debug)]
pub struct DiagnosticRelatedInformation {
/// The message of this related diagnostic information.
pub message: Box<dyn DiagnosticDisplay>,
/// The location of this related diagnostic information.
pub location: Location,
}
/// Trait for diagnostic messages to allow structs that capture
/// some data and can lazily convert it to a message.
pub trait DiagnosticDisplay: fmt::Debug + fmt::Display + Send + Sync {}
/// Automatically implement the trait if constraints are met, so that
/// implementors don't need to.
impl<T> DiagnosticDisplay for T where T: fmt::Debug + fmt::Display + Send + Sync {}
impl From<Diagnostic> for Vec<Diagnostic> {
fn from(diagnostic: Diagnostic) -> Self {
vec![diagnostic]
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_combined_result() {
let input: Vec<DiagnosticsResult<u32>> = vec![Ok(0), Ok(1), Ok(2)];
let output = combined_result(input.into_iter());
assert_eq!(output.unwrap(), vec![0, 1, 2]);
let input: Vec<DiagnosticsResult<u32>> = vec![
Ok(1),
Err(vec![Diagnostic::error("err0", Location::generated())]),
];
let output = combined_result(input.into_iter());
assert_eq!(
output.as_ref().unwrap_err()[0].message().to_string(),
"err0"
);
let input: Vec<DiagnosticsResult<u32>> = vec![
Ok(0),
Err(vec![Diagnostic::error("err0", Location::generated())]),
Ok(1),
|
]),
];
let output = combined_result(input.into_iter());
assert_eq!(
output.as_ref().unwrap_err()[0].message().to_string(),
"err0"
);
assert_eq!(
output.as_ref().unwrap_err()[1].message().to_string(),
"err1"
);
assert_eq!(
output.as_ref().unwrap_err()[2].message().to_string(),
"err2"
);
}
}
|
Err(vec![
Diagnostic::error("err1", Location::generated()),
Diagnostic::error("err2", Location::generated()),
|
random_line_split
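Editorial aside, not part of the dataset row: a minimal sketch of using the `Diagnostic` API shown above, assuming the surrounding crate's `Diagnostic` and `Location` types are in scope. It reuses `Location::generated()` as the row's own tests do, since no real source locations are available here.

```rust
// Sketch only: `Location::generated()` stands in for real source locations,
// as in the row's own tests. Printing the result with `{}` goes through
// `print_without_source`, so the related entry shows up in the output.
fn report_duplicate_definition() -> Diagnostic {
    Diagnostic::error("duplicate definition of `foo`", Location::generated())
        .annotate("`foo` was previously defined here", Location::generated())
}
```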
|
diagnostic.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::Location;
use std::error::Error;
use std::fmt;
use std::fmt::Write;
pub type DiagnosticsResult<T> = Result<T, Vec<Diagnostic>>;
#[derive(fmt::Debug)]
pub struct WithDiagnostics<T> {
pub item: T,
pub errors: Vec<Diagnostic>,
}
impl<T> Into<Result<T, Vec<Diagnostic>>> for WithDiagnostics<T> {
fn into(self) -> Result<T, Vec<Diagnostic>> {
if self.errors.is_empty() {
Ok(self.item)
} else {
Err(self.errors)
}
}
}
pub fn diagnostics_result<T>(result: T, diagnostics: Vec<Diagnostic>) -> DiagnosticsResult<T> {
if diagnostics.is_empty() {
Ok(result)
} else {
Err(diagnostics)
}
}
/// Convert a list of DiagnosticsResult<T> into a DiagnosticsResult<Vec<T>>.
/// This is similar to Result::from_iter except that in the case of an error
/// result, Result::from_iter returns the first error in the list. Whereas
/// this function concatenates all the Vec<Diagnostic> into one flat list.
pub fn combined_result<T, I>(results: I) -> DiagnosticsResult<Vec<T>>
where
T: std::fmt::Debug,
I: Iterator<Item = DiagnosticsResult<T>>,
{
let (oks, errs): (Vec<_>, Vec<_>) = results.partition(Result::is_ok);
diagnostics_result(
oks.into_iter().map(Result::unwrap).collect(),
errs.into_iter().map(Result::unwrap_err).flatten().collect(),
)
}
/// A diagnostic message as a result of validating some code. This struct is
/// modeled after the LSP Diagnostic type:
/// https://microsoft.github.io/language-server-protocol/specification#diagnostic
///
/// Changes from LSP:
/// - `location` is different from LSP in that it's a file + span instead of
/// just a span.
/// - Unused fields are omitted.
#[derive(fmt::Debug)]
pub struct Diagnostic(Box<DiagnosticData>);
impl Diagnostic {
/// Creates a new error Diagnostic.
/// Additional locations can be added with the `.annotate()` function.
pub fn error<T: 'static + DiagnosticDisplay>(message: T, location: Location) -> Self {
Self(Box::new(DiagnosticData {
message: Box::new(message),
location,
related_information: Vec::new(),
}))
}
/// Annotates this error with an additional location and associated message.
pub fn annotate<T: 'static + DiagnosticDisplay>(
mut self,
message: T,
location: Location,
) -> Self {
self.0
.related_information
.push(DiagnosticRelatedInformation {
message: Box::new(message),
location,
});
self
}
pub fn message(&self) -> &impl DiagnosticDisplay
|
pub fn location(&self) -> Location {
self.0.location
}
pub fn related_information(&self) -> &[DiagnosticRelatedInformation] {
&self.0.related_information
}
pub fn print_without_source(&self) -> String {
let mut result = String::new();
writeln!(
result,
"{message}:{location:?}",
message = &self.0.message,
location = self.0.location
)
.unwrap();
if !self.0.related_information.is_empty() {
for (ix, related) in self.0.related_information.iter().enumerate() {
writeln!(
result,
"[related {ix}] {message}:{location:?}",
ix = ix + 1,
message = related.message,
location = related.location
)
.unwrap();
}
};
result
}
}
impl fmt::Display for Diagnostic {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.print_without_source())
}
}
impl Error for Diagnostic {}
// statically verify that the Diagnostic type is thread safe
fn _assert_diagnostic_constraints()
where
Diagnostic: Send + Sync,
{
}
#[derive(fmt::Debug)]
struct DiagnosticData {
/// Human readable error message.
message: Box<dyn DiagnosticDisplay>,
/// The primary location of this diagnostic.
location: Location,
/// Related diagnostic information, such as other definitions in the case of
/// a duplicate definition error.
related_information: Vec<DiagnosticRelatedInformation>,
}
/// Secondary locations attached to a diagnostic.
#[derive(fmt::Debug)]
pub struct DiagnosticRelatedInformation {
/// The message of this related diagnostic information.
pub message: Box<dyn DiagnosticDisplay>,
/// The location of this related diagnostic information.
pub location: Location,
}
/// Trait for diagnostic messages to allow structs that capture
/// some data and can lazily convert it to a message.
pub trait DiagnosticDisplay: fmt::Debug + fmt::Display + Send + Sync {}
/// Automatically implement the trait if constraints are met, so that
/// implementors don't need to.
impl<T> DiagnosticDisplay for T where T: fmt::Debug + fmt::Display + Send + Sync {}
impl From<Diagnostic> for Vec<Diagnostic> {
fn from(diagnostic: Diagnostic) -> Self {
vec![diagnostic]
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_combined_result() {
let input: Vec<DiagnosticsResult<u32>> = vec![Ok(0), Ok(1), Ok(2)];
let output = combined_result(input.into_iter());
assert_eq!(output.unwrap(), vec![0, 1, 2]);
let input: Vec<DiagnosticsResult<u32>> = vec![
Ok(1),
Err(vec![Diagnostic::error("err0", Location::generated())]),
];
let output = combined_result(input.into_iter());
assert_eq!(
output.as_ref().unwrap_err()[0].message().to_string(),
"err0"
);
let input: Vec<DiagnosticsResult<u32>> = vec![
Ok(0),
Err(vec![Diagnostic::error("err0", Location::generated())]),
Ok(1),
Err(vec![
Diagnostic::error("err1", Location::generated()),
Diagnostic::error("err2", Location::generated()),
]),
];
let output = combined_result(input.into_iter());
assert_eq!(
output.as_ref().unwrap_err()[0].message().to_string(),
"err0"
);
assert_eq!(
output.as_ref().unwrap_err()[1].message().to_string(),
"err1"
);
assert_eq!(
output.as_ref().unwrap_err()[2].message().to_string(),
"err2"
);
}
}
|
{
&self.0.message
}
|
identifier_body
|
diagnostic.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::Location;
use std::error::Error;
use std::fmt;
use std::fmt::Write;
pub type DiagnosticsResult<T> = Result<T, Vec<Diagnostic>>;
#[derive(fmt::Debug)]
pub struct WithDiagnostics<T> {
pub item: T,
pub errors: Vec<Diagnostic>,
}
impl<T> Into<Result<T, Vec<Diagnostic>>> for WithDiagnostics<T> {
fn into(self) -> Result<T, Vec<Diagnostic>> {
if self.errors.is_empty() {
Ok(self.item)
} else {
Err(self.errors)
}
}
}
pub fn diagnostics_result<T>(result: T, diagnostics: Vec<Diagnostic>) -> DiagnosticsResult<T> {
if diagnostics.is_empty() {
Ok(result)
} else
|
}
/// Convert a list of DiagnosticsResult<T> into a DiagnosticsResult<Vec<T>>.
/// This is similar to Result::from_iter except that in the case of an error
/// result, Result::from_iter returns the first error in the list. Whereas
/// this function concatenates all the Vec<Diagnostic> into one flat list.
pub fn combined_result<T, I>(results: I) -> DiagnosticsResult<Vec<T>>
where
T: std::fmt::Debug,
I: Iterator<Item = DiagnosticsResult<T>>,
{
let (oks, errs): (Vec<_>, Vec<_>) = results.partition(Result::is_ok);
diagnostics_result(
oks.into_iter().map(Result::unwrap).collect(),
errs.into_iter().map(Result::unwrap_err).flatten().collect(),
)
}
/// A diagnostic message as a result of validating some code. This struct is
/// modeled after the LSP Diagnostic type:
/// https://microsoft.github.io/language-server-protocol/specification#diagnostic
///
/// Changes from LSP:
/// - `location` is different from LSP in that it's a file + span instead of
/// just a span.
/// - Unused fields are omitted.
#[derive(fmt::Debug)]
pub struct Diagnostic(Box<DiagnosticData>);
impl Diagnostic {
/// Creates a new error Diagnostic.
/// Additional locations can be added with the `.annotate()` function.
pub fn error<T: 'static + DiagnosticDisplay>(message: T, location: Location) -> Self {
Self(Box::new(DiagnosticData {
message: Box::new(message),
location,
related_information: Vec::new(),
}))
}
/// Annotates this error with an additional location and associated message.
pub fn annotate<T: 'static + DiagnosticDisplay>(
mut self,
message: T,
location: Location,
) -> Self {
self.0
.related_information
.push(DiagnosticRelatedInformation {
message: Box::new(message),
location,
});
self
}
pub fn message(&self) -> &impl DiagnosticDisplay {
&self.0.message
}
pub fn location(&self) -> Location {
self.0.location
}
pub fn related_information(&self) -> &[DiagnosticRelatedInformation] {
&self.0.related_information
}
pub fn print_without_source(&self) -> String {
let mut result = String::new();
writeln!(
result,
"{message}:{location:?}",
message = &self.0.message,
location = self.0.location
)
.unwrap();
if !self.0.related_information.is_empty() {
for (ix, related) in self.0.related_information.iter().enumerate() {
writeln!(
result,
"[related {ix}] {message}:{location:?}",
ix = ix + 1,
message = related.message,
location = related.location
)
.unwrap();
}
};
result
}
}
impl fmt::Display for Diagnostic {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.print_without_source())
}
}
impl Error for Diagnostic {}
// statically verify that the Diagnostic type is thread safe
fn _assert_diagnostic_constraints()
where
Diagnostic: Send + Sync,
{
}
#[derive(fmt::Debug)]
struct DiagnosticData {
/// Human readable error message.
message: Box<dyn DiagnosticDisplay>,
/// The primary location of this diagnostic.
location: Location,
/// Related diagnostic information, such as other definitions in the case of
/// a duplicate definition error.
related_information: Vec<DiagnosticRelatedInformation>,
}
/// Secondary locations attached to a diagnostic.
#[derive(fmt::Debug)]
pub struct DiagnosticRelatedInformation {
/// The message of this related diagnostic information.
pub message: Box<dyn DiagnosticDisplay>,
/// The location of this related diagnostic information.
pub location: Location,
}
/// Trait for diagnostic messages to allow structs that capture
/// some data and can lazily convert it to a message.
pub trait DiagnosticDisplay: fmt::Debug + fmt::Display + Send + Sync {}
/// Automatically implement the trait if constraints are met, so that
/// implementors don't need to.
impl<T> DiagnosticDisplay for T where T: fmt::Debug + fmt::Display + Send + Sync {}
impl From<Diagnostic> for Vec<Diagnostic> {
fn from(diagnostic: Diagnostic) -> Self {
vec![diagnostic]
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_combined_result() {
let input: Vec<DiagnosticsResult<u32>> = vec![Ok(0), Ok(1), Ok(2)];
let output = combined_result(input.into_iter());
assert_eq!(output.unwrap(), vec![0, 1, 2]);
let input: Vec<DiagnosticsResult<u32>> = vec![
Ok(1),
Err(vec![Diagnostic::error("err0", Location::generated())]),
];
let output = combined_result(input.into_iter());
assert_eq!(
output.as_ref().unwrap_err()[0].message().to_string(),
"err0"
);
let input: Vec<DiagnosticsResult<u32>> = vec![
Ok(0),
Err(vec![Diagnostic::error("err0", Location::generated())]),
Ok(1),
Err(vec![
Diagnostic::error("err1", Location::generated()),
Diagnostic::error("err2", Location::generated()),
]),
];
let output = combined_result(input.into_iter());
assert_eq!(
output.as_ref().unwrap_err()[0].message().to_string(),
"err0"
);
assert_eq!(
output.as_ref().unwrap_err()[1].message().to_string(),
"err1"
);
assert_eq!(
output.as_ref().unwrap_err()[2].message().to_string(),
"err2"
);
}
}
|
{
Err(diagnostics)
}
|
conditional_block
|
diagnostic.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::Location;
use std::error::Error;
use std::fmt;
use std::fmt::Write;
pub type DiagnosticsResult<T> = Result<T, Vec<Diagnostic>>;
#[derive(fmt::Debug)]
pub struct WithDiagnostics<T> {
pub item: T,
pub errors: Vec<Diagnostic>,
}
impl<T> Into<Result<T, Vec<Diagnostic>>> for WithDiagnostics<T> {
fn into(self) -> Result<T, Vec<Diagnostic>> {
if self.errors.is_empty() {
Ok(self.item)
} else {
Err(self.errors)
}
}
}
pub fn
|
<T>(result: T, diagnostics: Vec<Diagnostic>) -> DiagnosticsResult<T> {
if diagnostics.is_empty() {
Ok(result)
} else {
Err(diagnostics)
}
}
/// Convert a list of DiagnosticsResult<T> into a DiagnosticsResult<Vec<T>>.
/// This is similar to Result::from_iter except that in the case of an error
/// result, Result::from_iter returns the first error in the list. Whereas
/// this function concatenates all the Vec<Diagnostic> into one flat list.
pub fn combined_result<T, I>(results: I) -> DiagnosticsResult<Vec<T>>
where
T: std::fmt::Debug,
I: Iterator<Item = DiagnosticsResult<T>>,
{
let (oks, errs): (Vec<_>, Vec<_>) = results.partition(Result::is_ok);
diagnostics_result(
oks.into_iter().map(Result::unwrap).collect(),
errs.into_iter().map(Result::unwrap_err).flatten().collect(),
)
}
/// A diagnostic message as a result of validating some code. This struct is
/// modeled after the LSP Diagnostic type:
/// https://microsoft.github.io/language-server-protocol/specification#diagnostic
///
/// Changes from LSP:
/// - `location` is different from LSP in that it's a file + span instead of
/// just a span.
/// - Unused fields are omitted.
#[derive(fmt::Debug)]
pub struct Diagnostic(Box<DiagnosticData>);
impl Diagnostic {
/// Creates a new error Diagnostic.
/// Additional locations can be added with the `.annotate()` function.
pub fn error<T: 'static + DiagnosticDisplay>(message: T, location: Location) -> Self {
Self(Box::new(DiagnosticData {
message: Box::new(message),
location,
related_information: Vec::new(),
}))
}
/// Annotates this error with an additional location and associated message.
pub fn annotate<T: 'static + DiagnosticDisplay>(
mut self,
message: T,
location: Location,
) -> Self {
self.0
.related_information
.push(DiagnosticRelatedInformation {
message: Box::new(message),
location,
});
self
}
pub fn message(&self) -> &impl DiagnosticDisplay {
&self.0.message
}
pub fn location(&self) -> Location {
self.0.location
}
pub fn related_information(&self) -> &[DiagnosticRelatedInformation] {
&self.0.related_information
}
pub fn print_without_source(&self) -> String {
let mut result = String::new();
writeln!(
result,
"{message}:{location:?}",
message = &self.0.message,
location = self.0.location
)
.unwrap();
if !self.0.related_information.is_empty() {
for (ix, related) in self.0.related_information.iter().enumerate() {
writeln!(
result,
"[related {ix}] {message}:{location:?}",
ix = ix + 1,
message = related.message,
location = related.location
)
.unwrap();
}
};
result
}
}
impl fmt::Display for Diagnostic {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.print_without_source())
}
}
impl Error for Diagnostic {}
// statically verify that the Diagnostic type is thread safe
fn _assert_diagnostic_constraints()
where
Diagnostic: Send + Sync,
{
}
#[derive(fmt::Debug)]
struct DiagnosticData {
/// Human readable error message.
message: Box<dyn DiagnosticDisplay>,
/// The primary location of this diagnostic.
location: Location,
/// Related diagnostic information, such as other definitions in the case of
/// a duplicate definition error.
related_information: Vec<DiagnosticRelatedInformation>,
}
/// Secondary locations attached to a diagnostic.
#[derive(fmt::Debug)]
pub struct DiagnosticRelatedInformation {
/// The message of this related diagnostic information.
pub message: Box<dyn DiagnosticDisplay>,
/// The location of this related diagnostic information.
pub location: Location,
}
/// Trait for diagnostic messages to allow structs that capture
/// some data and can lazily convert it to a message.
pub trait DiagnosticDisplay: fmt::Debug + fmt::Display + Send + Sync {}
/// Automatically implement the trait if constraints are met, so that
/// implementors don't need to.
impl<T> DiagnosticDisplay for T where T: fmt::Debug + fmt::Display + Send + Sync {}
impl From<Diagnostic> for Vec<Diagnostic> {
fn from(diagnostic: Diagnostic) -> Self {
vec![diagnostic]
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_combined_result() {
let input: Vec<DiagnosticsResult<u32>> = vec![Ok(0), Ok(1), Ok(2)];
let output = combined_result(input.into_iter());
assert_eq!(output.unwrap(), vec![0, 1, 2]);
let input: Vec<DiagnosticsResult<u32>> = vec![
Ok(1),
Err(vec![Diagnostic::error("err0", Location::generated())]),
];
let output = combined_result(input.into_iter());
assert_eq!(
output.as_ref().unwrap_err()[0].message().to_string(),
"err0"
);
let input: Vec<DiagnosticsResult<u32>> = vec![
Ok(0),
Err(vec![Diagnostic::error("err0", Location::generated())]),
Ok(1),
Err(vec![
Diagnostic::error("err1", Location::generated()),
Diagnostic::error("err2", Location::generated()),
]),
];
let output = combined_result(input.into_iter());
assert_eq!(
output.as_ref().unwrap_err()[0].message().to_string(),
"err0"
);
assert_eq!(
output.as_ref().unwrap_err()[1].message().to_string(),
"err1"
);
assert_eq!(
output.as_ref().unwrap_err()[2].message().to_string(),
"err2"
);
}
}
|
diagnostics_result
|
identifier_name
|
values.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Helper types and traits for the handling of CSS values.
use app_units::Au;
use cssparser::UnicodeRange;
use std::fmt;
/// The real `ToCss` trait can't be implemented for types in crates that don't
/// depend on each other.
pub trait ToCss {
/// Serialize `self` in CSS syntax, writing to `dest`.
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write;
/// Serialize `self` in CSS syntax and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self) -> String {
let mut s = String::new();
self.to_css(&mut s).unwrap();
s
}
}
/// Marker trait to automatically implement ToCss for Vec<T>.
pub trait OneOrMoreCommaSeparated {}
impl OneOrMoreCommaSeparated for UnicodeRange {}
impl<T> ToCss for Vec<T> where T: ToCss + OneOrMoreCommaSeparated {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
let mut iter = self.iter();
iter.next().unwrap().to_css(dest)?;
for item in iter {
dest.write_str(", ")?;
item.to_css(dest)?;
}
Ok(())
}
}
impl<T: ToCss> ToCss for Box<T> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write,
{
(**self).to_css(dest)
}
}
impl ToCss for Au {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
write!(dest, "{}px", self.to_f64_px())
}
}
macro_rules! impl_to_css_for_predefined_type {
($name: ty) => {
impl<'a> ToCss for $name {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
::cssparser::ToCss::to_css(self, dest)
}
}
};
}
impl_to_css_for_predefined_type!(f32);
impl_to_css_for_predefined_type!(i32);
impl_to_css_for_predefined_type!(u32);
impl_to_css_for_predefined_type!(::cssparser::Token<'a>);
impl_to_css_for_predefined_type!(::cssparser::RGBA);
impl_to_css_for_predefined_type!(::cssparser::Color);
impl_to_css_for_predefined_type!(::cssparser::UnicodeRange);
#[macro_export]
macro_rules! define_css_keyword_enum {
($name: ident: $( $css: expr => $variant: ident ),+,) => {
__define_css_keyword_enum__add_optional_traits!($name [ $( $css => $variant ),+ ]);
};
($name: ident: $( $css: expr => $variant: ident ),+) => {
__define_css_keyword_enum__add_optional_traits!($name [ $( $css => $variant ),+ ]);
};
}
#[cfg(feature = "servo")]
#[macro_export]
macro_rules! __define_css_keyword_enum__add_optional_traits {
($name: ident [ $( $css: expr => $variant: ident ),+ ]) => {
__define_css_keyword_enum__actual! {
$name [ Deserialize, Serialize, HeapSizeOf ] [ $( $css => $variant ),+ ]
}
};
}
#[cfg(not(feature = "servo"))]
#[macro_export]
macro_rules! __define_css_keyword_enum__add_optional_traits {
($name: ident [ $( $css: expr => $variant: ident ),+ ]) => {
__define_css_keyword_enum__actual! {
$name [] [ $( $css => $variant ),+ ]
}
};
}
#[macro_export]
macro_rules! __define_css_keyword_enum__actual {
($name: ident [ $( $derived_trait: ident),* ] [ $( $css: expr => $variant: ident ),+ ]) => {
#[allow(non_camel_case_types, missing_docs)]
#[derive(Clone, Eq, PartialEq, Copy, Hash, RustcEncodable, Debug $(, $derived_trait )* )]
pub enum $name {
$( $variant ),+
}
impl $name {
/// Parse this property from a CSS input stream.
pub fn parse(input: &mut ::cssparser::Parser) -> Result<$name, ()> {
match_ignore_ascii_case! { try!(input.expect_ident()),
$( $css => Ok($name::$variant), )+
_ => Err(())
}
}
}
impl ToCss for $name {
fn to_css<W>(&self, dest: &mut W) -> ::std::fmt::Result
where W: ::std::fmt::Write
{
match *self {
$( $name::$variant => dest.write_str($css) ),+
}
}
}
}
}
/// Helper types for the handling of specified values.
pub mod specified {
use app_units::Au;
/// Whether to allow negative values or not.
#[repr(u8)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AllowedNumericType {
/// Allow all kind of numeric values.
All,
/// Allow only non-negative values.
NonNegative
}
impl AllowedNumericType {
|
/// Whether value is valid for this allowed numeric type.
#[inline]
pub fn is_ok(&self, value: f32) -> bool {
match *self {
AllowedNumericType::All => true,
AllowedNumericType::NonNegative => value >= 0.,
}
}
/// Clamp the value following the rules of this numeric type.
#[inline]
pub fn clamp(&self, val: Au) -> Au {
use std::cmp;
match *self {
AllowedNumericType::All => val,
AllowedNumericType::NonNegative => cmp::max(Au(0), val),
}
}
}
}
|
random_line_split
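Editorial aside, not part of the dataset row: a minimal sketch of how the `define_css_keyword_enum!` macro above is meant to be invoked; the enum name and keyword list are made up for illustration.

```rust
// Hypothetical invocation: expands to an enum with one variant per keyword
// plus `parse` and `ToCss` impls, as defined by the macro above.
define_css_keyword_enum!(BorderStyleKeyword:
    "none" => none,
    "solid" => solid,
    "dashed" => dashed);

#[test]
fn border_style_keyword_serializes_to_its_keyword() {
    // `to_css_string` is the provided method on the `ToCss` trait above.
    assert_eq!(BorderStyleKeyword::solid.to_css_string(), "solid");
}
```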
|
|
values.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Helper types and traits for the handling of CSS values.
use app_units::Au;
use cssparser::UnicodeRange;
use std::fmt;
/// The real `ToCss` trait can't be implemented for types in crates that don't
/// depend on each other.
pub trait ToCss {
/// Serialize `self` in CSS syntax, writing to `dest`.
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write;
/// Serialize `self` in CSS syntax and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self) -> String {
let mut s = String::new();
self.to_css(&mut s).unwrap();
s
}
}
/// Marker trait to automatically implement ToCss for Vec<T>.
pub trait OneOrMoreCommaSeparated {}
impl OneOrMoreCommaSeparated for UnicodeRange {}
impl<T> ToCss for Vec<T> where T: ToCss + OneOrMoreCommaSeparated {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
let mut iter = self.iter();
iter.next().unwrap().to_css(dest)?;
for item in iter {
dest.write_str(", ")?;
item.to_css(dest)?;
}
Ok(())
}
}
impl<T: ToCss> ToCss for Box<T> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write,
{
(**self).to_css(dest)
}
}
impl ToCss for Au {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
write!(dest, "{}px", self.to_f64_px())
}
}
macro_rules! impl_to_css_for_predefined_type {
($name: ty) => {
impl<'a> ToCss for $name {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
::cssparser::ToCss::to_css(self, dest)
}
}
};
}
impl_to_css_for_predefined_type!(f32);
impl_to_css_for_predefined_type!(i32);
impl_to_css_for_predefined_type!(u32);
impl_to_css_for_predefined_type!(::cssparser::Token<'a>);
impl_to_css_for_predefined_type!(::cssparser::RGBA);
impl_to_css_for_predefined_type!(::cssparser::Color);
impl_to_css_for_predefined_type!(::cssparser::UnicodeRange);
#[macro_export]
macro_rules! define_css_keyword_enum {
($name: ident: $( $css: expr => $variant: ident ),+,) => {
__define_css_keyword_enum__add_optional_traits!($name [ $( $css => $variant ),+ ]);
};
($name: ident: $( $css: expr => $variant: ident ),+) => {
__define_css_keyword_enum__add_optional_traits!($name [ $( $css => $variant ),+ ]);
};
}
#[cfg(feature = "servo")]
#[macro_export]
macro_rules! __define_css_keyword_enum__add_optional_traits {
($name: ident [ $( $css: expr => $variant: ident ),+ ]) => {
__define_css_keyword_enum__actual! {
$name [ Deserialize, Serialize, HeapSizeOf ] [ $( $css => $variant ),+ ]
}
};
}
#[cfg(not(feature = "servo"))]
#[macro_export]
macro_rules! __define_css_keyword_enum__add_optional_traits {
($name: ident [ $( $css: expr => $variant: ident ),+ ]) => {
__define_css_keyword_enum__actual! {
$name [] [ $( $css => $variant ),+ ]
}
};
}
#[macro_export]
macro_rules! __define_css_keyword_enum__actual {
($name: ident [ $( $derived_trait: ident),* ] [ $( $css: expr => $variant: ident ),+ ]) => {
#[allow(non_camel_case_types, missing_docs)]
#[derive(Clone, Eq, PartialEq, Copy, Hash, RustcEncodable, Debug $(, $derived_trait )* )]
pub enum $name {
$( $variant ),+
}
impl $name {
/// Parse this property from a CSS input stream.
pub fn parse(input: &mut ::cssparser::Parser) -> Result<$name, ()> {
match_ignore_ascii_case! { try!(input.expect_ident()),
$( $css => Ok($name::$variant), )+
_ => Err(())
}
}
}
impl ToCss for $name {
fn to_css<W>(&self, dest: &mut W) -> ::std::fmt::Result
where W: ::std::fmt::Write
{
match *self {
$( $name::$variant => dest.write_str($css) ),+
}
}
}
}
}
/// Helper types for the handling of specified values.
pub mod specified {
use app_units::Au;
/// Whether to allow negative values or not.
#[repr(u8)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AllowedNumericType {
/// Allow all kind of numeric values.
All,
/// Allow only non-negative values.
NonNegative
}
impl AllowedNumericType {
/// Whether value is valid for this allowed numeric type.
#[inline]
pub fn
|
(&self, value: f32) -> bool {
match *self {
AllowedNumericType::All => true,
AllowedNumericType::NonNegative => value >= 0.,
}
}
/// Clamp the value following the rules of this numeric type.
#[inline]
pub fn clamp(&self, val: Au) -> Au {
use std::cmp;
match *self {
AllowedNumericType::All => val,
AllowedNumericType::NonNegative => cmp::max(Au(0), val),
}
}
}
}
|
is_ok
|
identifier_name
|
values.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Helper types and traits for the handling of CSS values.
use app_units::Au;
use cssparser::UnicodeRange;
use std::fmt;
/// The real `ToCss` trait can't be implemented for types in crates that don't
/// depend on each other.
pub trait ToCss {
/// Serialize `self` in CSS syntax, writing to `dest`.
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write;
/// Serialize `self` in CSS syntax and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self) -> String
|
}
/// Marker trait to automatically implement ToCss for Vec<T>.
pub trait OneOrMoreCommaSeparated {}
impl OneOrMoreCommaSeparated for UnicodeRange {}
impl<T> ToCss for Vec<T> where T: ToCss + OneOrMoreCommaSeparated {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
let mut iter = self.iter();
iter.next().unwrap().to_css(dest)?;
for item in iter {
dest.write_str(", ")?;
item.to_css(dest)?;
}
Ok(())
}
}
impl<T: ToCss> ToCss for Box<T> {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write,
{
(**self).to_css(dest)
}
}
impl ToCss for Au {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
write!(dest, "{}px", self.to_f64_px())
}
}
macro_rules! impl_to_css_for_predefined_type {
($name: ty) => {
impl<'a> ToCss for $name {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
::cssparser::ToCss::to_css(self, dest)
}
}
};
}
impl_to_css_for_predefined_type!(f32);
impl_to_css_for_predefined_type!(i32);
impl_to_css_for_predefined_type!(u32);
impl_to_css_for_predefined_type!(::cssparser::Token<'a>);
impl_to_css_for_predefined_type!(::cssparser::RGBA);
impl_to_css_for_predefined_type!(::cssparser::Color);
impl_to_css_for_predefined_type!(::cssparser::UnicodeRange);
#[macro_export]
macro_rules! define_css_keyword_enum {
($name: ident: $( $css: expr => $variant: ident ),+,) => {
__define_css_keyword_enum__add_optional_traits!($name [ $( $css => $variant ),+ ]);
};
($name: ident: $( $css: expr => $variant: ident ),+) => {
__define_css_keyword_enum__add_optional_traits!($name [ $( $css => $variant ),+ ]);
};
}
#[cfg(feature = "servo")]
#[macro_export]
macro_rules! __define_css_keyword_enum__add_optional_traits {
($name: ident [ $( $css: expr => $variant: ident ),+ ]) => {
__define_css_keyword_enum__actual! {
$name [ Deserialize, Serialize, HeapSizeOf ] [ $( $css => $variant ),+ ]
}
};
}
#[cfg(not(feature = "servo"))]
#[macro_export]
macro_rules! __define_css_keyword_enum__add_optional_traits {
($name: ident [ $( $css: expr => $variant: ident ),+ ]) => {
__define_css_keyword_enum__actual! {
$name [] [ $( $css => $variant ),+ ]
}
};
}
#[macro_export]
macro_rules! __define_css_keyword_enum__actual {
($name: ident [ $( $derived_trait: ident),* ] [ $( $css: expr => $variant: ident ),+ ]) => {
#[allow(non_camel_case_types, missing_docs)]
#[derive(Clone, Eq, PartialEq, Copy, Hash, RustcEncodable, Debug $(, $derived_trait )* )]
pub enum $name {
$( $variant ),+
}
impl $name {
/// Parse this property from a CSS input stream.
pub fn parse(input: &mut ::cssparser::Parser) -> Result<$name, ()> {
match_ignore_ascii_case! { try!(input.expect_ident()),
$( $css => Ok($name::$variant), )+
_ => Err(())
}
}
}
impl ToCss for $name {
fn to_css<W>(&self, dest: &mut W) -> ::std::fmt::Result
where W: ::std::fmt::Write
{
match *self {
$( $name::$variant => dest.write_str($css) ),+
}
}
}
}
}
/// Helper types for the handling of specified values.
pub mod specified {
use app_units::Au;
/// Whether to allow negative values or not.
#[repr(u8)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AllowedNumericType {
/// Allow all kind of numeric values.
All,
/// Allow only non-negative values.
NonNegative
}
impl AllowedNumericType {
/// Whether value is valid for this allowed numeric type.
#[inline]
pub fn is_ok(&self, value: f32) -> bool {
match *self {
AllowedNumericType::All => true,
AllowedNumericType::NonNegative => value >= 0.,
}
}
/// Clamp the value following the rules of this numeric type.
#[inline]
pub fn clamp(&self, val: Au) -> Au {
use std::cmp;
match *self {
AllowedNumericType::All => val,
AllowedNumericType::NonNegative => cmp::max(Au(0), val),
}
}
}
}
|
{
let mut s = String::new();
self.to_css(&mut s).unwrap();
s
}
|
identifier_body
|
orientable.rs
|
// This file is part of rgtk.
|
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>.
use gtk::Orientation;
use gtk::cast::GTK_ORIENTABLE;
use gtk::{mod, ffi};
pub trait OrientableTrait: gtk::WidgetTrait {
fn get_orientation(&self) -> Orientation {
unsafe {
ffi::gtk_orientable_get_orientation(GTK_ORIENTABLE(self.get_widget()))
}
}
fn set_orientation(&mut self, orientation: Orientation) -> () {
unsafe {
ffi::gtk_orientable_set_orientation(GTK_ORIENTABLE(self.get_widget()), orientation)
}
}
}
|
//
// rgtk is free software: you can redistribute it and/or modify
|
random_line_split
|
orientable.rs
|
// This file is part of rgtk.
//
// rgtk is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>.
use gtk::Orientation;
use gtk::cast::GTK_ORIENTABLE;
use gtk::{mod, ffi};
pub trait OrientableTrait: gtk::WidgetTrait {
fn get_orientation(&self) -> Orientation {
unsafe {
ffi::gtk_orientable_get_orientation(GTK_ORIENTABLE(self.get_widget()))
}
}
fn set_orientation(&mut self, orientation: Orientation) -> ()
|
}
|
{
unsafe {
ffi::gtk_orientable_set_orientation(GTK_ORIENTABLE(self.get_widget()), orientation)
}
}
|
identifier_body
|
orientable.rs
|
// This file is part of rgtk.
//
// rgtk is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>.
use gtk::Orientation;
use gtk::cast::GTK_ORIENTABLE;
use gtk::{mod, ffi};
pub trait OrientableTrait: gtk::WidgetTrait {
fn get_orientation(&self) -> Orientation {
unsafe {
ffi::gtk_orientable_get_orientation(GTK_ORIENTABLE(self.get_widget()))
}
}
fn
|
(&mut self, orientation: Orientation) -> () {
unsafe {
ffi::gtk_orientable_set_orientation(GTK_ORIENTABLE(self.get_widget()), orientation)
}
}
}
|
set_orientation
|
identifier_name
|
interrupts.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/main.rs
//! Low-level interrupt handling and CPU error handling
//use prelude::*;
#[repr(C)]
/// A handler for an ISR
pub type ISRHandler = extern "C" fn(isrnum: usize, info: *const (), idx: usize);
struct IRQHandlersEnt
{
handler: Option<ISRHandler>,
info: *const(),
idx: usize,
}
impl Copy for IRQHandlersEnt {}
impl Clone for IRQHandlersEnt { fn clone(&self)->Self { *self } }
unsafe impl Send for IRQHandlersEnt {}
#[derive(Default)]
/// A handle for a bound ISR, unbound on drop
pub struct
|
{
idx: usize,
}
static S_IRQ_HANDLERS_LOCK: ::sync::Spinlock<[IRQHandlersEnt; 256]> = ::sync::Spinlock::new( [IRQHandlersEnt{
handler: None,
info: 0 as *const _,
idx: 0
}; 256] );
#[no_mangle]
#[doc(hidden)]
#[tag_safe(irq)]
/// ISR handler called by assembly
pub extern "C" fn irq_handler(index: usize)
{
let lh = S_IRQ_HANDLERS_LOCK.lock_irqsafe();
let ent = (*lh)[index];
if let Some(h) = ent.handler {
(h)(index, ent.info, ent.idx);
}
}
#[derive(Debug,Copy,Clone)]
/// Error code for bind_isr
pub enum BindISRError
{
Used,
}
pub use super::hw::apic::IRQHandle;
pub use super::hw::apic::IrqError as BindError;
pub use super::hw::apic::register_irq as bind_gsi;
/// Bind a callback (and params) to an allocatable ISR
pub fn bind_isr(isr: u8, callback: ISRHandler, info: *const(), idx: usize) -> Result<ISRHandle,BindISRError>
{
log_trace!("bind_isr(isr={},callback={:?},info={:?},idx={})",
isr, callback as *const u8, info, idx);
// TODO: Validate if the requested ISR slot is valid (i.e. it's one of the allocatable ones)
// 1. Check that this ISR slot on this CPU isn't taken
let _irq_hold = ::arch::sync::hold_interrupts();
let mut mh = S_IRQ_HANDLERS_LOCK.lock();
let h = &mut mh[isr as usize];
log_trace!("&h = {:p}", h);
if h.handler.is_some()
{
Err( BindISRError::Used )
}
else
{
// 2. Assign
*h = IRQHandlersEnt {
handler: Some(callback),
info: info,
idx: idx,
};
Ok( ISRHandle {
idx: isr as usize,
} )
}
}
pub fn bind_free_isr(callback: ISRHandler, info: *const(), idx: usize) -> Result<ISRHandle,BindISRError>
{
log_trace!("bind_free_isr(callback={:?},info={:?},idx={})", callback as *const u8, info, idx);
let _irq_hold = ::arch::sync::hold_interrupts();
let mut lh = S_IRQ_HANDLERS_LOCK.lock();
for i in 32..lh.len()
{
if lh[i].handler.is_none() {
log_trace!("- Using ISR {}", i);
lh[i] = IRQHandlersEnt {
handler: Some(callback),
info: info,
idx: idx,
};
return Ok( ISRHandle {
idx: i,
} )
}
}
Err( BindISRError::Used )
}
impl ISRHandle
{
/// Returns an unbound ISR handle (null)
pub fn unbound() -> ISRHandle {
ISRHandle {
idx: !0,
}
}
/// Returns the bound ISR index
pub fn idx(&self) -> usize { self.idx }
}
impl ::core::ops::Drop for ISRHandle
{
fn drop(&mut self)
{
if self.idx < 256
{
let _irq_hold = ::arch::sync::hold_interrupts();
let mut mh = S_IRQ_HANDLERS_LOCK.lock();
let h = &mut mh[self.idx];
h.handler = None;
}
}
}
// vim: ft=rust
|
ISRHandle
|
identifier_name
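Editorial aside, not part of the dataset row: a sketch of how the `bind_free_isr` API above would typically be used; the handler and its trace message are invented for illustration.

```rust
// Handler signature matches the `ISRHandler` type alias above.
extern "C" fn demo_isr(isrnum: usize, _info: *const (), _idx: usize) {
    log_trace!("demo interrupt fired on ISR {}", isrnum);
}

fn install_demo_handler() -> Result<ISRHandle, BindISRError> {
    // No per-handler state is needed, so pass a null info pointer and index 0.
    // The returned handle unbinds the ISR again when it is dropped.
    bind_free_isr(demo_isr, ::core::ptr::null(), 0)
}
```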
|
interrupts.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/main.rs
//! Low-level interrupt handling and CPU error handling
//use prelude::*;
#[repr(C)]
/// A handler for an ISR
pub type ISRHandler = extern "C" fn(isrnum: usize, info: *const (), idx: usize);
struct IRQHandlersEnt
{
handler: Option<ISRHandler>,
info: *const(),
idx: usize,
}
impl Copy for IRQHandlersEnt {}
impl Clone for IRQHandlersEnt { fn clone(&self)->Self { *self } }
unsafe impl Send for IRQHandlersEnt {}
#[derive(Default)]
/// A handle for a bound ISR, unbound on drop
pub struct ISRHandle
{
idx: usize,
}
static S_IRQ_HANDLERS_LOCK: ::sync::Spinlock<[IRQHandlersEnt; 256]> = ::sync::Spinlock::new( [IRQHandlersEnt{
handler: None,
info: 0 as *const _,
idx: 0
}; 256] );
#[no_mangle]
#[doc(hidden)]
#[tag_safe(irq)]
/// ISR handler called by assembly
pub extern "C" fn irq_handler(index: usize)
{
let lh = S_IRQ_HANDLERS_LOCK.lock_irqsafe();
let ent = (*lh)[index];
if let Some(h) = ent.handler {
(h)(index, ent.info, ent.idx);
}
}
#[derive(Debug,Copy,Clone)]
/// Error code for bind_isr
pub enum BindISRError
{
Used,
}
pub use super::hw::apic::IRQHandle;
pub use super::hw::apic::IrqError as BindError;
pub use super::hw::apic::register_irq as bind_gsi;
/// Bind a callback (and params) to an allocatable ISR
pub fn bind_isr(isr: u8, callback: ISRHandler, info: *const(), idx: usize) -> Result<ISRHandle,BindISRError>
{
log_trace!("bind_isr(isr={},callback={:?},info={:?},idx={})",
isr, callback as *const u8, info, idx);
// TODO: Validate if the requested ISR slot is valid (i.e. it's one of the allocatable ones)
// 1. Check that this ISR slot on this CPU isn't taken
let _irq_hold = ::arch::sync::hold_interrupts();
let mut mh = S_IRQ_HANDLERS_LOCK.lock();
let h = &mut mh[isr as usize];
log_trace!("&h = {:p}", h);
if h.handler.is_some()
{
Err( BindISRError::Used )
}
else
{
// 2. Assign
*h = IRQHandlersEnt {
handler: Some(callback),
info: info,
idx: idx,
};
Ok( ISRHandle {
idx: isr as usize,
} )
}
}
pub fn bind_free_isr(callback: ISRHandler, info: *const(), idx: usize) -> Result<ISRHandle,BindISRError>
{
log_trace!("bind_free_isr(callback={:?},info={:?},idx={})", callback as *const u8, info, idx);
let _irq_hold = ::arch::sync::hold_interrupts();
let mut lh = S_IRQ_HANDLERS_LOCK.lock();
for i in 32..lh.len()
{
if lh[i].handler.is_none() {
log_trace!("- Using ISR {}", i);
lh[i] = IRQHandlersEnt {
handler: Some(callback),
|
};
return Ok( ISRHandle {
idx: i,
} )
}
}
Err( BindISRError::Used )
}
impl ISRHandle
{
/// Returns an unbound ISR handle (null)
pub fn unbound() -> ISRHandle {
ISRHandle {
idx: !0,
}
}
/// Returns the bound ISR index
pub fn idx(&self) -> usize { self.idx }
}
impl ::core::ops::Drop for ISRHandle
{
fn drop(&mut self)
{
if self.idx < 256
{
let _irq_hold = ::arch::sync::hold_interrupts();
let mut mh = S_IRQ_HANDLERS_LOCK.lock();
let h = &mut mh[self.idx];
h.handler = None;
}
}
}
// vim: ft=rust
|
info: info,
idx: idx,
|
random_line_split
|
interrupts.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/main.rs
//! Low-level interrupt handling and CPU error handling
//use prelude::*;
#[repr(C)]
/// A handler for an ISR
pub type ISRHandler = extern "C" fn(isrnum: usize, info: *const (), idx: usize);
struct IRQHandlersEnt
{
handler: Option<ISRHandler>,
info: *const(),
idx: usize,
}
impl Copy for IRQHandlersEnt {}
impl Clone for IRQHandlersEnt { fn clone(&self)->Self { *self } }
unsafe impl Send for IRQHandlersEnt {}
#[derive(Default)]
/// A handle for a bound ISR, unbound on drop
pub struct ISRHandle
{
idx: usize,
}
static S_IRQ_HANDLERS_LOCK: ::sync::Spinlock<[IRQHandlersEnt; 256]> = ::sync::Spinlock::new( [IRQHandlersEnt{
handler: None,
info: 0 as *const _,
idx: 0
}; 256] );
#[no_mangle]
#[doc(hidden)]
#[tag_safe(irq)]
/// ISR handler called by assembly
pub extern "C" fn irq_handler(index: usize)
{
let lh = S_IRQ_HANDLERS_LOCK.lock_irqsafe();
let ent = (*lh)[index];
if let Some(h) = ent.handler {
(h)(index, ent.info, ent.idx);
}
}
#[derive(Debug,Copy,Clone)]
/// Error code for bind_isr
pub enum BindISRError
{
Used,
}
pub use super::hw::apic::IRQHandle;
pub use super::hw::apic::IrqError as BindError;
pub use super::hw::apic::register_irq as bind_gsi;
/// Bind a callback (and params) to an allocatable ISR
pub fn bind_isr(isr: u8, callback: ISRHandler, info: *const(), idx: usize) -> Result<ISRHandle,BindISRError>
{
log_trace!("bind_isr(isr={},callback={:?},info={:?},idx={})",
isr, callback as *const u8, info, idx);
// TODO: Validate if the requested ISR slot is valid (i.e. it's one of the allocatable ones)
// 1. Check that this ISR slot on this CPU isn't taken
let _irq_hold = ::arch::sync::hold_interrupts();
let mut mh = S_IRQ_HANDLERS_LOCK.lock();
let h = &mut mh[isr as usize];
log_trace!("&h = {:p}", h);
if h.handler.is_some()
{
Err( BindISRError::Used )
}
else
{
// 2. Assign
*h = IRQHandlersEnt {
handler: Some(callback),
info: info,
idx: idx,
};
Ok( ISRHandle {
idx: isr as usize,
} )
}
}
pub fn bind_free_isr(callback: ISRHandler, info: *const(), idx: usize) -> Result<ISRHandle,BindISRError>
{
log_trace!("bind_free_isr(callback={:?},info={:?},idx={})", callback as *const u8, info, idx);
let _irq_hold = ::arch::sync::hold_interrupts();
let mut lh = S_IRQ_HANDLERS_LOCK.lock();
for i in 32..lh.len()
{
if lh[i].handler.is_none() {
log_trace!("- Using ISR {}", i);
lh[i] = IRQHandlersEnt {
handler: Some(callback),
info: info,
idx: idx,
};
return Ok( ISRHandle {
idx: i,
} )
}
}
Err( BindISRError::Used )
}
impl ISRHandle
{
/// Returns an unbound ISR handle (null)
pub fn unbound() -> ISRHandle {
ISRHandle {
idx: !0,
}
}
/// Returns the bound ISR index
pub fn idx(&self) -> usize { self.idx }
}
impl ::core::ops::Drop for ISRHandle
{
fn drop(&mut self)
{
if self.idx < 256
|
}
}
// vim: ft=rust
|
{
let _irq_hold = ::arch::sync::hold_interrupts();
let mut mh = S_IRQ_HANDLERS_LOCK.lock();
let h = &mut mh[self.idx];
h.handler = None;
}
|
conditional_block
|
mod.rs
|
pub mod client_connection;
pub mod client_session;
pub mod local_router;
pub mod router_follower;
pub mod router_leader;
pub mod session_timer;
use std::fmt;
use std::net::SocketAddr;
use std::time::Instant;
use mqtt::{QualityOfService};
use mqtt::packet::{PublishPacket, SubscribePacket, UnsubscribePacket};
use common::{UserId, ClientIdentifier};
/// Message structure send to `client_connection`
#[derive(Clone)]
pub enum ClientConnectionMsg {
Data(SocketAddr, Vec<u8>),
DisconnectClient(SocketAddr, String)
// Shutdown
}
#[derive(Clone)]
pub enum ClientSessionMsg {
Data(SocketAddr, Vec<u8>),
// (user_id, client_identifier, qos, packet)
Publish(UserId, ClientIdentifier, QualityOfService, PublishPacket),
ClientDisconnect(SocketAddr, String),
// (user_id, addr, packets, subscribe_qos)
RetainPackets(UserId, SocketAddr, Vec<PublishPacket>, QualityOfService),
Timeout(SessionTimerPayload)
// Shutdown
}
#[derive(Debug, Clone)]
pub enum LocalRouterMsg {
// Forward publish message to `router_follower` or `local_router`
ForwardPublish(UserId, PublishPacket),
// Receive publish packet from `router_follower` or `local_router`
Publish(UserId, PublishPacket),
// (user_id, client_identifier, packet)
Subscribe(UserId, ClientIdentifier, SocketAddr, SubscribePacket),
Unsubscribe(UserId, ClientIdentifier, UnsubscribePacket),
ClientDisconnect(UserId, ClientIdentifier),
// Shutdown
}
#[derive(Debug, Clone)]
pub enum RouterFollowerMsg {
_Shutdown
}
#[derive(Debug, Clone)]
pub enum
|
{
_Shutdown
}
#[derive(Debug, Copy, Clone)]
pub enum SessionTimerAction {
Set(Instant), Cancel
}
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
pub enum SessionTimerPacketType {
// [QoS.1.send] Receive PUBACK timeout
RecvPubackTimeout,
// [QoS.2.send] Receive PUBREC timeout
RecvPubrecTimeout,
// [QoS.2.send] Receive PUBCOMP timeout
RecvPubcompTimeout,
// [QoS.2.recv] Receive PUBREL timeout
RecvPubrelTimeout,
}
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
pub enum SessionTimerPayload {
// SocketAddr => Client addr
// u16 => packet_identifier (pkid)
RecvPacketTimer(SessionTimerPacketType, SocketAddr, u16),
// Receive PINGREQ timeout
KeepAliveTimer(SocketAddr),
// Decode one packet timeout (maybe useless??)
// DecodePacketTimer(SocketAddr),
}
/// impl Debug for structures
impl fmt::Debug for ClientConnectionMsg {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&ClientConnectionMsg::Data(addr, ref bytes) => {
write!(f, "Write <<{} bytes>> to client [{:?}]", bytes.len(), addr)
}
&ClientConnectionMsg::DisconnectClient(addr, ref reason) => {
write!(f, "Disconnect [{:?}] because: <<{}>>", addr, reason)
}
}
}
}
|
RouterLeaderMsg
|
identifier_name
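Editorial aside, not part of the dataset row: a small sketch of the custom `Debug` formatting defined above; the address, port, and payload are made up for illustration.

```rust
fn debug_format_demo() {
    // Sketch only: the `Debug` impl above reports the payload length rather
    // than the raw bytes. 1883 is just a typical MQTT port for illustration.
    let addr: SocketAddr = "127.0.0.1:1883".parse().unwrap();
    let msg = ClientConnectionMsg::Data(addr, vec![0u8; 64]);
    // Prints something like: Write <<64 bytes>> to client [127.0.0.1:1883]
    println!("{:?}", msg);
}
```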
|
mod.rs
|
pub mod client_connection;
pub mod client_session;
pub mod local_router;
pub mod router_follower;
pub mod router_leader;
pub mod session_timer;
use std::fmt;
use std::net::SocketAddr;
use std::time::Instant;
use mqtt::{QualityOfService};
use mqtt::packet::{PublishPacket, SubscribePacket, UnsubscribePacket};
use common::{UserId, ClientIdentifier};
/// Message structure send to `client_connection`
#[derive(Clone)]
pub enum ClientConnectionMsg {
Data(SocketAddr, Vec<u8>),
DisconnectClient(SocketAddr, String)
|
#[derive(Clone)]
pub enum ClientSessionMsg {
Data(SocketAddr, Vec<u8>),
// (user_id, client_identifier, qos, packet)
Publish(UserId, ClientIdentifier, QualityOfService, PublishPacket),
ClientDisconnect(SocketAddr, String),
// (user_id, addr, packets, subscribe_qos)
RetainPackets(UserId, SocketAddr, Vec<PublishPacket>, QualityOfService),
Timeout(SessionTimerPayload)
// Shutdown
}
#[derive(Debug, Clone)]
pub enum LocalRouterMsg {
// Forward publish message to `router_follower` or `local_router`
ForwardPublish(UserId, PublishPacket),
// Receive publish packet from `router_follower` or `local_router`
Publish(UserId, PublishPacket),
// (user_id, client_identifier, packet)
Subscribe(UserId, ClientIdentifier, SocketAddr, SubscribePacket),
Unsubscribe(UserId, ClientIdentifier, UnsubscribePacket),
ClientDisconnect(UserId, ClientIdentifier),
// Shutdown
}
#[derive(Debug, Clone)]
pub enum RouterFollowerMsg {
_Shutdown
}
#[derive(Debug, Clone)]
pub enum RouterLeaderMsg {
_Shutdown
}
#[derive(Debug, Copy, Clone)]
pub enum SessionTimerAction {
Set(Instant), Cancel
}
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
pub enum SessionTimerPacketType {
// [QoS.1.send] Receive PUBACK timeout
RecvPubackTimeout,
// [QoS.2.send] Receive PUBREC timeout
RecvPubrecTimeout,
// [QoS.2.send] Receive PUBCOMP timeout
RecvPubcompTimeout,
// [QoS.2.recv] Receive PUBREL timeout
RecvPubrelTimeout,
}
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
pub enum SessionTimerPayload {
// SocketAddr => Client addr
// u16 => packet_identifier (pkid)
RecvPacketTimer(SessionTimerPacketType, SocketAddr, u16),
// Receive PINGREQ timeout
KeepAliveTimer(SocketAddr),
// Decode one packet timeout (maybe useless??)
// DecodePacketTimer(SocketAddr),
}
/// impl Debug for structures
impl fmt::Debug for ClientConnectionMsg {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&ClientConnectionMsg::Data(addr, ref bytes) => {
write!(f, "Write <<{} bytes>> to client [{:?}]", bytes.len(), addr)
}
&ClientConnectionMsg::DisconnectClient(addr, ref reason) => {
write!(f, "Disconnect [{:?}] because: <<{}>>", addr, reason)
}
}
}
}
|
// Shutdown
}
|
random_line_split
|
syntax-extension-bytes.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
static static_vec: &'static [u8] = bytes!("abc", 0xFF, '!');
pub fn main()
|
{
let vec = bytes!("abc");
assert_eq!(vec, &[97_u8, 98_u8, 99_u8]);
let vec = bytes!("null", 0);
assert_eq!(vec, &[110_u8, 117_u8, 108_u8, 108_u8, 0_u8]);
let vec = bytes!(' ', " ", 32, 32u8);
assert_eq!(vec, &[32_u8, 32_u8, 32_u8, 32_u8]);
assert_eq!(static_vec, &[97_u8, 98_u8, 99_u8, 255_u8, 33_u8]);
}
|
identifier_body
|
|
syntax-extension-bytes.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
static static_vec: &'static [u8] = bytes!("abc", 0xFF, '!');
pub fn main() {
let vec = bytes!("abc");
assert_eq!(vec, &[97_u8, 98_u8, 99_u8]);
let vec = bytes!("null", 0);
assert_eq!(vec, &[110_u8, 117_u8, 108_u8, 108_u8, 0_u8]);
let vec = bytes!(' ', " ", 32, 32u8);
assert_eq!(vec, &[32_u8, 32_u8, 32_u8, 32_u8]);
assert_eq!(static_vec, &[97_u8, 98_u8, 99_u8, 255_u8, 33_u8]);
|
}
|
random_line_split
|
|
syntax-extension-bytes.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
static static_vec: &'static [u8] = bytes!("abc", 0xFF, '!');
pub fn
|
() {
let vec = bytes!("abc");
assert_eq!(vec, &[97_u8, 98_u8, 99_u8]);
let vec = bytes!("null", 0);
assert_eq!(vec, &[110_u8, 117_u8, 108_u8, 108_u8, 0_u8]);
let vec = bytes!(' ', " ", 32, 32u8);
assert_eq!(vec, &[32_u8, 32_u8, 32_u8, 32_u8]);
assert_eq!(static_vec, &[97_u8, 98_u8, 99_u8, 255_u8, 33_u8]);
}
|
main
|
identifier_name
|
lib.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use crate::function_target_pipeline::FunctionTargetsHolder;
use move_model::model::GlobalEnv;
pub mod access_path;
pub mod access_path_trie;
pub mod annotations;
pub mod borrow_analysis;
pub mod clean_and_optimize;
pub mod compositional_analysis;
pub mod data_invariant_instrumentation;
pub mod dataflow_analysis;
pub mod debug_instrumentation;
pub mod eliminate_imm_refs;
pub mod function_data_builder;
pub mod function_target;
pub mod function_target_pipeline;
pub mod global_invariant_instrumentation;
pub mod global_invariant_instrumentation_v2;
pub mod graph;
pub mod livevar_analysis;
pub mod loop_analysis;
pub mod memory_instrumentation;
pub mod mut_ref_instrumentation;
pub mod options;
pub mod packed_types_analysis;
pub mod reaching_def_analysis;
pub mod read_write_set_analysis;
pub mod spec_instrumentation;
mod spec_translator;
pub mod stackless_bytecode;
pub mod stackless_bytecode_generator;
pub mod stackless_control_flow_graph;
pub mod usage_analysis;
pub mod verification_analysis;
/// Print function targets for testing and debugging.
pub fn print_targets_for_test(
env: &GlobalEnv,
header: &str,
targets: &FunctionTargetsHolder,
) -> String {
let mut text = String::new();
text.push_str(&format!("============ {} ================\n", header));
for module_env in env.get_modules() {
for func_env in module_env.get_functions() {
for (variant, target) in targets.get_targets(&func_env) {
                if !target.data.code.is_empty()
|
}
}
}
text
}
|
{
target.register_annotation_formatters_for_test();
text += &format!("\n[variant {}]\n{}\n", variant, target);
}
|
conditional_block
|
lib.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use crate::function_target_pipeline::FunctionTargetsHolder;
use move_model::model::GlobalEnv;
pub mod access_path;
pub mod access_path_trie;
pub mod annotations;
pub mod borrow_analysis;
pub mod clean_and_optimize;
pub mod compositional_analysis;
pub mod data_invariant_instrumentation;
pub mod dataflow_analysis;
pub mod debug_instrumentation;
pub mod eliminate_imm_refs;
pub mod function_data_builder;
pub mod function_target;
pub mod function_target_pipeline;
pub mod global_invariant_instrumentation;
pub mod global_invariant_instrumentation_v2;
pub mod graph;
pub mod livevar_analysis;
pub mod loop_analysis;
pub mod memory_instrumentation;
pub mod mut_ref_instrumentation;
pub mod options;
pub mod packed_types_analysis;
pub mod reaching_def_analysis;
pub mod read_write_set_analysis;
pub mod spec_instrumentation;
mod spec_translator;
pub mod stackless_bytecode;
pub mod stackless_bytecode_generator;
pub mod stackless_control_flow_graph;
pub mod usage_analysis;
pub mod verification_analysis;
/// Print function targets for testing and debugging.
pub fn print_targets_for_test(
env: &GlobalEnv,
header: &str,
targets: &FunctionTargetsHolder,
) -> String
|
{
let mut text = String::new();
text.push_str(&format!("============ {} ================\n", header));
for module_env in env.get_modules() {
for func_env in module_env.get_functions() {
for (variant, target) in targets.get_targets(&func_env) {
if !target.data.code.is_empty() {
target.register_annotation_formatters_for_test();
text += &format!("\n[variant {}]\n{}\n", variant, target);
}
}
}
}
text
}
|
identifier_body
|
|
lib.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use crate::function_target_pipeline::FunctionTargetsHolder;
use move_model::model::GlobalEnv;
pub mod access_path;
pub mod access_path_trie;
pub mod annotations;
pub mod borrow_analysis;
pub mod clean_and_optimize;
pub mod compositional_analysis;
pub mod data_invariant_instrumentation;
pub mod dataflow_analysis;
pub mod debug_instrumentation;
pub mod eliminate_imm_refs;
pub mod function_data_builder;
pub mod function_target;
pub mod function_target_pipeline;
pub mod global_invariant_instrumentation;
pub mod global_invariant_instrumentation_v2;
pub mod graph;
pub mod livevar_analysis;
pub mod loop_analysis;
pub mod memory_instrumentation;
pub mod mut_ref_instrumentation;
pub mod options;
pub mod packed_types_analysis;
pub mod reaching_def_analysis;
pub mod read_write_set_analysis;
pub mod spec_instrumentation;
mod spec_translator;
pub mod stackless_bytecode;
pub mod stackless_bytecode_generator;
pub mod stackless_control_flow_graph;
pub mod usage_analysis;
pub mod verification_analysis;
/// Print function targets for testing and debugging.
pub fn
|
(
env: &GlobalEnv,
header: &str,
targets: &FunctionTargetsHolder,
) -> String {
let mut text = String::new();
text.push_str(&format!("============ {} ================\n", header));
for module_env in env.get_modules() {
for func_env in module_env.get_functions() {
for (variant, target) in targets.get_targets(&func_env) {
                if !target.data.code.is_empty() {
target.register_annotation_formatters_for_test();
text += &format!("\n[variant {}]\n{}\n", variant, target);
}
}
}
}
text
}
|
print_targets_for_test
|
identifier_name
|
lib.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use crate::function_target_pipeline::FunctionTargetsHolder;
use move_model::model::GlobalEnv;
|
pub mod clean_and_optimize;
pub mod compositional_analysis;
pub mod data_invariant_instrumentation;
pub mod dataflow_analysis;
pub mod debug_instrumentation;
pub mod eliminate_imm_refs;
pub mod function_data_builder;
pub mod function_target;
pub mod function_target_pipeline;
pub mod global_invariant_instrumentation;
pub mod global_invariant_instrumentation_v2;
pub mod graph;
pub mod livevar_analysis;
pub mod loop_analysis;
pub mod memory_instrumentation;
pub mod mut_ref_instrumentation;
pub mod options;
pub mod packed_types_analysis;
pub mod reaching_def_analysis;
pub mod read_write_set_analysis;
pub mod spec_instrumentation;
mod spec_translator;
pub mod stackless_bytecode;
pub mod stackless_bytecode_generator;
pub mod stackless_control_flow_graph;
pub mod usage_analysis;
pub mod verification_analysis;
/// Print function targets for testing and debugging.
pub fn print_targets_for_test(
env: &GlobalEnv,
header: &str,
targets: &FunctionTargetsHolder,
) -> String {
let mut text = String::new();
text.push_str(&format!("============ {} ================\n", header));
for module_env in env.get_modules() {
for func_env in module_env.get_functions() {
for (variant, target) in targets.get_targets(&func_env) {
                if !target.data.code.is_empty() {
target.register_annotation_formatters_for_test();
text += &format!("\n[variant {}]\n{}\n", variant, target);
}
}
}
}
text
}
|
pub mod access_path;
pub mod access_path_trie;
pub mod annotations;
pub mod borrow_analysis;
|
random_line_split
|
strand.rs
|
/*
* strand.rs
*
* striking-db - Persistent key/value store for SSDs.
* Copyright (c) 2017 Maxwell Duzen, Ammon Smith
*
* striking-db is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 2 of
* the License, or (at your option) any later version.
*
* striking-db is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with striking-db. If not, see <http://www.gnu.org/licenses/>.
*
*/
use super::{PAGE_SIZE64, FilePointer, Result};
use buffer::Page;
use device::Device;
use parking_lot::Mutex;
use serial::StrandHeader;
use stats::Stats;
#[derive(Debug)]
pub struct Strand<'d> {
device: &'d Device,
id: u16,
start: u64,
|
}
impl<'d> Strand<'d> {
pub fn new(
device: &'d Device,
id: u16,
start: u64,
capacity: u64,
read_strand: bool,
) -> Result<Self> {
assert_eq!(
start % PAGE_SIZE64,
0,
"Start is not a multiple of the page size"
);
assert_eq!(
capacity % PAGE_SIZE64,
0,
"Capacity is not a multiple of the page size"
);
assert!(
start + capacity <= device.capacity(),
"Strand extends off the boundary of the device"
);
assert!(capacity > PAGE_SIZE64, "Strand only one page long");
let offset = {
let mut page = Page::default();
if read_strand {
// Read existing header
device.read(0, &mut page[..])?;
let header = StrandHeader::read(&page)?;
header.get_offset()
} else {
// Format strand
let header = StrandHeader::new(id, capacity);
header.write(&mut page)?;
device.write(0, &page[..])?;
PAGE_SIZE64
}
};
Ok(Strand {
device: device,
id: id,
start: start,
capacity: capacity,
offset: offset,
stats: Mutex::new(Stats::default()),
})
}
#[inline]
pub fn id(&self) -> u16 {
self.id
}
#[inline]
pub fn start(&self) -> u64 {
self.start
}
#[inline]
pub fn end(&self) -> u64 {
self.start + self.capacity
}
#[inline]
pub fn capacity(&self) -> u64 {
self.capacity
}
#[inline]
pub fn remaining(&self) -> u64 {
self.capacity - self.offset
}
#[inline]
pub fn offset(&self) -> u64 {
self.offset
}
#[inline]
pub fn push_offset(&mut self, amt: u64) {
self.offset += amt;
}
#[inline]
pub fn contains_ptr(&self, ptr: FilePointer) -> bool {
self.start <= ptr && ptr <= self.end()
}
pub fn write_metadata(&mut self) -> Result<()> {
let mut page = Page::default();
let header = StrandHeader::from(self);
header.write(&mut page)?;
self.write(0, &page[..])
}
pub fn read(&self, off: u64, buf: &mut [u8]) -> Result<()> {
let len = buf.len() as u64;
debug_assert!(off < self.capacity, "Offset is outside strand");
debug_assert!(
off + len <= self.start + self.capacity,
"Length outside of strand"
);
{
let mut stats = self.stats.lock();
stats.read_bytes += buf.len() as u64;
}
self.device.read(self.start + off, buf)
}
pub fn write(&self, off: u64, buf: &[u8]) -> Result<()> {
let len = buf.len() as u64;
debug_assert!(off < self.capacity, "Offset is outside strand");
debug_assert!(
off + len <= self.start + self.capacity,
"Length outside of strand"
);
{
let mut stats = self.stats.lock();
stats.written_bytes += buf.len() as u64;
}
self.device.write(self.start + off, buf)
}
#[allow(unused)]
pub fn trim(&self, off: u64, len: u64) -> Result<()> {
debug_assert!(off < self.capacity, "Offset is outside strand");
debug_assert!(
off + len <= self.start + self.capacity,
"Length outside of strand"
);
{
let mut stats = self.stats.lock();
stats.trimmed_bytes += len;
}
self.device.trim(self.start + off, len)
}
}
impl<'d> Drop for Strand<'d> {
fn drop(&mut self) {
self.write_metadata().expect("Error writing metadata");
}
}
|
capacity: u64,
offset: u64,
pub stats: Mutex<Stats>,
|
random_line_split
|
strand.rs
|
/*
* strand.rs
*
* striking-db - Persistent key/value store for SSDs.
* Copyright (c) 2017 Maxwell Duzen, Ammon Smith
*
* striking-db is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 2 of
* the License, or (at your option) any later version.
*
* striking-db is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with striking-db. If not, see <http://www.gnu.org/licenses/>.
*
*/
use super::{PAGE_SIZE64, FilePointer, Result};
use buffer::Page;
use device::Device;
use parking_lot::Mutex;
use serial::StrandHeader;
use stats::Stats;
#[derive(Debug)]
pub struct Strand<'d> {
device: &'d Device,
id: u16,
start: u64,
capacity: u64,
offset: u64,
pub stats: Mutex<Stats>,
}
impl<'d> Strand<'d> {
pub fn new(
device: &'d Device,
id: u16,
start: u64,
capacity: u64,
read_strand: bool,
) -> Result<Self> {
assert_eq!(
start % PAGE_SIZE64,
0,
"Start is not a multiple of the page size"
);
assert_eq!(
capacity % PAGE_SIZE64,
0,
"Capacity is not a multiple of the page size"
);
assert!(
start + capacity <= device.capacity(),
"Strand extends off the boundary of the device"
);
assert!(capacity > PAGE_SIZE64, "Strand only one page long");
let offset = {
let mut page = Page::default();
if read_strand {
// Read existing header
device.read(0, &mut page[..])?;
let header = StrandHeader::read(&page)?;
header.get_offset()
} else {
// Format strand
let header = StrandHeader::new(id, capacity);
header.write(&mut page)?;
device.write(0, &page[..])?;
PAGE_SIZE64
}
};
Ok(Strand {
device: device,
id: id,
start: start,
capacity: capacity,
offset: offset,
stats: Mutex::new(Stats::default()),
})
}
#[inline]
pub fn id(&self) -> u16 {
self.id
}
#[inline]
pub fn start(&self) -> u64 {
self.start
}
#[inline]
pub fn end(&self) -> u64 {
self.start + self.capacity
}
#[inline]
pub fn capacity(&self) -> u64 {
self.capacity
}
#[inline]
pub fn remaining(&self) -> u64 {
self.capacity - self.offset
}
#[inline]
pub fn offset(&self) -> u64
|
#[inline]
pub fn push_offset(&mut self, amt: u64) {
self.offset += amt;
}
#[inline]
pub fn contains_ptr(&self, ptr: FilePointer) -> bool {
self.start <= ptr && ptr <= self.end()
}
pub fn write_metadata(&mut self) -> Result<()> {
let mut page = Page::default();
let header = StrandHeader::from(self);
header.write(&mut page)?;
self.write(0, &page[..])
}
pub fn read(&self, off: u64, buf: &mut [u8]) -> Result<()> {
let len = buf.len() as u64;
debug_assert!(off < self.capacity, "Offset is outside strand");
debug_assert!(
off + len <= self.start + self.capacity,
"Length outside of strand"
);
{
let mut stats = self.stats.lock();
stats.read_bytes += buf.len() as u64;
}
self.device.read(self.start + off, buf)
}
pub fn write(&self, off: u64, buf: &[u8]) -> Result<()> {
let len = buf.len() as u64;
debug_assert!(off < self.capacity, "Offset is outside strand");
debug_assert!(
off + len <= self.start + self.capacity,
"Length outside of strand"
);
{
let mut stats = self.stats.lock();
stats.written_bytes += buf.len() as u64;
}
self.device.write(self.start + off, buf)
}
#[allow(unused)]
pub fn trim(&self, off: u64, len: u64) -> Result<()> {
debug_assert!(off < self.capacity, "Offset is outside strand");
debug_assert!(
off + len <= self.start + self.capacity,
"Length outside of strand"
);
{
let mut stats = self.stats.lock();
stats.trimmed_bytes += len;
}
self.device.trim(self.start + off, len)
}
}
impl<'d> Drop for Strand<'d> {
fn drop(&mut self) {
self.write_metadata().expect("Error writing metadata");
}
}
|
{
self.offset
}
|
identifier_body
|
strand.rs
|
/*
* strand.rs
*
* striking-db - Persistent key/value store for SSDs.
* Copyright (c) 2017 Maxwell Duzen, Ammon Smith
*
* striking-db is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 2 of
* the License, or (at your option) any later version.
*
* striking-db is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with striking-db. If not, see <http://www.gnu.org/licenses/>.
*
*/
use super::{PAGE_SIZE64, FilePointer, Result};
use buffer::Page;
use device::Device;
use parking_lot::Mutex;
use serial::StrandHeader;
use stats::Stats;
#[derive(Debug)]
pub struct Strand<'d> {
device: &'d Device,
id: u16,
start: u64,
capacity: u64,
offset: u64,
pub stats: Mutex<Stats>,
}
impl<'d> Strand<'d> {
pub fn new(
device: &'d Device,
id: u16,
start: u64,
capacity: u64,
read_strand: bool,
) -> Result<Self> {
assert_eq!(
start % PAGE_SIZE64,
0,
"Start is not a multiple of the page size"
);
assert_eq!(
capacity % PAGE_SIZE64,
0,
"Capacity is not a multiple of the page size"
);
assert!(
start + capacity <= device.capacity(),
"Strand extends off the boundary of the device"
);
assert!(capacity > PAGE_SIZE64, "Strand only one page long");
let offset = {
let mut page = Page::default();
if read_strand {
// Read existing header
device.read(0, &mut page[..])?;
let header = StrandHeader::read(&page)?;
header.get_offset()
} else {
// Format strand
let header = StrandHeader::new(id, capacity);
header.write(&mut page)?;
device.write(0, &page[..])?;
PAGE_SIZE64
}
};
Ok(Strand {
device: device,
id: id,
start: start,
capacity: capacity,
offset: offset,
stats: Mutex::new(Stats::default()),
})
}
#[inline]
pub fn id(&self) -> u16 {
self.id
}
#[inline]
pub fn start(&self) -> u64 {
self.start
}
#[inline]
pub fn end(&self) -> u64 {
self.start + self.capacity
}
#[inline]
pub fn capacity(&self) -> u64 {
self.capacity
}
#[inline]
pub fn remaining(&self) -> u64 {
self.capacity - self.offset
}
#[inline]
pub fn offset(&self) -> u64 {
self.offset
}
#[inline]
pub fn push_offset(&mut self, amt: u64) {
self.offset += amt;
}
#[inline]
pub fn contains_ptr(&self, ptr: FilePointer) -> bool {
self.start <= ptr && ptr <= self.end()
}
pub fn write_metadata(&mut self) -> Result<()> {
let mut page = Page::default();
let header = StrandHeader::from(self);
header.write(&mut page)?;
self.write(0, &page[..])
}
pub fn read(&self, off: u64, buf: &mut [u8]) -> Result<()> {
let len = buf.len() as u64;
debug_assert!(off < self.capacity, "Offset is outside strand");
debug_assert!(
off + len <= self.start + self.capacity,
"Length outside of strand"
);
{
let mut stats = self.stats.lock();
stats.read_bytes += buf.len() as u64;
}
self.device.read(self.start + off, buf)
}
pub fn write(&self, off: u64, buf: &[u8]) -> Result<()> {
let len = buf.len() as u64;
debug_assert!(off < self.capacity, "Offset is outside strand");
debug_assert!(
off + len <= self.start + self.capacity,
"Length outside of strand"
);
{
let mut stats = self.stats.lock();
stats.written_bytes += buf.len() as u64;
}
self.device.write(self.start + off, buf)
}
#[allow(unused)]
pub fn trim(&self, off: u64, len: u64) -> Result<()> {
debug_assert!(off < self.capacity, "Offset is outside strand");
debug_assert!(
off + len <= self.start + self.capacity,
"Length outside of strand"
);
{
let mut stats = self.stats.lock();
stats.trimmed_bytes += len;
}
self.device.trim(self.start + off, len)
}
}
impl<'d> Drop for Strand<'d> {
fn
|
(&mut self) {
self.write_metadata().expect("Error writing metadata");
}
}
|
drop
|
identifier_name
|
strand.rs
|
/*
* strand.rs
*
* striking-db - Persistent key/value store for SSDs.
* Copyright (c) 2017 Maxwell Duzen, Ammon Smith
*
* striking-db is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 2 of
* the License, or (at your option) any later version.
*
* striking-db is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with striking-db. If not, see <http://www.gnu.org/licenses/>.
*
*/
use super::{PAGE_SIZE64, FilePointer, Result};
use buffer::Page;
use device::Device;
use parking_lot::Mutex;
use serial::StrandHeader;
use stats::Stats;
#[derive(Debug)]
pub struct Strand<'d> {
device: &'d Device,
id: u16,
start: u64,
capacity: u64,
offset: u64,
pub stats: Mutex<Stats>,
}
impl<'d> Strand<'d> {
pub fn new(
device: &'d Device,
id: u16,
start: u64,
capacity: u64,
read_strand: bool,
) -> Result<Self> {
assert_eq!(
start % PAGE_SIZE64,
0,
"Start is not a multiple of the page size"
);
assert_eq!(
capacity % PAGE_SIZE64,
0,
"Capacity is not a multiple of the page size"
);
assert!(
start + capacity <= device.capacity(),
"Strand extends off the boundary of the device"
);
assert!(capacity > PAGE_SIZE64, "Strand only one page long");
let offset = {
let mut page = Page::default();
if read_strand {
// Read existing header
device.read(0, &mut page[..])?;
let header = StrandHeader::read(&page)?;
header.get_offset()
} else
|
};
Ok(Strand {
device: device,
id: id,
start: start,
capacity: capacity,
offset: offset,
stats: Mutex::new(Stats::default()),
})
}
#[inline]
pub fn id(&self) -> u16 {
self.id
}
#[inline]
pub fn start(&self) -> u64 {
self.start
}
#[inline]
pub fn end(&self) -> u64 {
self.start + self.capacity
}
#[inline]
pub fn capacity(&self) -> u64 {
self.capacity
}
#[inline]
pub fn remaining(&self) -> u64 {
self.capacity - self.offset
}
#[inline]
pub fn offset(&self) -> u64 {
self.offset
}
#[inline]
pub fn push_offset(&mut self, amt: u64) {
self.offset += amt;
}
#[inline]
pub fn contains_ptr(&self, ptr: FilePointer) -> bool {
self.start <= ptr && ptr <= self.end()
}
pub fn write_metadata(&mut self) -> Result<()> {
let mut page = Page::default();
let header = StrandHeader::from(self);
header.write(&mut page)?;
self.write(0, &page[..])
}
pub fn read(&self, off: u64, buf: &mut [u8]) -> Result<()> {
let len = buf.len() as u64;
debug_assert!(off < self.capacity, "Offset is outside strand");
debug_assert!(
off + len <= self.start + self.capacity,
"Length outside of strand"
);
{
let mut stats = self.stats.lock();
stats.read_bytes += buf.len() as u64;
}
self.device.read(self.start + off, buf)
}
pub fn write(&self, off: u64, buf: &[u8]) -> Result<()> {
let len = buf.len() as u64;
debug_assert!(off < self.capacity, "Offset is outside strand");
debug_assert!(
off + len <= self.start + self.capacity,
"Length outside of strand"
);
{
let mut stats = self.stats.lock();
stats.written_bytes += buf.len() as u64;
}
self.device.write(self.start + off, buf)
}
#[allow(unused)]
pub fn trim(&self, off: u64, len: u64) -> Result<()> {
debug_assert!(off < self.capacity, "Offset is outside strand");
debug_assert!(
off + len <= self.start + self.capacity,
"Length outside of strand"
);
{
let mut stats = self.stats.lock();
stats.trimmed_bytes += len;
}
self.device.trim(self.start + off, len)
}
}
impl<'d> Drop for Strand<'d> {
fn drop(&mut self) {
self.write_metadata().expect("Error writing metadata");
}
}
|
{
// Format strand
let header = StrandHeader::new(id, capacity);
header.write(&mut page)?;
device.write(0, &page[..])?;
PAGE_SIZE64
}
|
conditional_block
|
transaction.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::errors;
use crate::statecallback::StateCallback;
use crate::transport::platformmonitor::Monitor;
use runloop::RunLoop;
use std::ffi::OsString;
pub struct Transaction {
// Handle to the thread loop.
thread: Option<RunLoop>,
}
impl Transaction {
pub fn new<F, T>(
timeout: u64,
callback: StateCallback<crate::Result<T>>,
new_device_cb: F,
) -> crate::Result<Self>
where
        F: Fn(OsString, &dyn Fn() -> bool) + Sync + Send + 'static,
        T: 'static,
{
let thread = RunLoop::new_with_timeout(
move |alive| {
// Create a new device monitor.
let mut monitor = Monitor::new(new_device_cb);
// Start polling for new devices.
try_or!(monitor.run(alive), |_| callback
.call(Err(errors::AuthenticatorError::Platform)));
// Send an error, if the callback wasn't called already.
callback.call(Err(errors::AuthenticatorError::U2FToken(
errors::U2FTokenError::NotAllowed,
)));
},
|
timeout,
)
.map_err(|_| errors::AuthenticatorError::Platform)?;
Ok(Self {
thread: Some(thread),
})
}
pub fn cancel(&mut self) {
// This must never be None.
self.thread.take().unwrap().cancel();
}
}
|
random_line_split
|
|
transaction.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::errors;
use crate::statecallback::StateCallback;
use crate::transport::platformmonitor::Monitor;
use runloop::RunLoop;
use std::ffi::OsString;
pub struct Transaction {
// Handle to the thread loop.
thread: Option<RunLoop>,
}
impl Transaction {
pub fn new<F, T>(
timeout: u64,
callback: StateCallback<crate::Result<T>>,
new_device_cb: F,
) -> crate::Result<Self>
where
        F: Fn(OsString, &dyn Fn() -> bool) + Sync + Send + 'static,
        T: 'static,
{
let thread = RunLoop::new_with_timeout(
move |alive| {
// Create a new device monitor.
let mut monitor = Monitor::new(new_device_cb);
// Start polling for new devices.
try_or!(monitor.run(alive), |_| callback
.call(Err(errors::AuthenticatorError::Platform)));
// Send an error, if the callback wasn't called already.
callback.call(Err(errors::AuthenticatorError::U2FToken(
errors::U2FTokenError::NotAllowed,
)));
},
timeout,
)
.map_err(|_| errors::AuthenticatorError::Platform)?;
Ok(Self {
thread: Some(thread),
})
}
pub fn
|
(&mut self) {
// This must never be None.
self.thread.take().unwrap().cancel();
}
}
|
cancel
|
identifier_name
|
prelude.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
//! The I/O Prelude
//!
//! The purpose of this module is to alleviate imports of many common I/O traits
//! by adding a glob import to the top of I/O heavy modules:
//!
//! ```
//! # #![allow(unused_imports)]
//! use std::io::prelude::*;
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
pub use super::{Read, Write, BufRead, Seek};
pub use fs::PathExt;
|
random_line_split
|
|
main.rs
|
use std::io::{self, Read, Write};
use std::env;
fn main() {
let content_length = env::var("CONTENT_LENGTH").unwrap_or("0".into())
.parse::<u64>().expect("Error parsing CONTENT_LENGTH");
let status = match handle(content_length) {
Ok(_) => 0,
Err(_) => 1,
};
::std::process::exit(status);
}
fn
|
(content_length: u64) -> io::Result<()> {
let mut buffer = Vec::new();
io::stdin().take(content_length).read_to_end(&mut buffer)?;
println!("Content-Type: text/html");
println!();
println!("<p>Hello, world!</p>");
println!("<ul>");
for (key, value) in ::std::env::vars() {
println!("<li>{}: {}</li>", key, value);
}
println!("</ul>");
println!("<p>");
io::stdout().write(&buffer[..])?;
println!("</p>");
Ok(())
}
|
handle
|
identifier_name
|
main.rs
|
use std::io::{self, Read, Write};
use std::env;
fn main() {
let content_length = env::var("CONTENT_LENGTH").unwrap_or("0".into())
.parse::<u64>().expect("Error parsing CONTENT_LENGTH");
let status = match handle(content_length) {
Ok(_) => 0,
Err(_) => 1,
};
::std::process::exit(status);
}
fn handle(content_length: u64) -> io::Result<()> {
let mut buffer = Vec::new();
io::stdin().take(content_length).read_to_end(&mut buffer)?;
println!("Content-Type: text/html");
|
println!("<ul>");
for (key, value) in ::std::env::vars() {
println!("<li>{}: {}</li>", key, value);
}
println!("</ul>");
println!("<p>");
io::stdout().write(&buffer[..])?;
println!("</p>");
Ok(())
}
|
println!();
println!("<p>Hello, world!</p>");
|
random_line_split
|
main.rs
|
use std::io::{self, Read, Write};
use std::env;
fn main()
|
fn handle(content_length: u64) -> io::Result<()> {
let mut buffer = Vec::new();
io::stdin().take(content_length).read_to_end(&mut buffer)?;
println!("Content-Type: text/html");
println!();
println!("<p>Hello, world!</p>");
println!("<ul>");
for (key, value) in ::std::env::vars() {
println!("<li>{}: {}</li>", key, value);
}
println!("</ul>");
println!("<p>");
io::stdout().write(&buffer[..])?;
println!("</p>");
Ok(())
}
|
{
let content_length = env::var("CONTENT_LENGTH").unwrap_or("0".into())
.parse::<u64>().expect("Error parsing CONTENT_LENGTH");
let status = match handle(content_length) {
Ok(_) => 0,
Err(_) => 1,
};
::std::process::exit(status);
}
|
identifier_body
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern mod freetype;
use font::{FontHandleMethods, FontMetrics, FontTableMethods};
use font::{FontTableTag, FractionalPixel, SpecifiedFontStyle, UsedFontStyle};
use servo_util::geometry::Au;
use servo_util::geometry;
use platform::font_context::FontContextHandle;
use text::glyph::GlyphIndex;
use text::util::{float_to_fixed, fixed_to_float};
use style::computed_values::font_weight;
use freetype::freetype::{FT_Get_Char_Index, FT_Get_Postscript_Name};
use freetype::freetype::{FT_Load_Glyph, FT_Set_Char_Size};
use freetype::freetype::{FT_New_Face, FT_Get_Sfnt_Table};
use freetype::freetype::{FT_New_Memory_Face, FT_Done_Face};
use freetype::freetype::{FTErrorMethods, FT_F26Dot6, FT_Face, FT_FaceRec};
use freetype::freetype::{FT_GlyphSlot, FT_Library, FT_Long, FT_ULong};
use freetype::freetype::{FT_STYLE_FLAG_ITALIC, FT_STYLE_FLAG_BOLD};
use freetype::freetype::{FT_SizeRec, FT_UInt, FT_Size_Metrics};
use freetype::freetype::{ft_sfnt_os2};
use freetype::tt_os2::TT_OS2;
use std::cast;
use std::ptr;
use std::str;
fn float_to_fixed_ft(f: f64) -> i32 {
float_to_fixed(6, f)
}
fn fixed_to_float_ft(f: i32) -> f64 {
fixed_to_float(6, f)
}
pub struct FontTable {
bogus: ()
}
impl FontTableMethods for FontTable {
fn with_buffer(&self, _blk: |*u8, uint|) {
fail!()
}
}
enum FontSource {
FontSourceMem(~[u8]),
FontSourceFile(~str)
}
pub struct FontHandle {
// The font binary. This must stay valid for the lifetime of the font,
// if the font is created using FT_Memory_Face.
source: FontSource,
face: FT_Face,
handle: FontContextHandle
}
#[unsafe_destructor]
impl Drop for FontHandle {
fn drop(&mut self) {
assert!(self.face.is_not_null());
unsafe {
            if !FT_Done_Face(self.face).succeeded() {
fail!(~"FT_Done_Face failed");
}
}
}
}
impl FontHandleMethods for FontHandle {
fn new_from_buffer(fctx: &FontContextHandle,
buf: ~[u8],
style: &SpecifiedFontStyle)
-> Result<FontHandle, ()> {
let ft_ctx: FT_Library = fctx.ctx.borrow().ctx;
if ft_ctx.is_null() { return Err(()); }
let face_result = create_face_from_buffer(ft_ctx, buf.as_ptr(), buf.len(), style.pt_size);
// TODO: this could be more simply written as result::chain
        // and moving buf into the struct ctor, but can't move out of
// captured binding.
return match face_result {
Ok(face) => {
let handle = FontHandle {
face: face,
source: FontSourceMem(buf),
handle: fctx.clone()
};
Ok(handle)
}
Err(()) => Err(())
};
fn create_face_from_buffer(lib: FT_Library, cbuf: *u8, cbuflen: uint, pt_size: f64)
-> Result<FT_Face, ()> {
unsafe {
let mut face: FT_Face = ptr::null();
let face_index = 0 as FT_Long;
let result = FT_New_Memory_Face(lib, cbuf, cbuflen as FT_Long,
face_index, ptr::to_mut_unsafe_ptr(&mut face));
                if !result.succeeded() || face.is_null() {
return Err(());
}
if FontHandle::set_char_size(face, pt_size).is_ok() {
Ok(face)
} else {
Err(())
}
}
}
}
// an identifier usable by FontContextHandle to recreate this FontHandle.
fn face_identifier(&self) -> ~str {
/* FT_Get_Postscript_Name seems like a better choice here, but it
doesn't give usable results for fontconfig when deserializing. */
unsafe { str::raw::from_c_str((*self.face).family_name) }
}
fn family_name(&self) -> ~str {
unsafe { str::raw::from_c_str((*self.face).family_name) }
}
fn face_name(&self) -> ~str {
unsafe { str::raw::from_c_str(FT_Get_Postscript_Name(self.face)) }
}
fn is_italic(&self) -> bool {
        unsafe { (*self.face).style_flags & FT_STYLE_FLAG_ITALIC != 0 }
}
fn boldness(&self) -> font_weight::T {
let default_weight = font_weight::Weight400;
if unsafe { (*self.face).style_flags & FT_STYLE_FLAG_BOLD == 0 } {
default_weight
} else {
unsafe {
let os2 = FT_Get_Sfnt_Table(self.face, ft_sfnt_os2) as *TT_OS2;
                let valid = os2.is_not_null() && (*os2).version != 0xffff;
if valid {
                    let weight = (*os2).usWeightClass;
match weight {
1 | 100..199 => font_weight::Weight100,
2 | 200..299 => font_weight::Weight200,
3 | 300..399 => font_weight::Weight300,
4 | 400..499 => font_weight::Weight400,
5 | 500..599 => font_weight::Weight500,
6 | 600..699 => font_weight::Weight600,
7 | 700..799 => font_weight::Weight700,
8 | 800..899 => font_weight::Weight800,
9 | 900..999 => font_weight::Weight900,
_ => default_weight
}
} else {
default_weight
}
}
}
}
fn clone_with_style(&self,
fctx: &FontContextHandle,
style: &UsedFontStyle) -> Result<FontHandle, ()> {
match self.source {
FontSourceMem(ref buf) => {
FontHandleMethods::new_from_buffer(fctx, buf.clone(), style)
}
FontSourceFile(ref file) => {
FontHandle::new_from_file(fctx, (*file).clone(), style)
}
}
}
fn glyph_index(&self,
codepoint: char) -> Option<GlyphIndex> {
assert!(self.face.is_not_null());
unsafe {
let idx = FT_Get_Char_Index(self.face, codepoint as FT_ULong);
            return if idx != 0 as FT_UInt {
Some(idx as GlyphIndex)
} else {
debug!("Invalid codepoint: {}", codepoint);
None
};
}
}
fn glyph_h_advance(&self,
glyph: GlyphIndex) -> Option<FractionalPixel> {
assert!(self.face.is_not_null());
unsafe {
let res = FT_Load_Glyph(self.face, glyph as FT_UInt, 0);
if res.succeeded() {
let void_glyph = (*self.face).glyph;
let slot: FT_GlyphSlot = cast::transmute(void_glyph);
assert!(slot.is_not_null());
debug!("metrics: {:?}", (*slot).metrics);
let advance = (*slot).metrics.horiAdvance;
debug!("h_advance for {} is {}", glyph, advance);
let advance = advance as i32;
return Some(fixed_to_float_ft(advance) as FractionalPixel);
} else {
debug!("Unable to load glyph {}. reason: {}", glyph, res);
return None;
}
}
}
fn get_metrics(&self) -> FontMetrics {
/* TODO(Issue #76): complete me */
let face = self.get_face_rec();
let underline_size = self.font_units_to_au(face.underline_thickness as f64);
let underline_offset = self.font_units_to_au(face.underline_position as f64);
let em_size = self.font_units_to_au(face.units_per_EM as f64);
let ascent = self.font_units_to_au(face.ascender as f64);
let descent = self.font_units_to_au(face.descender as f64);
let max_advance = self.font_units_to_au(face.max_advance_width as f64);
// 'leading' is supposed to be the vertical distance between two baselines,
        // reflected by the height attribute in freetype. On OS X (w/ CTFont),
// leading represents the distance between the bottom of a line descent to
// the top of the next line's ascent or: (line_height - ascent - descent),
// see http://stackoverflow.com/a/5635981 for CTFont implementation.
        // Convert using a formula similar to what CTFont returns for consistency.
let height = self.font_units_to_au(face.height as f64);
let leading = height - (ascent + descent);
let mut strikeout_size = geometry::from_pt(0.0);
let mut strikeout_offset = geometry::from_pt(0.0);
let mut x_height = geometry::from_pt(0.0);
unsafe {
let os2 = FT_Get_Sfnt_Table(face, ft_sfnt_os2) as *TT_OS2;
            let valid = os2.is_not_null() && (*os2).version != 0xffff;
if valid {
strikeout_size = self.font_units_to_au((*os2).yStrikeoutSize as f64);
strikeout_offset = self.font_units_to_au((*os2).yStrikeoutPosition as f64);
x_height = self.font_units_to_au((*os2).sxHeight as f64);
}
}
let metrics = FontMetrics {
underline_size: underline_size,
underline_offset: underline_offset,
strikeout_size: strikeout_size,
strikeout_offset: strikeout_offset,
leading: leading,
x_height: x_height,
em_size: em_size,
ascent: ascent,
            descent: -descent, // linux fonts seem to use the opposite sign from mac
max_advance: max_advance
};
debug!("Font metrics (@{:f} pt): {:?}", geometry::to_pt(em_size), metrics);
return metrics;
}
fn get_table_for_tag(&self, _: FontTableTag) -> Option<FontTable> {
None
}
}
impl<'a> FontHandle {
fn set_char_size(face: FT_Face, pt_size: f64) -> Result<(), ()>{
let char_width = float_to_fixed_ft(pt_size) as FT_F26Dot6;
let char_height = float_to_fixed_ft(pt_size) as FT_F26Dot6;
let h_dpi = 72;
let v_dpi = 72;
unsafe {
let result = FT_Set_Char_Size(face, char_width, char_height, h_dpi, v_dpi);
if result.succeeded() { Ok(()) } else { Err(()) }
}
}
pub fn new_from_file(fctx: &FontContextHandle, file: &str,
style: &SpecifiedFontStyle) -> Result<FontHandle, ()> {
unsafe {
let ft_ctx: FT_Library = fctx.ctx.borrow().ctx;
if ft_ctx.is_null()
|
let mut face: FT_Face = ptr::null();
let face_index = 0 as FT_Long;
file.to_c_str().with_ref(|file_str| {
FT_New_Face(ft_ctx, file_str,
face_index, ptr::to_mut_unsafe_ptr(&mut face));
});
if face.is_null() {
return Err(());
}
if FontHandle::set_char_size(face, style.pt_size).is_ok() {
Ok(FontHandle {
source: FontSourceFile(file.to_str()),
face: face,
handle: fctx.clone()
})
} else {
Err(())
}
}
}
pub fn new_from_file_unstyled(fctx: &FontContextHandle, file: ~str)
-> Result<FontHandle, ()> {
unsafe {
let ft_ctx: FT_Library = fctx.ctx.borrow().ctx;
if ft_ctx.is_null() { return Err(()); }
let mut face: FT_Face = ptr::null();
let face_index = 0 as FT_Long;
file.to_c_str().with_ref(|file_str| {
FT_New_Face(ft_ctx, file_str,
face_index, ptr::to_mut_unsafe_ptr(&mut face));
});
if face.is_null() {
return Err(());
}
Ok(FontHandle {
source: FontSourceFile(file),
face: face,
handle: fctx.clone()
})
}
}
fn get_face_rec(&'a self) -> &'a FT_FaceRec {
unsafe {
&(*self.face)
}
}
fn font_units_to_au(&self, value: f64) -> Au {
let face = self.get_face_rec();
// face.size is a *c_void in the bindings, presumably to avoid
// recursive structural types
let size: &FT_SizeRec = unsafe { cast::transmute(&(*face.size)) };
let metrics: &FT_Size_Metrics = &(*size).metrics;
let em_size = face.units_per_EM as f64;
let x_scale = (metrics.x_ppem as f64) / em_size as f64;
// If this isn't true then we're scaling one of the axes wrong
assert!(metrics.x_ppem == metrics.y_ppem);
return geometry::from_frac_px(value * x_scale);
}
}
|
{ return Err(()); }
|
conditional_block
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern mod freetype;
use font::{FontHandleMethods, FontMetrics, FontTableMethods};
use font::{FontTableTag, FractionalPixel, SpecifiedFontStyle, UsedFontStyle};
use servo_util::geometry::Au;
use servo_util::geometry;
use platform::font_context::FontContextHandle;
use text::glyph::GlyphIndex;
use text::util::{float_to_fixed, fixed_to_float};
use style::computed_values::font_weight;
use freetype::freetype::{FT_Get_Char_Index, FT_Get_Postscript_Name};
use freetype::freetype::{FT_Load_Glyph, FT_Set_Char_Size};
use freetype::freetype::{FT_New_Face, FT_Get_Sfnt_Table};
use freetype::freetype::{FT_New_Memory_Face, FT_Done_Face};
use freetype::freetype::{FTErrorMethods, FT_F26Dot6, FT_Face, FT_FaceRec};
use freetype::freetype::{FT_GlyphSlot, FT_Library, FT_Long, FT_ULong};
use freetype::freetype::{FT_STYLE_FLAG_ITALIC, FT_STYLE_FLAG_BOLD};
use freetype::freetype::{FT_SizeRec, FT_UInt, FT_Size_Metrics};
use freetype::freetype::{ft_sfnt_os2};
use freetype::tt_os2::TT_OS2;
use std::cast;
use std::ptr;
use std::str;
fn float_to_fixed_ft(f: f64) -> i32 {
float_to_fixed(6, f)
}
fn fixed_to_float_ft(f: i32) -> f64 {
fixed_to_float(6, f)
}
pub struct FontTable {
bogus: ()
}
impl FontTableMethods for FontTable {
fn with_buffer(&self, _blk: |*u8, uint|) {
fail!()
}
}
enum FontSource {
FontSourceMem(~[u8]),
FontSourceFile(~str)
}
pub struct FontHandle {
// The font binary. This must stay valid for the lifetime of the font,
// if the font is created using FT_Memory_Face.
source: FontSource,
face: FT_Face,
handle: FontContextHandle
}
#[unsafe_destructor]
impl Drop for FontHandle {
fn drop(&mut self) {
assert!(self.face.is_not_null());
unsafe {
            if !FT_Done_Face(self.face).succeeded() {
fail!(~"FT_Done_Face failed");
}
}
}
}
impl FontHandleMethods for FontHandle {
fn new_from_buffer(fctx: &FontContextHandle,
buf: ~[u8],
style: &SpecifiedFontStyle)
-> Result<FontHandle, ()> {
let ft_ctx: FT_Library = fctx.ctx.borrow().ctx;
if ft_ctx.is_null() { return Err(()); }
let face_result = create_face_from_buffer(ft_ctx, buf.as_ptr(), buf.len(), style.pt_size);
// TODO: this could be more simply written as result::chain
        // and moving buf into the struct ctor, but can't move out of
// captured binding.
return match face_result {
Ok(face) => {
let handle = FontHandle {
face: face,
source: FontSourceMem(buf),
handle: fctx.clone()
};
Ok(handle)
}
Err(()) => Err(())
};
fn create_face_from_buffer(lib: FT_Library, cbuf: *u8, cbuflen: uint, pt_size: f64)
-> Result<FT_Face, ()> {
unsafe {
let mut face: FT_Face = ptr::null();
let face_index = 0 as FT_Long;
let result = FT_New_Memory_Face(lib, cbuf, cbuflen as FT_Long,
face_index, ptr::to_mut_unsafe_ptr(&mut face));
                if !result.succeeded() || face.is_null() {
return Err(());
}
if FontHandle::set_char_size(face, pt_size).is_ok() {
Ok(face)
} else {
Err(())
}
}
}
}
// an identifier usable by FontContextHandle to recreate this FontHandle.
fn face_identifier(&self) -> ~str {
/* FT_Get_Postscript_Name seems like a better choice here, but it
doesn't give usable results for fontconfig when deserializing. */
unsafe { str::raw::from_c_str((*self.face).family_name) }
}
fn family_name(&self) -> ~str {
unsafe { str::raw::from_c_str((*self.face).family_name) }
}
fn face_name(&self) -> ~str {
unsafe { str::raw::from_c_str(FT_Get_Postscript_Name(self.face)) }
}
fn
|
(&self) -> bool {
        unsafe { (*self.face).style_flags & FT_STYLE_FLAG_ITALIC != 0 }
}
fn boldness(&self) -> font_weight::T {
let default_weight = font_weight::Weight400;
if unsafe { (*self.face).style_flags & FT_STYLE_FLAG_BOLD == 0 } {
default_weight
} else {
unsafe {
let os2 = FT_Get_Sfnt_Table(self.face, ft_sfnt_os2) as *TT_OS2;
                let valid = os2.is_not_null() && (*os2).version != 0xffff;
if valid {
                    let weight = (*os2).usWeightClass;
match weight {
1 | 100..199 => font_weight::Weight100,
2 | 200..299 => font_weight::Weight200,
3 | 300..399 => font_weight::Weight300,
4 | 400..499 => font_weight::Weight400,
5 | 500..599 => font_weight::Weight500,
6 | 600..699 => font_weight::Weight600,
7 | 700..799 => font_weight::Weight700,
8 | 800..899 => font_weight::Weight800,
9 | 900..999 => font_weight::Weight900,
_ => default_weight
}
} else {
default_weight
}
}
}
}
fn clone_with_style(&self,
fctx: &FontContextHandle,
style: &UsedFontStyle) -> Result<FontHandle, ()> {
match self.source {
FontSourceMem(ref buf) => {
FontHandleMethods::new_from_buffer(fctx, buf.clone(), style)
}
FontSourceFile(ref file) => {
FontHandle::new_from_file(fctx, (*file).clone(), style)
}
}
}
fn glyph_index(&self,
codepoint: char) -> Option<GlyphIndex> {
assert!(self.face.is_not_null());
unsafe {
let idx = FT_Get_Char_Index(self.face, codepoint as FT_ULong);
            return if idx != 0 as FT_UInt {
Some(idx as GlyphIndex)
} else {
debug!("Invalid codepoint: {}", codepoint);
None
};
}
}
fn glyph_h_advance(&self,
glyph: GlyphIndex) -> Option<FractionalPixel> {
assert!(self.face.is_not_null());
unsafe {
let res = FT_Load_Glyph(self.face, glyph as FT_UInt, 0);
if res.succeeded() {
let void_glyph = (*self.face).glyph;
let slot: FT_GlyphSlot = cast::transmute(void_glyph);
assert!(slot.is_not_null());
debug!("metrics: {:?}", (*slot).metrics);
let advance = (*slot).metrics.horiAdvance;
debug!("h_advance for {} is {}", glyph, advance);
let advance = advance as i32;
return Some(fixed_to_float_ft(advance) as FractionalPixel);
} else {
debug!("Unable to load glyph {}. reason: {}", glyph, res);
return None;
}
}
}
fn get_metrics(&self) -> FontMetrics {
/* TODO(Issue #76): complete me */
let face = self.get_face_rec();
let underline_size = self.font_units_to_au(face.underline_thickness as f64);
let underline_offset = self.font_units_to_au(face.underline_position as f64);
let em_size = self.font_units_to_au(face.units_per_EM as f64);
let ascent = self.font_units_to_au(face.ascender as f64);
let descent = self.font_units_to_au(face.descender as f64);
let max_advance = self.font_units_to_au(face.max_advance_width as f64);
// 'leading' is supposed to be the vertical distance between two baselines,
        // reflected by the height attribute in freetype. On OS X (w/ CTFont),
// leading represents the distance between the bottom of a line descent to
// the top of the next line's ascent or: (line_height - ascent - descent),
// see http://stackoverflow.com/a/5635981 for CTFont implementation.
        // Convert using a formula similar to what CTFont returns for consistency.
let height = self.font_units_to_au(face.height as f64);
let leading = height - (ascent + descent);
let mut strikeout_size = geometry::from_pt(0.0);
let mut strikeout_offset = geometry::from_pt(0.0);
let mut x_height = geometry::from_pt(0.0);
unsafe {
let os2 = FT_Get_Sfnt_Table(face, ft_sfnt_os2) as *TT_OS2;
            let valid = os2.is_not_null() && (*os2).version != 0xffff;
if valid {
strikeout_size = self.font_units_to_au((*os2).yStrikeoutSize as f64);
strikeout_offset = self.font_units_to_au((*os2).yStrikeoutPosition as f64);
x_height = self.font_units_to_au((*os2).sxHeight as f64);
}
}
let metrics = FontMetrics {
underline_size: underline_size,
underline_offset: underline_offset,
strikeout_size: strikeout_size,
strikeout_offset: strikeout_offset,
leading: leading,
x_height: x_height,
em_size: em_size,
ascent: ascent,
            descent: -descent, // linux fonts seem to use the opposite sign from mac
max_advance: max_advance
};
debug!("Font metrics (@{:f} pt): {:?}", geometry::to_pt(em_size), metrics);
return metrics;
}
fn get_table_for_tag(&self, _: FontTableTag) -> Option<FontTable> {
None
}
}
impl<'a> FontHandle {
fn set_char_size(face: FT_Face, pt_size: f64) -> Result<(), ()>{
let char_width = float_to_fixed_ft(pt_size) as FT_F26Dot6;
let char_height = float_to_fixed_ft(pt_size) as FT_F26Dot6;
let h_dpi = 72;
let v_dpi = 72;
unsafe {
let result = FT_Set_Char_Size(face, char_width, char_height, h_dpi, v_dpi);
if result.succeeded() { Ok(()) } else { Err(()) }
}
}
pub fn new_from_file(fctx: &FontContextHandle, file: &str,
style: &SpecifiedFontStyle) -> Result<FontHandle, ()> {
unsafe {
let ft_ctx: FT_Library = fctx.ctx.borrow().ctx;
if ft_ctx.is_null() { return Err(()); }
let mut face: FT_Face = ptr::null();
let face_index = 0 as FT_Long;
file.to_c_str().with_ref(|file_str| {
FT_New_Face(ft_ctx, file_str,
face_index, ptr::to_mut_unsafe_ptr(&mut face));
});
if face.is_null() {
return Err(());
}
if FontHandle::set_char_size(face, style.pt_size).is_ok() {
Ok(FontHandle {
source: FontSourceFile(file.to_str()),
face: face,
handle: fctx.clone()
})
} else {
Err(())
}
}
}
pub fn new_from_file_unstyled(fctx: &FontContextHandle, file: ~str)
-> Result<FontHandle, ()> {
unsafe {
let ft_ctx: FT_Library = fctx.ctx.borrow().ctx;
if ft_ctx.is_null() { return Err(()); }
let mut face: FT_Face = ptr::null();
let face_index = 0 as FT_Long;
file.to_c_str().with_ref(|file_str| {
FT_New_Face(ft_ctx, file_str,
face_index, ptr::to_mut_unsafe_ptr(&mut face));
});
if face.is_null() {
return Err(());
}
Ok(FontHandle {
source: FontSourceFile(file),
face: face,
handle: fctx.clone()
})
}
}
fn get_face_rec(&'a self) -> &'a FT_FaceRec {
unsafe {
&(*self.face)
}
}
fn font_units_to_au(&self, value: f64) -> Au {
let face = self.get_face_rec();
// face.size is a *c_void in the bindings, presumably to avoid
// recursive structural types
let size: &FT_SizeRec = unsafe { cast::transmute(&(*face.size)) };
let metrics: &FT_Size_Metrics = &(*size).metrics;
let em_size = face.units_per_EM as f64;
let x_scale = (metrics.x_ppem as f64) / em_size as f64;
// If this isn't true then we're scaling one of the axes wrong
assert!(metrics.x_ppem == metrics.y_ppem);
return geometry::from_frac_px(value * x_scale);
}
}
|
is_italic
|
identifier_name
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern mod freetype;
use font::{FontHandleMethods, FontMetrics, FontTableMethods};
use font::{FontTableTag, FractionalPixel, SpecifiedFontStyle, UsedFontStyle};
use servo_util::geometry::Au;
use servo_util::geometry;
use platform::font_context::FontContextHandle;
use text::glyph::GlyphIndex;
use text::util::{float_to_fixed, fixed_to_float};
use style::computed_values::font_weight;
use freetype::freetype::{FT_Get_Char_Index, FT_Get_Postscript_Name};
use freetype::freetype::{FT_Load_Glyph, FT_Set_Char_Size};
use freetype::freetype::{FT_New_Face, FT_Get_Sfnt_Table};
use freetype::freetype::{FT_New_Memory_Face, FT_Done_Face};
use freetype::freetype::{FTErrorMethods, FT_F26Dot6, FT_Face, FT_FaceRec};
use freetype::freetype::{FT_GlyphSlot, FT_Library, FT_Long, FT_ULong};
use freetype::freetype::{FT_STYLE_FLAG_ITALIC, FT_STYLE_FLAG_BOLD};
use freetype::freetype::{FT_SizeRec, FT_UInt, FT_Size_Metrics};
use freetype::freetype::{ft_sfnt_os2};
use freetype::tt_os2::TT_OS2;
use std::cast;
|
float_to_fixed(6, f)
}
fn fixed_to_float_ft(f: i32) -> f64 {
fixed_to_float(6, f)
}
pub struct FontTable {
bogus: ()
}
impl FontTableMethods for FontTable {
fn with_buffer(&self, _blk: |*u8, uint|) {
fail!()
}
}
enum FontSource {
FontSourceMem(~[u8]),
FontSourceFile(~str)
}
pub struct FontHandle {
// The font binary. This must stay valid for the lifetime of the font,
// if the font is created using FT_Memory_Face.
source: FontSource,
face: FT_Face,
handle: FontContextHandle
}
#[unsafe_destructor]
impl Drop for FontHandle {
fn drop(&mut self) {
assert!(self.face.is_not_null());
unsafe {
            if !FT_Done_Face(self.face).succeeded() {
fail!(~"FT_Done_Face failed");
}
}
}
}
impl FontHandleMethods for FontHandle {
fn new_from_buffer(fctx: &FontContextHandle,
buf: ~[u8],
style: &SpecifiedFontStyle)
-> Result<FontHandle, ()> {
let ft_ctx: FT_Library = fctx.ctx.borrow().ctx;
if ft_ctx.is_null() { return Err(()); }
let face_result = create_face_from_buffer(ft_ctx, buf.as_ptr(), buf.len(), style.pt_size);
// TODO: this could be more simply written as result::chain
        // and moving buf into the struct ctor, but can't move out of
// captured binding.
return match face_result {
Ok(face) => {
let handle = FontHandle {
face: face,
source: FontSourceMem(buf),
handle: fctx.clone()
};
Ok(handle)
}
Err(()) => Err(())
};
fn create_face_from_buffer(lib: FT_Library, cbuf: *u8, cbuflen: uint, pt_size: f64)
-> Result<FT_Face, ()> {
unsafe {
let mut face: FT_Face = ptr::null();
let face_index = 0 as FT_Long;
let result = FT_New_Memory_Face(lib, cbuf, cbuflen as FT_Long,
face_index, ptr::to_mut_unsafe_ptr(&mut face));
                if !result.succeeded() || face.is_null() {
return Err(());
}
if FontHandle::set_char_size(face, pt_size).is_ok() {
Ok(face)
} else {
Err(())
}
}
}
}
// an identifier usable by FontContextHandle to recreate this FontHandle.
fn face_identifier(&self) -> ~str {
/* FT_Get_Postscript_Name seems like a better choice here, but it
doesn't give usable results for fontconfig when deserializing. */
unsafe { str::raw::from_c_str((*self.face).family_name) }
}
fn family_name(&self) -> ~str {
unsafe { str::raw::from_c_str((*self.face).family_name) }
}
fn face_name(&self) -> ~str {
unsafe { str::raw::from_c_str(FT_Get_Postscript_Name(self.face)) }
}
fn is_italic(&self) -> bool {
        unsafe { (*self.face).style_flags & FT_STYLE_FLAG_ITALIC != 0 }
}
fn boldness(&self) -> font_weight::T {
let default_weight = font_weight::Weight400;
if unsafe { (*self.face).style_flags & FT_STYLE_FLAG_BOLD == 0 } {
default_weight
} else {
unsafe {
let os2 = FT_Get_Sfnt_Table(self.face, ft_sfnt_os2) as *TT_OS2;
                let valid = os2.is_not_null() && (*os2).version != 0xffff;
if valid {
                    let weight = (*os2).usWeightClass;
match weight {
1 | 100..199 => font_weight::Weight100,
2 | 200..299 => font_weight::Weight200,
3 | 300..399 => font_weight::Weight300,
4 | 400..499 => font_weight::Weight400,
5 | 500..599 => font_weight::Weight500,
6 | 600..699 => font_weight::Weight600,
7 | 700..799 => font_weight::Weight700,
8 | 800..899 => font_weight::Weight800,
9 | 900..999 => font_weight::Weight900,
_ => default_weight
}
} else {
default_weight
}
}
}
}
fn clone_with_style(&self,
fctx: &FontContextHandle,
style: &UsedFontStyle) -> Result<FontHandle, ()> {
match self.source {
FontSourceMem(ref buf) => {
FontHandleMethods::new_from_buffer(fctx, buf.clone(), style)
}
FontSourceFile(ref file) => {
FontHandle::new_from_file(fctx, (*file).clone(), style)
}
}
}
fn glyph_index(&self,
codepoint: char) -> Option<GlyphIndex> {
assert!(self.face.is_not_null());
unsafe {
let idx = FT_Get_Char_Index(self.face, codepoint as FT_ULong);
            return if idx != 0 as FT_UInt {
Some(idx as GlyphIndex)
} else {
debug!("Invalid codepoint: {}", codepoint);
None
};
}
}
fn glyph_h_advance(&self,
glyph: GlyphIndex) -> Option<FractionalPixel> {
assert!(self.face.is_not_null());
unsafe {
let res = FT_Load_Glyph(self.face, glyph as FT_UInt, 0);
if res.succeeded() {
let void_glyph = (*self.face).glyph;
let slot: FT_GlyphSlot = cast::transmute(void_glyph);
assert!(slot.is_not_null());
debug!("metrics: {:?}", (*slot).metrics);
let advance = (*slot).metrics.horiAdvance;
debug!("h_advance for {} is {}", glyph, advance);
let advance = advance as i32;
return Some(fixed_to_float_ft(advance) as FractionalPixel);
} else {
debug!("Unable to load glyph {}. reason: {}", glyph, res);
return None;
}
}
}
fn get_metrics(&self) -> FontMetrics {
/* TODO(Issue #76): complete me */
let face = self.get_face_rec();
let underline_size = self.font_units_to_au(face.underline_thickness as f64);
let underline_offset = self.font_units_to_au(face.underline_position as f64);
let em_size = self.font_units_to_au(face.units_per_EM as f64);
let ascent = self.font_units_to_au(face.ascender as f64);
let descent = self.font_units_to_au(face.descender as f64);
let max_advance = self.font_units_to_au(face.max_advance_width as f64);
// 'leading' is supposed to be the vertical distance between two baselines,
        // reflected by the height attribute in freetype. On OS X (w/ CTFont),
// leading represents the distance between the bottom of a line descent to
// the top of the next line's ascent or: (line_height - ascent - descent),
// see http://stackoverflow.com/a/5635981 for CTFont implementation.
// Convert using a formular similar to what CTFont returns for consistency.
let height = self.font_units_to_au(face.height as f64);
let leading = height - (ascent + descent);
let mut strikeout_size = geometry::from_pt(0.0);
let mut strikeout_offset = geometry::from_pt(0.0);
let mut x_height = geometry::from_pt(0.0);
unsafe {
let os2 = FT_Get_Sfnt_Table(face, ft_sfnt_os2) as *TT_OS2;
let valid = os2.is_not_null() && (*os2).version != 0xffff;
if valid {
strikeout_size = self.font_units_to_au((*os2).yStrikeoutSize as f64);
strikeout_offset = self.font_units_to_au((*os2).yStrikeoutPosition as f64);
x_height = self.font_units_to_au((*os2).sxHeight as f64);
}
}
let metrics = FontMetrics {
underline_size: underline_size,
underline_offset: underline_offset,
strikeout_size: strikeout_size,
strikeout_offset: strikeout_offset,
leading: leading,
x_height: x_height,
em_size: em_size,
ascent: ascent,
descent: -descent, // Linux fonts seem to use the opposite sign from Mac
max_advance: max_advance
};
debug!("Font metrics (@{:f} pt): {:?}", geometry::to_pt(em_size), metrics);
return metrics;
}
fn get_table_for_tag(&self, _: FontTableTag) -> Option<FontTable> {
None
}
}
impl<'a> FontHandle {
fn set_char_size(face: FT_Face, pt_size: f64) -> Result<(), ()>{
let char_width = float_to_fixed_ft(pt_size) as FT_F26Dot6;
let char_height = float_to_fixed_ft(pt_size) as FT_F26Dot6;
let h_dpi = 72;
let v_dpi = 72;
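// Note (illustrative, not in the original source): FT_Set_Char_Size takes the
// size in 26.6 fixed point and the resolution in dpi; at 72 dpi one point is
// one pixel, so the value passed here works out to pt_size * 64.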
unsafe {
let result = FT_Set_Char_Size(face, char_width, char_height, h_dpi, v_dpi);
if result.succeeded() { Ok(()) } else { Err(()) }
}
}
pub fn new_from_file(fctx: &FontContextHandle, file: &str,
style: &SpecifiedFontStyle) -> Result<FontHandle, ()> {
unsafe {
let ft_ctx: FT_Library = fctx.ctx.borrow().ctx;
if ft_ctx.is_null() { return Err(()); }
let mut face: FT_Face = ptr::null();
let face_index = 0 as FT_Long;
file.to_c_str().with_ref(|file_str| {
FT_New_Face(ft_ctx, file_str,
face_index, ptr::to_mut_unsafe_ptr(&mut face));
});
if face.is_null() {
return Err(());
}
if FontHandle::set_char_size(face, style.pt_size).is_ok() {
Ok(FontHandle {
source: FontSourceFile(file.to_str()),
face: face,
handle: fctx.clone()
})
} else {
Err(())
}
}
}
pub fn new_from_file_unstyled(fctx: &FontContextHandle, file: ~str)
-> Result<FontHandle, ()> {
unsafe {
let ft_ctx: FT_Library = fctx.ctx.borrow().ctx;
if ft_ctx.is_null() { return Err(()); }
let mut face: FT_Face = ptr::null();
let face_index = 0 as FT_Long;
file.to_c_str().with_ref(|file_str| {
FT_New_Face(ft_ctx, file_str,
face_index, ptr::to_mut_unsafe_ptr(&mut face));
});
if face.is_null() {
return Err(());
}
Ok(FontHandle {
source: FontSourceFile(file),
face: face,
handle: fctx.clone()
})
}
}
fn get_face_rec(&'a self) -> &'a FT_FaceRec {
unsafe {
&(*self.face)
}
}
fn font_units_to_au(&self, value: f64) -> Au {
let face = self.get_face_rec();
// face.size is a *c_void in the bindings, presumably to avoid
// recursive structural types
let size: &FT_SizeRec = unsafe { cast::transmute(&(*face.size)) };
let metrics: &FT_Size_Metrics = &(*size).metrics;
let em_size = face.units_per_EM as f64;
let x_scale = (metrics.x_ppem as f64) / em_size as f64;
// If this isn't true then we're scaling one of the axes wrong
assert!(metrics.x_ppem == metrics.y_ppem);
return geometry::from_frac_px(value * x_scale);
}
}
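// Illustrative sketch, not part of the original module: the scaling performed
// by font_units_to_au above with concrete numbers. The values below are made
// up for the example; only the formula (value * x_ppem / units_per_EM) comes
// from the code.
#[allow(dead_code)]
fn demo_font_unit_scaling() {
    let units_per_em = 2048.0;          // design units per EM, typical for TrueType
    let x_ppem = 24.0;                  // pixels per EM at the current size
    let advance_in_font_units = 1024.0; // half an EM
    let advance_in_px = advance_in_font_units * (x_ppem / units_per_em);
    assert_eq!(advance_in_px, 12.0);    // exact, so the float comparison is fine here
}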
|
use std::ptr;
use std::str;
fn float_to_fixed_ft(f: f64) -> i32 {
|
random_line_split
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern mod freetype;
use font::{FontHandleMethods, FontMetrics, FontTableMethods};
use font::{FontTableTag, FractionalPixel, SpecifiedFontStyle, UsedFontStyle};
use servo_util::geometry::Au;
use servo_util::geometry;
use platform::font_context::FontContextHandle;
use text::glyph::GlyphIndex;
use text::util::{float_to_fixed, fixed_to_float};
use style::computed_values::font_weight;
use freetype::freetype::{FT_Get_Char_Index, FT_Get_Postscript_Name};
use freetype::freetype::{FT_Load_Glyph, FT_Set_Char_Size};
use freetype::freetype::{FT_New_Face, FT_Get_Sfnt_Table};
use freetype::freetype::{FT_New_Memory_Face, FT_Done_Face};
use freetype::freetype::{FTErrorMethods, FT_F26Dot6, FT_Face, FT_FaceRec};
use freetype::freetype::{FT_GlyphSlot, FT_Library, FT_Long, FT_ULong};
use freetype::freetype::{FT_STYLE_FLAG_ITALIC, FT_STYLE_FLAG_BOLD};
use freetype::freetype::{FT_SizeRec, FT_UInt, FT_Size_Metrics};
use freetype::freetype::{ft_sfnt_os2};
use freetype::tt_os2::TT_OS2;
use std::cast;
use std::ptr;
use std::str;
fn float_to_fixed_ft(f: f64) -> i32 {
float_to_fixed(6, f)
}
fn fixed_to_float_ft(f: i32) -> f64 {
fixed_to_float(6, f)
}
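// Illustrative sketch, not part of the original module: FreeType's 26.6 format
// keeps 6 fractional bits, so the helpers above scale by 64 (assuming
// float_to_fixed/fixed_to_float from text::util shift by the given bit count).
#[allow(dead_code)]
fn demo_26_6_fixed_point() {
    let fixed = float_to_fixed_ft(12.5); // 12.5 * 64
    assert_eq!(fixed, 800);
    assert_eq!(fixed_to_float_ft(fixed), 12.5);
}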
pub struct FontTable {
bogus: ()
}
impl FontTableMethods for FontTable {
fn with_buffer(&self, _blk: |*u8, uint|) {
fail!()
}
}
enum FontSource {
FontSourceMem(~[u8]),
FontSourceFile(~str)
}
pub struct FontHandle {
// The font binary. This must stay valid for the lifetime of the font,
// if the font is created using FT_Memory_Face.
source: FontSource,
face: FT_Face,
handle: FontContextHandle
}
#[unsafe_destructor]
impl Drop for FontHandle {
fn drop(&mut self) {
assert!(self.face.is_not_null());
unsafe {
if !FT_Done_Face(self.face).succeeded() {
fail!(~"FT_Done_Face failed");
}
}
}
}
impl FontHandleMethods for FontHandle {
fn new_from_buffer(fctx: &FontContextHandle,
buf: ~[u8],
style: &SpecifiedFontStyle)
-> Result<FontHandle, ()> {
let ft_ctx: FT_Library = fctx.ctx.borrow().ctx;
if ft_ctx.is_null() { return Err(()); }
let face_result = create_face_from_buffer(ft_ctx, buf.as_ptr(), buf.len(), style.pt_size);
// TODO: this could be more simply written as result::chain
// and moving buf into the struct ctor, but can't move out of a
// captured binding.
return match face_result {
Ok(face) => {
let handle = FontHandle {
face: face,
source: FontSourceMem(buf),
handle: fctx.clone()
};
Ok(handle)
}
Err(()) => Err(())
};
fn create_face_from_buffer(lib: FT_Library, cbuf: *u8, cbuflen: uint, pt_size: f64)
-> Result<FT_Face, ()> {
unsafe {
let mut face: FT_Face = ptr::null();
let face_index = 0 as FT_Long;
let result = FT_New_Memory_Face(lib, cbuf, cbuflen as FT_Long,
face_index, ptr::to_mut_unsafe_ptr(&mut face));
if !result.succeeded() || face.is_null() {
return Err(());
}
if FontHandle::set_char_size(face, pt_size).is_ok() {
Ok(face)
} else {
Err(())
}
}
}
}
// an identifier usable by FontContextHandle to recreate this FontHandle.
fn face_identifier(&self) -> ~str {
/* FT_Get_Postscript_Name seems like a better choice here, but it
doesn't give usable results for fontconfig when deserializing. */
unsafe { str::raw::from_c_str((*self.face).family_name) }
}
fn family_name(&self) -> ~str {
unsafe { str::raw::from_c_str((*self.face).family_name) }
}
fn face_name(&self) -> ~str {
unsafe { str::raw::from_c_str(FT_Get_Postscript_Name(self.face)) }
}
fn is_italic(&self) -> bool {
unsafe { (*self.face).style_flags & FT_STYLE_FLAG_ITALIC != 0 }
}
fn boldness(&self) -> font_weight::T {
let default_weight = font_weight::Weight400;
if unsafe { (*self.face).style_flags & FT_STYLE_FLAG_BOLD == 0 } {
default_weight
} else {
unsafe {
let os2 = FT_Get_Sfnt_Table(self.face, ft_sfnt_os2) as *TT_OS2;
let valid = os2.is_not_null() && (*os2).version != 0xffff;
if valid {
let weight = (*os2).usWeightClass;
match weight {
1 | 100..199 => font_weight::Weight100,
2 | 200..299 => font_weight::Weight200,
3 | 300..399 => font_weight::Weight300,
4 | 400..499 => font_weight::Weight400,
5 | 500..599 => font_weight::Weight500,
6 | 600..699 => font_weight::Weight600,
7 | 700..799 => font_weight::Weight700,
8 | 800..899 => font_weight::Weight800,
9 | 900..999 => font_weight::Weight900,
_ => default_weight
}
} else {
default_weight
}
}
}
}
fn clone_with_style(&self,
fctx: &FontContextHandle,
style: &UsedFontStyle) -> Result<FontHandle, ()>
|
fn glyph_index(&self,
codepoint: char) -> Option<GlyphIndex> {
assert!(self.face.is_not_null());
unsafe {
let idx = FT_Get_Char_Index(self.face, codepoint as FT_ULong);
return if idx != 0 as FT_UInt {
Some(idx as GlyphIndex)
} else {
debug!("Invalid codepoint: {}", codepoint);
None
};
}
}
fn glyph_h_advance(&self,
glyph: GlyphIndex) -> Option<FractionalPixel> {
assert!(self.face.is_not_null());
unsafe {
let res = FT_Load_Glyph(self.face, glyph as FT_UInt, 0);
if res.succeeded() {
let void_glyph = (*self.face).glyph;
let slot: FT_GlyphSlot = cast::transmute(void_glyph);
assert!(slot.is_not_null());
debug!("metrics: {:?}", (*slot).metrics);
let advance = (*slot).metrics.horiAdvance;
debug!("h_advance for {} is {}", glyph, advance);
let advance = advance as i32;
return Some(fixed_to_float_ft(advance) as FractionalPixel);
} else {
debug!("Unable to load glyph {}. reason: {}", glyph, res);
return None;
}
}
}
fn get_metrics(&self) -> FontMetrics {
/* TODO(Issue #76): complete me */
let face = self.get_face_rec();
let underline_size = self.font_units_to_au(face.underline_thickness as f64);
let underline_offset = self.font_units_to_au(face.underline_position as f64);
let em_size = self.font_units_to_au(face.units_per_EM as f64);
let ascent = self.font_units_to_au(face.ascender as f64);
let descent = self.font_units_to_au(face.descender as f64);
let max_advance = self.font_units_to_au(face.max_advance_width as f64);
// 'leading' is supposed to be the vertical distance between two baselines,
// reflected by the height attribute in freetype. On OS X (w/ CTFont),
// leading represents the distance from the bottom of one line's descent to
// the top of the next line's ascent, i.e. (line_height - ascent - descent);
// see http://stackoverflow.com/a/5635981 for the CTFont implementation.
// Convert using a formula similar to what CTFont returns, for consistency.
let height = self.font_units_to_au(face.height as f64);
let leading = height - (ascent + descent);
let mut strikeout_size = geometry::from_pt(0.0);
let mut strikeout_offset = geometry::from_pt(0.0);
let mut x_height = geometry::from_pt(0.0);
unsafe {
let os2 = FT_Get_Sfnt_Table(face, ft_sfnt_os2) as *TT_OS2;
let valid = os2.is_not_null() && (*os2).version != 0xffff;
if valid {
strikeout_size = self.font_units_to_au((*os2).yStrikeoutSize as f64);
strikeout_offset = self.font_units_to_au((*os2).yStrikeoutPosition as f64);
x_height = self.font_units_to_au((*os2).sxHeight as f64);
}
}
let metrics = FontMetrics {
underline_size: underline_size,
underline_offset: underline_offset,
strikeout_size: strikeout_size,
strikeout_offset: strikeout_offset,
leading: leading,
x_height: x_height,
em_size: em_size,
ascent: ascent,
descent: -descent, // Linux fonts seem to use the opposite sign from Mac
max_advance: max_advance
};
debug!("Font metrics (@{:f} pt): {:?}", geometry::to_pt(em_size), metrics);
return metrics;
}
fn get_table_for_tag(&self, _: FontTableTag) -> Option<FontTable> {
None
}
}
impl<'a> FontHandle {
fn set_char_size(face: FT_Face, pt_size: f64) -> Result<(), ()>{
let char_width = float_to_fixed_ft(pt_size) as FT_F26Dot6;
let char_height = float_to_fixed_ft(pt_size) as FT_F26Dot6;
let h_dpi = 72;
let v_dpi = 72;
unsafe {
let result = FT_Set_Char_Size(face, char_width, char_height, h_dpi, v_dpi);
if result.succeeded() { Ok(()) } else { Err(()) }
}
}
pub fn new_from_file(fctx: &FontContextHandle, file: &str,
style: &SpecifiedFontStyle) -> Result<FontHandle, ()> {
unsafe {
let ft_ctx: FT_Library = fctx.ctx.borrow().ctx;
if ft_ctx.is_null() { return Err(()); }
let mut face: FT_Face = ptr::null();
let face_index = 0 as FT_Long;
file.to_c_str().with_ref(|file_str| {
FT_New_Face(ft_ctx, file_str,
face_index, ptr::to_mut_unsafe_ptr(&mut face));
});
if face.is_null() {
return Err(());
}
if FontHandle::set_char_size(face, style.pt_size).is_ok() {
Ok(FontHandle {
source: FontSourceFile(file.to_str()),
face: face,
handle: fctx.clone()
})
} else {
Err(())
}
}
}
pub fn new_from_file_unstyled(fctx: &FontContextHandle, file: ~str)
-> Result<FontHandle, ()> {
unsafe {
let ft_ctx: FT_Library = fctx.ctx.borrow().ctx;
if ft_ctx.is_null() { return Err(()); }
let mut face: FT_Face = ptr::null();
let face_index = 0 as FT_Long;
file.to_c_str().with_ref(|file_str| {
FT_New_Face(ft_ctx, file_str,
face_index, ptr::to_mut_unsafe_ptr(&mut face));
});
if face.is_null() {
return Err(());
}
Ok(FontHandle {
source: FontSourceFile(file),
face: face,
handle: fctx.clone()
})
}
}
fn get_face_rec(&'a self) -> &'a FT_FaceRec {
unsafe {
&(*self.face)
}
}
fn font_units_to_au(&self, value: f64) -> Au {
let face = self.get_face_rec();
// face.size is a *c_void in the bindings, presumably to avoid
// recursive structural types
let size: &FT_SizeRec = unsafe { cast::transmute(&(*face.size)) };
let metrics: &FT_Size_Metrics = &(*size).metrics;
let em_size = face.units_per_EM as f64;
let x_scale = (metrics.x_ppem as f64) / em_size as f64;
// If this isn't true then we're scaling one of the axes wrong
assert!(metrics.x_ppem == metrics.y_ppem);
return geometry::from_frac_px(value * x_scale);
}
}
|
{
match self.source {
FontSourceMem(ref buf) => {
FontHandleMethods::new_from_buffer(fctx, buf.clone(), style)
}
FontSourceFile(ref file) => {
FontHandle::new_from_file(fctx, (*file).clone(), style)
}
}
}
|
identifier_body
|
imag_category.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::process::Command;
use assert_fs::fixture::TempDir;
pub fn
|
(tempdir: &TempDir) -> Command {
crate::imag::binary(tempdir, "imag-category")
}
pub fn call(tmpdir: &TempDir, args: &[&str]) -> Vec<String> {
let mut binary = binary(tmpdir);
binary.stdin(std::process::Stdio::inherit());
binary.arg("--pipe-input");
binary.arg("--pipe-output");
binary.args(args);
debug!("Command = {:?}", binary);
crate::imag::stdout_of_command(binary)
}
#[test]
fn test_new_entry_has_no_category() {
crate::setup_logging();
let imag_home = crate::imag::make_temphome();
crate::imag_init::call(&imag_home);
crate::imag_create::call(&imag_home, &["test"]);
let (assert, stderr_output) = {
let mut binary = binary(&imag_home);
binary.stdin(std::process::Stdio::inherit());
binary.arg("--pipe-input");
binary.arg("--pipe-output");
binary.arg("get");
binary.arg("test");
crate::imag::stderr_of_command(&mut binary)
};
assert.failure();
assert!(stderr_output.iter().any(|substr| substr.contains("Category name missing")));
}
#[test]
fn test_after_setting_a_new_category_there_is_a_category() {
crate::setup_logging();
let imag_home = crate::imag::make_temphome();
crate::imag_init::call(&imag_home);
crate::imag_create::call(&imag_home, &["test"]);
let _ = call(&imag_home, &["create-category", "cat"]);
let _ = call(&imag_home, &["set", "cat", "test"]);
let output = call(&imag_home, &["get", "test"]);
debug!("output = {:?}", output);
assert!(!output.is_empty());
assert_eq!(output.len(), 1);
assert_eq!(output[0], "cat");
}
|
binary
|
identifier_name
|
imag_category.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
|
use std::process::Command;
use assert_fs::fixture::TempDir;
pub fn binary(tempdir: &TempDir) -> Command {
crate::imag::binary(tempdir, "imag-category")
}
pub fn call(tmpdir: &TempDir, args: &[&str]) -> Vec<String> {
let mut binary = binary(tmpdir);
binary.stdin(std::process::Stdio::inherit());
binary.arg("--pipe-input");
binary.arg("--pipe-output");
binary.args(args);
debug!("Command = {:?}", binary);
crate::imag::stdout_of_command(binary)
}
#[test]
fn test_new_entry_has_no_category() {
crate::setup_logging();
let imag_home = crate::imag::make_temphome();
crate::imag_init::call(&imag_home);
crate::imag_create::call(&imag_home, &["test"]);
let (assert, stderr_output) = {
let mut binary = binary(&imag_home);
binary.stdin(std::process::Stdio::inherit());
binary.arg("--pipe-input");
binary.arg("--pipe-output");
binary.arg("get");
binary.arg("test");
crate::imag::stderr_of_command(&mut binary)
};
assert.failure();
assert!(stderr_output.iter().any(|substr| substr.contains("Category name missing")));
}
#[test]
fn test_after_setting_a_new_category_there_is_a_category() {
crate::setup_logging();
let imag_home = crate::imag::make_temphome();
crate::imag_init::call(&imag_home);
crate::imag_create::call(&imag_home, &["test"]);
let _ = call(&imag_home, &["create-category", "cat"]);
let _ = call(&imag_home, &["set", "cat", "test"]);
let output = call(&imag_home, &["get", "test"]);
debug!("output = {:?}", output);
assert!(!output.is_empty());
assert_eq!(output.len(), 1);
assert_eq!(output[0], "cat");
}
|
random_line_split
|
|
imag_category.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::process::Command;
use assert_fs::fixture::TempDir;
pub fn binary(tempdir: &TempDir) -> Command {
crate::imag::binary(tempdir, "imag-category")
}
pub fn call(tmpdir: &TempDir, args: &[&str]) -> Vec<String>
|
#[test]
fn test_new_entry_has_no_category() {
crate::setup_logging();
let imag_home = crate::imag::make_temphome();
crate::imag_init::call(&imag_home);
crate::imag_create::call(&imag_home, &["test"]);
let (assert, stderr_output) = {
let mut binary = binary(&imag_home);
binary.stdin(std::process::Stdio::inherit());
binary.arg("--pipe-input");
binary.arg("--pipe-output");
binary.arg("get");
binary.arg("test");
crate::imag::stderr_of_command(&mut binary)
};
assert.failure();
assert!(stderr_output.iter().any(|substr| substr.contains("Category name missing")));
}
#[test]
fn test_after_setting_a_new_category_there_is_a_category() {
crate::setup_logging();
let imag_home = crate::imag::make_temphome();
crate::imag_init::call(&imag_home);
crate::imag_create::call(&imag_home, &["test"]);
let _ = call(&imag_home, &["create-category", "cat"]);
let _ = call(&imag_home, &["set", "cat", "test"]);
let output = call(&imag_home, &["get", "test"]);
debug!("output = {:?}", output);
assert!(!output.is_empty());
assert_eq!(output.len(), 1);
assert_eq!(output[0], "cat");
}
|
{
let mut binary = binary(tmpdir);
binary.stdin(std::process::Stdio::inherit());
binary.arg("--pipe-input");
binary.arg("--pipe-output");
binary.args(args);
debug!("Command = {:?}", binary);
crate::imag::stdout_of_command(binary)
}
|
identifier_body
|
non-exhaustive-pattern-witness.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(advanced_slice_patterns)]
struct Foo {
first: bool,
second: Option<[uint; 4]>
}
enum Color {
Red,
Green,
CustomRGBA { a: bool, r: u8, g: u8, b: u8 }
}
fn struct_with_a_nested_enum_and_vector() {
match (Foo { first: true, second: None }) {
//~^ ERROR non-exhaustive patterns: `Foo { first: false, second: Some([_, _, _, _]) }` not covered
Foo { first: true, second: None } => (),
Foo { first: true, second: Some(_) } => (),
Foo { first: false, second: None } => (),
Foo { first: false, second: Some([1u, 2u, 3u, 4u]) } => ()
}
}
fn enum_with_multiple_missing_variants() {
match Color::Red {
//~^ ERROR non-exhaustive patterns: `Red` not covered
Color::CustomRGBA { .. } => ()
}
}
fn enum_struct_variant() {
match Color::Red {
//~^ ERROR non-exhaustive patterns: `CustomRGBA { a: true,.. }` not covered
Color::Red => (),
Color::Green => (),
Color::CustomRGBA { a: false, r: _, g: _, b: 0 } => (),
Color::CustomRGBA { a: false, r: _, g: _, b: _ } => ()
}
}
enum Enum {
First,
Second(bool)
}
fn vectors_with_nested_enums() {
let x: &'static [Enum] = &[Enum::First, Enum::Second(false)];
match x {
//~^ ERROR non-exhaustive patterns: `[Second(true), Second(false)]` not covered
[] => (),
[_] => (),
[Enum::First, _] => (),
[Enum::Second(true), Enum::First] => (),
[Enum::Second(true), Enum::Second(true)] => (),
[Enum::Second(false), _] => (),
[_, _, tail.., _] => ()
}
}
fn
|
() {
match ((), false) {
//~^ ERROR non-exhaustive patterns: `((), false)` not covered
((), true) => ()
}
}
fn main() {}
|
missing_nil
|
identifier_name
|
non-exhaustive-pattern-witness.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(advanced_slice_patterns)]
struct Foo {
first: bool,
second: Option<[uint; 4]>
}
enum Color {
Red,
Green,
CustomRGBA { a: bool, r: u8, g: u8, b: u8 }
}
fn struct_with_a_nested_enum_and_vector() {
match (Foo { first: true, second: None }) {
//~^ ERROR non-exhaustive patterns: `Foo { first: false, second: Some([_, _, _, _]) }` not covered
Foo { first: true, second: None } => (),
Foo { first: true, second: Some(_) } => (),
Foo { first: false, second: None } => (),
Foo { first: false, second: Some([1u, 2u, 3u, 4u]) } => ()
}
}
fn enum_with_multiple_missing_variants() {
match Color::Red {
//~^ ERROR non-exhaustive patterns: `Red` not covered
Color::CustomRGBA { .. } => ()
}
}
fn enum_struct_variant() {
match Color::Red {
//~^ ERROR non-exhaustive patterns: `CustomRGBA { a: true,.. }` not covered
Color::Red => (),
Color::Green => (),
Color::CustomRGBA { a: false, r: _, g: _, b: 0 } => (),
Color::CustomRGBA { a: false, r: _, g: _, b: _ } => ()
}
}
enum Enum {
First,
Second(bool)
}
fn vectors_with_nested_enums() {
let x: &'static [Enum] = &[Enum::First, Enum::Second(false)];
match x {
//~^ ERROR non-exhaustive patterns: `[Second(true), Second(false)]` not covered
[] => (),
|
[Enum::Second(true), Enum::First] => (),
[Enum::Second(true), Enum::Second(true)] => (),
[Enum::Second(false), _] => (),
[_, _, tail.., _] => ()
}
}
fn missing_nil() {
match ((), false) {
//~^ ERROR non-exhaustive patterns: `((), false)` not covered
((), true) => ()
}
}
fn main() {}
|
[_] => (),
[Enum::First, _] => (),
|
random_line_split
|
main.rs
|
#![feature(macro_rules)]
#![feature(globs)]
#![feature(phase)]
#![allow(missing_copy_implementations)]
#[phase(plugin)]
extern crate gl_generator;
extern crate glfw;
extern crate time;
use glfw::{Glfw, Context, OpenGlProfileHint, WindowHint, WindowMode};
use std::rand::{task_rng, Rng};
use std::num::FloatMath;
use self::gl::Gl;
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
pub mod gl {
use self::types::*;
generate_gl_bindings! {
api: "gl",
profile: "core",
version: "3.0",
generator: "struct",
extensions: [
"GL_EXT_texture_filter_anisotropic",
]
}
}
struct Color {
red: f32,
green: f32,
blue: f32,
alpha: f32
}
struct Particle {
life: f32,
fade: f32,
x: f32,
y: f32,
xi: f32,
yi: f32,
color: Color
}
static WINDOW_WIDTH: u32 = 800;
static WINDOW_HEIGHT: u32 = 600;
static MAX_PARTICLES: uint = 20000;
static MAX_LIFE: f32 = 2.5;
static PARTICLE_SIZE: f32 = 1.5;
fn init_gl(gl: &Gl, glfw: &Glfw) {
// Choose a GL profile that is compatible with OS X 10.7+
glfw.window_hint(WindowHint::ContextVersion(3, 2));
glfw.window_hint(WindowHint::OpenglForwardCompat(true));
glfw.window_hint(WindowHint::OpenglProfile(OpenGlProfileHint::Core));
glfw.set_swap_interval(1);
unsafe {
gl.Viewport(0, 0, WINDOW_WIDTH.to_i32().unwrap(), WINDOW_HEIGHT.to_i32().unwrap());
gl.MatrixMode(gl::PROJECTION);
gl.LoadIdentity();
gl.Ortho(0.0, 800.0, 600.0, 0.0, 0.0, 1.0);
gl.Enable(gl::TEXTURE_2D);
gl.Enable(gl::BLEND);
gl.ShadeModel(gl::SMOOTH);
gl.BlendFunc(gl::SRC_ALPHA, gl::ONE);
}
}
fn main() {
let glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
let (window, _) = glfw.create_window(WINDOW_WIDTH, WINDOW_HEIGHT, "OpenGL", WindowMode::Windowed)
.expect("Failed to create GLFW window.");
window.make_current();
// Load the OpenGL function pointers
let gl = Gl::load_with(|s| window.get_proc_address(s));
init_gl(&gl, &glfw);
let mut particles: Vec<Particle> = Vec::with_capacity(MAX_PARTICLES);
while !window.should_close() {
// Poll events
glfw.poll_events();
// Clear the screen to black
unsafe {
gl.ClearColor(0.0, 0.0, 0.0, 1.0);
gl.Clear(gl::COLOR_BUFFER_BIT);
gl.PointSize(PARTICLE_SIZE);
gl.Begin(gl::POINTS);
// Draw particles
for p in particles.iter() {
gl.Color4f(p.color.red, p.color.green, p.color.blue, p.color.alpha);
gl.Vertex2f(p.x, p.y);
}
gl.End();
}
move_particles(&mut particles);
// Swap buffers
window.swap_buffers();
}
}
fn create_new_particle() -> Particle {
let angle: f32 = task_rng().gen_range(0.0, 360.0);
let v: f32 = task_rng().gen_range(0.1, 1.1);
let timespec = time::get_time();
let millis = timespec.nsec as f32 / 1000.0 / 1000.0;
let sec = timespec.sec.rem(100) as f32;
let radius = (WINDOW_WIDTH as f32).min(WINDOW_HEIGHT as f32) / 4.0;
let x = millis.sin() * radius;
let y = millis.cos() * radius;
Particle {
life: task_rng().gen_range(MAX_LIFE * 0.8, MAX_LIFE),
fade: task_rng().gen_range(0.01, 0.05),
x: x + WINDOW_WIDTH as f32 / 2.0,
y: y + WINDOW_HEIGHT as f32 / 2.0,
xi: angle.cos() * v,
yi: angle.sin() * v,
color: Color {
red: sec.sin(),
green: sec.cos(),
blue: task_rng().gen_range(0.4, 0.6),
alpha: 1.0
}
}
}
fn move_particles(particles: &mut Vec<Particle>) {
// Move & decay existing particles
for mut particle in particles.iter_mut() {
particle.life -= particle.fade;
particle.x += particle.xi;
particle.y += particle.yi;
particle.xi *= 0.999;
|
particle.color.blue += particle.fade / 3.0;
}
// Replace dead particles
for i in range(0u, particles.len()) {
if particles[i].life < 0.05 {
particles.remove(i);
particles.insert(i, create_new_particle());
}
}
// Add new particles if missing
let mut i = 0i;
loop {
if particles.len() < MAX_PARTICLES {
particles.push(create_new_particle());
i += 1;
} else {
break;
}
if i > 20 {
break;
}
}
}
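// Illustrative sketch, not part of the original program: create_new_particle
// above places the emitter on a circle around the window centre, which is a
// plain polar-to-Cartesian conversion. The angle and radius arguments are
// example inputs, not values the program itself uses.
#[allow(dead_code)]
fn demo_emitter_position(angle: f32, radius: f32) -> (f32, f32) {
    let x = angle.sin() * radius + WINDOW_WIDTH as f32 / 2.0;
    let y = angle.cos() * radius + WINDOW_HEIGHT as f32 / 2.0;
    (x, y)
}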
|
particle.yi *= 0.999;
particle.color.alpha = 1.0 * particle.life / MAX_LIFE;
particle.color.red += particle.fade / 3.0;
particle.color.green += particle.fade / 3.0;
|
random_line_split
|
main.rs
|
#![feature(macro_rules)]
#![feature(globs)]
#![feature(phase)]
#![allow(missing_copy_implementations)]
#[phase(plugin)]
extern crate gl_generator;
extern crate glfw;
extern crate time;
use glfw::{Glfw, Context, OpenGlProfileHint, WindowHint, WindowMode};
use std::rand::{task_rng, Rng};
use std::num::FloatMath;
use self::gl::Gl;
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
pub mod gl {
use self::types::*;
generate_gl_bindings! {
api: "gl",
profile: "core",
version: "3.0",
generator: "struct",
extensions: [
"GL_EXT_texture_filter_anisotropic",
]
}
}
struct Color {
red: f32,
green: f32,
blue: f32,
alpha: f32
}
struct Particle {
life: f32,
fade: f32,
x: f32,
y: f32,
xi: f32,
yi: f32,
color: Color
}
static WINDOW_WIDTH: u32 = 800;
static WINDOW_HEIGHT: u32 = 600;
static MAX_PARTICLES: uint = 20000;
static MAX_LIFE: f32 = 2.5;
static PARTICLE_SIZE: f32 = 1.5;
fn init_gl(gl: &Gl, glfw: &Glfw) {
// Choose a GL profile that is compatible with OS X 10.7+
glfw.window_hint(WindowHint::ContextVersion(3, 2));
glfw.window_hint(WindowHint::OpenglForwardCompat(true));
glfw.window_hint(WindowHint::OpenglProfile(OpenGlProfileHint::Core));
glfw.set_swap_interval(1);
unsafe {
gl.Viewport(0, 0, WINDOW_WIDTH.to_i32().unwrap(), WINDOW_HEIGHT.to_i32().unwrap());
gl.MatrixMode(gl::PROJECTION);
gl.LoadIdentity();
gl.Ortho(0.0, 800.0, 600.0, 0.0, 0.0, 1.0);
gl.Enable(gl::TEXTURE_2D);
gl.Enable(gl::BLEND);
gl.ShadeModel(gl::SMOOTH);
gl.BlendFunc(gl::SRC_ALPHA, gl::ONE);
}
}
fn main() {
let glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
let (window, _) = glfw.create_window(WINDOW_WIDTH, WINDOW_HEIGHT, "OpenGL", WindowMode::Windowed)
.expect("Failed to create GLFW window.");
window.make_current();
// Load the OpenGL function pointers
let gl = Gl::load_with(|s| window.get_proc_address(s));
init_gl(&gl, &glfw);
let mut particles: Vec<Particle> = Vec::with_capacity(MAX_PARTICLES);
while !window.should_close() {
// Poll events
glfw.poll_events();
// Clear the screen to black
unsafe {
gl.ClearColor(0.0, 0.0, 0.0, 1.0);
gl.Clear(gl::COLOR_BUFFER_BIT);
gl.PointSize(PARTICLE_SIZE);
gl.Begin(gl::POINTS);
// Draw particles
for p in particles.iter() {
gl.Color4f(p.color.red, p.color.green, p.color.blue, p.color.alpha);
gl.Vertex2f(p.x, p.y);
}
gl.End();
}
move_particles(&mut particles);
// Swap buffers
window.swap_buffers();
}
}
fn create_new_particle() -> Particle {
let angle: f32 = task_rng().gen_range(0.0, 360.0);
let v: f32 = task_rng().gen_range(0.1, 1.1);
let timespec = time::get_time();
let millis = timespec.nsec as f32 / 1000.0 / 1000.0;
let sec = timespec.sec.rem(100) as f32;
let radius = (WINDOW_WIDTH as f32).min(WINDOW_HEIGHT as f32) / 4.0;
let x = millis.sin() * radius;
let y = millis.cos() * radius;
Particle {
life: task_rng().gen_range(MAX_LIFE * 0.8, MAX_LIFE),
fade: task_rng().gen_range(0.01, 0.05),
x: x + WINDOW_WIDTH as f32 / 2.0,
y: y + WINDOW_HEIGHT as f32 / 2.0,
xi: angle.cos() * v,
yi: angle.sin() * v,
color: Color {
red: sec.sin(),
green: sec.cos(),
blue: task_rng().gen_range(0.4, 0.6),
alpha: 1.0
}
}
}
fn move_particles(particles: &mut Vec<Particle>)
|
}
}
// Add new particles if missing
let mut i = 0i;
loop {
if particles.len() < MAX_PARTICLES {
particles.push(create_new_particle());
i += 1;
} else {
break;
}
if i > 20 {
break;
}
}
}
|
{
// Move & decay existing particles
for mut particle in particles.iter_mut() {
particle.life -= particle.fade;
particle.x += particle.xi;
particle.y += particle.yi;
particle.xi *= 0.999;
particle.yi *= 0.999;
particle.color.alpha = 1.0 * particle.life / MAX_LIFE;
particle.color.red += particle.fade / 3.0;
particle.color.green += particle.fade / 3.0;
particle.color.blue += particle.fade / 3.0;
}
// Replace dead particles
for i in range(0u, particles.len()) {
if particles[i].life < 0.05 {
particles.remove(i);
particles.insert(i, create_new_particle());
|
identifier_body
|
main.rs
|
#![feature(macro_rules)]
#![feature(globs)]
#![feature(phase)]
#![allow(missing_copy_implementations)]
#[phase(plugin)]
extern crate gl_generator;
extern crate glfw;
extern crate time;
use glfw::{Glfw, Context, OpenGlProfileHint, WindowHint, WindowMode};
use std::rand::{task_rng, Rng};
use std::num::FloatMath;
use self::gl::Gl;
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
pub mod gl {
use self::types::*;
generate_gl_bindings! {
api: "gl",
profile: "core",
version: "3.0",
generator: "struct",
extensions: [
"GL_EXT_texture_filter_anisotropic",
]
}
}
struct
|
{
red: f32,
green: f32,
blue: f32,
alpha: f32
}
struct Particle {
life: f32,
fade: f32,
x: f32,
y: f32,
xi: f32,
yi: f32,
color: Color
}
static WINDOW_WIDTH: u32 = 800;
static WINDOW_HEIGHT: u32 = 600;
static MAX_PARTICLES: uint = 20000;
static MAX_LIFE: f32 = 2.5;
static PARTICLE_SIZE: f32 = 1.5;
fn init_gl(gl: &Gl, glfw: &Glfw) {
// Choose a GL profile that is compatible with OS X 10.7+
glfw.window_hint(WindowHint::ContextVersion(3, 2));
glfw.window_hint(WindowHint::OpenglForwardCompat(true));
glfw.window_hint(WindowHint::OpenglProfile(OpenGlProfileHint::Core));
glfw.set_swap_interval(1);
unsafe {
gl.Viewport(0, 0, WINDOW_WIDTH.to_i32().unwrap(), WINDOW_HEIGHT.to_i32().unwrap());
gl.MatrixMode(gl::PROJECTION);
gl.LoadIdentity();
gl.Ortho(0.0, 800.0, 600.0, 0.0, 0.0, 1.0);
gl.Enable(gl::TEXTURE_2D);
gl.Enable(gl::BLEND);
gl.ShadeModel(gl::SMOOTH);
gl.BlendFunc(gl::SRC_ALPHA, gl::ONE);
}
}
fn main() {
let glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
let (window, _) = glfw.create_window(WINDOW_WIDTH, WINDOW_HEIGHT, "OpenGL", WindowMode::Windowed)
.expect("Failed to create GLFW window.");
window.make_current();
// Load the OpenGL function pointers
let gl = Gl::load_with(|s| window.get_proc_address(s));
init_gl(&gl, &glfw);
let mut particles: Vec<Particle> = Vec::with_capacity(MAX_PARTICLES);
while !window.should_close() {
// Poll events
glfw.poll_events();
// Clear the screen to black
unsafe {
gl.ClearColor(0.0, 0.0, 0.0, 1.0);
gl.Clear(gl::COLOR_BUFFER_BIT);
gl.PointSize(PARTICLE_SIZE);
gl.Begin(gl::POINTS);
// Draw particles
for p in particles.iter() {
gl.Color4f(p.color.red, p.color.green, p.color.blue, p.color.alpha);
gl.Vertex2f(p.x, p.y);
}
gl.End();
}
move_particles(&mut particles);
// Swap buffers
window.swap_buffers();
}
}
fn create_new_particle() -> Particle {
let angle: f32 = task_rng().gen_range(0.0, 360.0);
let v: f32 = task_rng().gen_range(0.1, 1.1);
let timespec = time::get_time();
let millis = timespec.nsec as f32 / 1000.0 / 1000.0;
let sec = timespec.sec.rem(100) as f32;
let radius = (WINDOW_WIDTH as f32).min(WINDOW_HEIGHT as f32) / 4.0;
let x = millis.sin() * radius;
let y = millis.cos() * radius;
Particle {
life: task_rng().gen_range(MAX_LIFE * 0.8, MAX_LIFE),
fade: task_rng().gen_range(0.01, 0.05),
x: x + WINDOW_WIDTH as f32 / 2.0,
y: y + WINDOW_HEIGHT as f32 / 2.0,
xi: angle.cos() * v,
yi: angle.sin() * v,
color: Color {
red: sec.sin(),
green: sec.cos(),
blue: task_rng().gen_range(0.4, 0.6),
alpha: 1.0
}
}
}
fn move_particles(particles: &mut Vec<Particle>) {
// Move & decay existing particles
for mut particle in particles.iter_mut() {
particle.life -= particle.fade;
particle.x += particle.xi;
particle.y += particle.yi;
particle.xi *= 0.999;
particle.yi *= 0.999;
particle.color.alpha = 1.0 * particle.life / MAX_LIFE;
particle.color.red += particle.fade / 3.0;
particle.color.green += particle.fade / 3.0;
particle.color.blue += particle.fade / 3.0;
}
// Replace dead particles
for i in range(0u, particles.len()) {
if particles[i].life < 0.05 {
particles.remove(i);
particles.insert(i, create_new_particle());
}
}
// Add new particles if missing
let mut i = 0i;
loop {
if particles.len() < MAX_PARTICLES {
particles.push(create_new_particle());
i += 1;
} else {
break;
}
if i > 20 {
break;
}
}
}
|
Color
|
identifier_name
|
main.rs
|
#![feature(macro_rules)]
#![feature(globs)]
#![feature(phase)]
#![allow(missing_copy_implementations)]
#[phase(plugin)]
extern crate gl_generator;
extern crate glfw;
extern crate time;
use glfw::{Glfw, Context, OpenGlProfileHint, WindowHint, WindowMode};
use std::rand::{task_rng, Rng};
use std::num::FloatMath;
use self::gl::Gl;
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
pub mod gl {
use self::types::*;
generate_gl_bindings! {
api: "gl",
profile: "core",
version: "3.0",
generator: "struct",
extensions: [
"GL_EXT_texture_filter_anisotropic",
]
}
}
struct Color {
red: f32,
green: f32,
blue: f32,
alpha: f32
}
struct Particle {
life: f32,
fade: f32,
x: f32,
y: f32,
xi: f32,
yi: f32,
color: Color
}
static WINDOW_WIDTH: u32 = 800;
static WINDOW_HEIGHT: u32 = 600;
static MAX_PARTICLES: uint = 20000;
static MAX_LIFE: f32 = 2.5;
static PARTICLE_SIZE: f32 = 1.5;
fn init_gl(gl: &Gl, glfw: &Glfw) {
// Choose a GL profile that is compatible with OS X 10.7+
glfw.window_hint(WindowHint::ContextVersion(3, 2));
glfw.window_hint(WindowHint::OpenglForwardCompat(true));
glfw.window_hint(WindowHint::OpenglProfile(OpenGlProfileHint::Core));
glfw.set_swap_interval(1);
unsafe {
gl.Viewport(0, 0, WINDOW_WIDTH.to_i32().unwrap(), WINDOW_HEIGHT.to_i32().unwrap());
gl.MatrixMode(gl::PROJECTION);
gl.LoadIdentity();
gl.Ortho(0.0, 800.0, 600.0, 0.0, 0.0, 1.0);
gl.Enable(gl::TEXTURE_2D);
gl.Enable(gl::BLEND);
gl.ShadeModel(gl::SMOOTH);
gl.BlendFunc(gl::SRC_ALPHA, gl::ONE);
}
}
fn main() {
let glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
let (window, _) = glfw.create_window(WINDOW_WIDTH, WINDOW_HEIGHT, "OpenGL", WindowMode::Windowed)
.expect("Failed to create GLFW window.");
window.make_current();
// Load the OpenGL function pointers
let gl = Gl::load_with(|s| window.get_proc_address(s));
init_gl(&gl, &glfw);
let mut particles: Vec<Particle> = Vec::with_capacity(MAX_PARTICLES);
while !window.should_close() {
// Poll events
glfw.poll_events();
// Clear the screen to black
unsafe {
gl.ClearColor(0.0, 0.0, 0.0, 1.0);
gl.Clear(gl::COLOR_BUFFER_BIT);
gl.PointSize(PARTICLE_SIZE);
gl.Begin(gl::POINTS);
// Draw particles
for p in particles.iter() {
gl.Color4f(p.color.red, p.color.green, p.color.blue, p.color.alpha);
gl.Vertex2f(p.x, p.y);
}
gl.End();
}
move_particles(&mut particles);
// Swap buffers
window.swap_buffers();
}
}
fn create_new_particle() -> Particle {
let angle: f32 = task_rng().gen_range(0.0, 360.0);
let v: f32 = task_rng().gen_range(0.1, 1.1);
let timespec = time::get_time();
let millis = timespec.nsec as f32 / 1000.0 / 1000.0;
let sec = timespec.sec.rem(100) as f32;
let radius = (WINDOW_WIDTH as f32).min(WINDOW_HEIGHT as f32) / 4.0;
let x = millis.sin() * radius;
let y = millis.cos() * radius;
Particle {
life: task_rng().gen_range(MAX_LIFE * 0.8, MAX_LIFE),
fade: task_rng().gen_range(0.01, 0.05),
x: x + WINDOW_WIDTH as f32 / 2.0,
y: y + WINDOW_HEIGHT as f32 / 2.0,
xi: angle.cos() * v,
yi: angle.sin() * v,
color: Color {
red: sec.sin(),
green: sec.cos(),
blue: task_rng().gen_range(0.4, 0.6),
alpha: 1.0
}
}
}
fn move_particles(particles: &mut Vec<Particle>) {
// Move & decay existing particles
for mut particle in particles.iter_mut() {
particle.life -= particle.fade;
particle.x += particle.xi;
particle.y += particle.yi;
particle.xi *= 0.999;
particle.yi *= 0.999;
particle.color.alpha = 1.0 * particle.life / MAX_LIFE;
particle.color.red += particle.fade / 3.0;
particle.color.green += particle.fade / 3.0;
particle.color.blue += particle.fade / 3.0;
}
// Replace dead particles
for i in range(0u, particles.len()) {
if particles[i].life < 0.05 {
particles.remove(i);
particles.insert(i, create_new_particle());
}
}
// Add new particles if missing
let mut i = 0i;
loop {
if particles.len() < MAX_PARTICLES {
particles.push(create_new_particle());
i += 1;
} else
|
if i > 20 {
break;
}
}
}
|
{
break;
}
|
conditional_block
|
cli.rs
|
use structopt::StructOpt;
#[derive(StructOpt)]
#[structopt(
name = "blog",
after_help = "You can also run `blog SUBCOMMAND -h` to get more information about that subcommand."
)]
pub enum Cli {
/// Create a new wonderfully delighting blog post
#[structopt(name = "create_post")]
CreatePost {
/// The title of your post
#[structopt(long = "title")]
title: String,
},
/// Fine-tune an existing post to reach 100% reader delight
#[structopt(name = "edit_post")]
EditPost {
/// The id of the post to edit
post_id: i32,
/// Announce this piece of literary perfectionisms to the world?
#[structopt(short = "i")]
publish: bool,
},
/// Get an overview of all your literary accomplishments
#[structopt(name = "all_posts")]
AllPosts {
/// The page to display
#[structopt(long = "page", default_value = "1")]
page: i64,
/// The number of posts to display per page (cannot be larger than 25)
#[structopt(long = "per-page")]
per_page: Option<i64>,
},
/// Quickly add an important remark to a post
#[structopt(name = "add_comment")]
AddComment {
/// The id of the post to comment on
post_id: i32,
},
/// Edit a comment, e.g. to cite the sources of your facts
#[structopt(name = "edit_comment")]
EditComment {
/// The id of the comment to edit
comment_id: i32,
},
/// See all the instances where you were able to improve a post by adding a comment
#[structopt(name = "my_comments")]
MyComments {
/// The page to display
#[structopt(long = "page", default_value = "1")]
page: i64,
/// The number of comments to display per page (cannot be larger than 25)
#[structopt(long = "per-page")]
per_page: Option<i64>,
},
/// Register as the newest member of this slightly elitist community
#[structopt(name = "register")]
|
Register,
}
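// Illustrative sketch, not part of the original file: parsing the command line
// and dispatching on the chosen subcommand. from_args() comes from the derived
// StructOpt impl; the match arms below are placeholders, not the real handlers.
#[allow(dead_code)]
fn demo_dispatch() {
    match Cli::from_args() {
        Cli::CreatePost { title } => println!("creating post: {}", title),
        Cli::AllPosts { page, per_page } => {
            println!("listing page {} ({:?} per page)", page, per_page)
        }
        _ => println!("other subcommand"),
    }
}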
|
random_line_split
|
|
cli.rs
|
use structopt::StructOpt;
#[derive(StructOpt)]
#[structopt(
name = "blog",
after_help = "You can also run `blog SUBCOMMAND -h` to get more information about that subcommand."
)]
pub enum
|
{
/// Create a new wonderfully delighting blog post
#[structopt(name = "create_post")]
CreatePost {
/// The title of your post
#[structopt(long = "title")]
title: String,
},
/// Fine-tune an existing post to reach 100% reader delight
#[structopt(name = "edit_post")]
EditPost {
/// The id of the post to edit
post_id: i32,
/// Announce this piece of literary perfectionisms to the world?
#[structopt(short = "i")]
publish: bool,
},
/// Get an overview of all your literary accomplishments
#[structopt(name = "all_posts")]
AllPosts {
/// The page to display
#[structopt(long = "page", default_value = "1")]
page: i64,
/// The number of posts to display per page (cannot be larger than 25)
#[structopt(long = "per-page")]
per_page: Option<i64>,
},
/// Quickly add an important remark to a post
#[structopt(name = "add_comment")]
AddComment {
/// The id of the post to comment on
post_id: i32,
},
/// Edit a comment, e.g. to cite the sources of your facts
#[structopt(name = "edit_comment")]
EditComment {
/// The id of the comment to edit
comment_id: i32,
},
/// See all the instances where you were able to improve a post by adding a comment
#[structopt(name = "my_comments")]
MyComments {
/// The page to display
#[structopt(long = "page", default_value = "1")]
page: i64,
/// The number of comments to display per page (cannot be larger than 25)
#[structopt(long = "per-page")]
per_page: Option<i64>,
},
/// Register as the newest member of this slightly elitist community
#[structopt(name = "register")]
Register,
}
|
Cli
|
identifier_name
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)]
#![feature(custom_derive)]
#![feature(box_raw)]
#![feature(plugin)]
#![feature(slice_patterns)]
#![feature(step_by)]
#![feature(vec_push_all)]
#![feature(custom_attribute)]
#![plugin(serde_macros, plugins)]
#![plugin(regex_macros)]
extern crate euclid;
extern crate hyper;
extern crate ipc_channel;
#[macro_use]
extern crate log;
extern crate png;
extern crate regex;
extern crate serde;
extern crate stb_image;
extern crate url;
extern crate util;
extern crate msg;
use hyper::header::{ContentType, Headers};
use hyper::http::RawStatus;
use hyper::method::Method;
use hyper::mime::{Mime, Attr};
use ipc_channel::ipc::{self, IpcReceiver, IpcSender};
use msg::constellation_msg::{PipelineId};
use regex::Regex;
use serde::{Deserializer, Serializer};
use url::Url;
use util::mem::HeapSizeOf;
use std::thread;
pub mod hosts;
pub mod image_cache_task;
pub mod net_error_list;
pub mod storage_task;
pub static IPV4_REGEX: Regex = regex!(
r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$");
pub static IPV6_REGEX: Regex = regex!(r"^([a-fA-F0-9]{0,4}[:]?){1,8}(/\d{1,3})?$");
/// Image handling.
///
/// It may be surprising that this goes in the network crate as opposed to the graphics crate.
/// However, image handling is generally very integrated with the network stack (especially where
/// caching is involved) and as a result it must live in here.
pub mod image {
pub mod base;
}
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct LoadData {
pub url: Url,
pub method: Method,
#[ignore_heap_size_of = "Defined in hyper"]
/// Headers that will apply to the initial request only
pub headers: Headers,
#[ignore_heap_size_of = "Defined in hyper"]
/// Headers that will apply to the initial request and any redirects
pub preserved_headers: Headers,
pub data: Option<Vec<u8>>,
pub cors: Option<ResourceCORSData>,
pub pipeline_id: Option<PipelineId>,
}
impl LoadData {
pub fn new(url: Url, id: Option<PipelineId>) -> LoadData {
LoadData {
url: url,
method: Method::Get,
headers: Headers::new(),
preserved_headers: Headers::new(),
data: None,
cors: None,
pipeline_id: id,
}
}
}
/// A listener for asynchronous network events. Cancelling the underlying request is unsupported.
pub trait AsyncResponseListener {
/// The response headers for a request have been received.
fn headers_available(&self, metadata: Metadata);
/// A portion of the response body has been received. This data is unavailable after
/// this method returns, and must be stored accordingly.
fn data_available(&self, payload: Vec<u8>);
|
/// Data for passing between threads/processes to indicate a particular action to
/// take on a provided network listener.
#[derive(Deserialize, Serialize)]
pub enum ResponseAction {
/// Invoke headers_available
HeadersAvailable(Metadata),
/// Invoke data_available
DataAvailable(Vec<u8>),
/// Invoke response_complete
ResponseComplete(Result<(), String>)
}
impl ResponseAction {
/// Execute the default action on a provided listener.
pub fn process(self, listener: &AsyncResponseListener) {
match self {
ResponseAction::HeadersAvailable(m) => listener.headers_available(m),
ResponseAction::DataAvailable(d) => listener.data_available(d),
ResponseAction::ResponseComplete(r) => listener.response_complete(r),
}
}
}
/// A target for async networking events. Commonly used to dispatch a runnable event to another
/// thread storing the wrapped closure for later execution.
#[derive(Deserialize, Serialize)]
pub struct AsyncResponseTarget {
pub sender: IpcSender<ResponseAction>,
}
impl AsyncResponseTarget {
pub fn invoke_with_listener(&self, action: ResponseAction) {
self.sender.send(action).unwrap()
}
}
/// A wrapper for a network load that can either be channel or event-based.
#[derive(Deserialize, Serialize)]
pub enum LoadConsumer {
Channel(IpcSender<LoadResponse>),
Listener(AsyncResponseTarget),
}
/// Handle to a resource task
pub type ResourceTask = IpcSender<ControlMsg>;
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum IncludeSubdomains {
Included,
NotIncluded
}
#[derive(Deserialize, Serialize)]
pub enum ControlMsg {
/// Request the data associated with a particular URL
Load(LoadData, LoadConsumer),
/// Store a set of cookies for a given originating URL
SetCookiesForUrl(Url, String, CookieSource),
/// Retrieve the stored cookies for a given URL
GetCookiesForUrl(Url, IpcSender<Option<String>>, CookieSource),
/// Store a domain's STS information
SetHSTSEntryForHost(String, IncludeSubdomains, u64),
Exit
}
/// Initialized but unsent request. Encapsulates everything necessary to instruct
/// the resource task to make a new request. The `load` method *must* be called before
/// destruction or the task will panic.
pub struct PendingAsyncLoad {
resource_task: ResourceTask,
url: Url,
pipeline: Option<PipelineId>,
guard: PendingLoadGuard,
}
struct PendingLoadGuard {
loaded: bool,
}
impl PendingLoadGuard {
fn neuter(&mut self) {
self.loaded = true;
}
}
impl Drop for PendingLoadGuard {
fn drop(&mut self) {
if !thread::panicking() {
assert!(self.loaded)
}
}
}
impl PendingAsyncLoad {
pub fn new(resource_task: ResourceTask, url: Url, pipeline: Option<PipelineId>)
-> PendingAsyncLoad {
PendingAsyncLoad {
resource_task: resource_task,
url: url,
pipeline: pipeline,
guard: PendingLoadGuard { loaded: false, },
}
}
/// Initiate the network request associated with this pending load.
pub fn load(mut self) -> IpcReceiver<LoadResponse> {
self.guard.neuter();
let load_data = LoadData::new(self.url, self.pipeline);
let (sender, receiver) = ipc::channel().unwrap();
let consumer = LoadConsumer::Channel(sender);
self.resource_task.send(ControlMsg::Load(load_data, consumer)).unwrap();
receiver
}
/// Initiate the network request associated with this pending load, using the provided target.
pub fn load_async(mut self, listener: AsyncResponseTarget) {
self.guard.neuter();
let load_data = LoadData::new(self.url, self.pipeline);
let consumer = LoadConsumer::Listener(listener);
self.resource_task.send(ControlMsg::Load(load_data, consumer)).unwrap();
}
}
/// Message sent in response to `Load`. Contains metadata, and a port
/// for receiving the data.
///
/// Even if loading fails immediately, we send one of these and the
/// progress_port will provide the error.
#[derive(Serialize, Deserialize)]
pub struct LoadResponse {
/// Metadata, such as from HTTP headers.
pub metadata: Metadata,
/// Port for reading data.
pub progress_port: IpcReceiver<ProgressMsg>,
}
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct ResourceCORSData {
/// CORS Preflight flag
pub preflight: bool,
/// Origin of CORS Request
pub origin: Url,
}
/// Metadata about a loaded resource, such as is obtained from HTTP headers.
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct Metadata {
/// Final URL after redirects.
pub final_url: Url,
/// MIME type / subtype.
pub content_type: Option<(ContentType)>,
/// Character set.
pub charset: Option<String>,
#[ignore_heap_size_of = "Defined in hyper"]
/// Headers
pub headers: Option<Headers>,
/// HTTP Status
pub status: Option<RawStatus>,
}
impl Metadata {
/// Metadata with defaults for everything optional.
pub fn default(url: Url) -> Self {
Metadata {
final_url: url,
content_type: None,
charset: None,
headers: None,
// https://fetch.spec.whatwg.org/#concept-response-status-message
status: Some(RawStatus(200, "OK".into())),
}
}
/// Extract the parts of a Mime that we care about.
pub fn set_content_type(&mut self, content_type: Option<&Mime>) {
match content_type {
None => (),
Some(mime) => {
self.content_type = Some(ContentType(mime.clone()));
let &Mime(_, _, ref parameters) = mime;
for &(ref k, ref v) in parameters {
if &Attr::Charset == k {
self.charset = Some(v.to_string());
}
}
}
}
}
}
/// The creator of a given cookie
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum CookieSource {
/// An HTTP API
HTTP,
/// A non-HTTP API
NonHTTP,
}
/// Messages sent in response to a `Load` message
#[derive(PartialEq, Debug, Deserialize, Serialize)]
pub enum ProgressMsg {
/// Binary data - there may be multiple of these
Payload(Vec<u8>),
/// Indicates loading is complete, either successfully or not
Done(Result<(), String>)
}
/// Convenience function for synchronously loading a whole resource.
pub fn load_whole_resource(resource_task: &ResourceTask, url: Url)
-> Result<(Metadata, Vec<u8>), String> {
let (start_chan, start_port) = ipc::channel().unwrap();
resource_task.send(ControlMsg::Load(LoadData::new(url, None),
LoadConsumer::Channel(start_chan))).unwrap();
let response = start_port.recv().unwrap();
let mut buf = vec!();
loop {
match response.progress_port.recv().unwrap() {
ProgressMsg::Payload(data) => buf.push_all(&data),
ProgressMsg::Done(Ok(())) => return Ok((response.metadata, buf)),
ProgressMsg::Done(Err(e)) => return Err(e)
}
}
}
/// Load a URL asynchronously and iterate over chunks of bytes from the response.
pub fn load_bytes_iter(pending: PendingAsyncLoad) -> (Metadata, ProgressMsgPortIterator) {
let input_port = pending.load();
let response = input_port.recv().unwrap();
let iter = ProgressMsgPortIterator {
progress_port: response.progress_port
};
(response.metadata, iter)
}
/// Iterator that reads chunks of bytes from a ProgressMsg port
pub struct ProgressMsgPortIterator {
progress_port: IpcReceiver<ProgressMsg>,
}
impl Iterator for ProgressMsgPortIterator {
type Item = Vec<u8>;
fn next(&mut self) -> Option<Vec<u8>> {
match self.progress_port.recv().unwrap() {
ProgressMsg::Payload(data) => Some(data),
ProgressMsg::Done(Ok(())) => None,
ProgressMsg::Done(Err(e)) => {
error!("error receiving bytes: {}", e);
None
}
}
}
}
|
/// The response is complete. If the provided status is an Err value, there is no guarantee
/// that the response body was completely read.
fn response_complete(&self, status: Result<(), String>);
}
|
random_line_split
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)]
#![feature(custom_derive)]
#![feature(box_raw)]
#![feature(plugin)]
#![feature(slice_patterns)]
#![feature(step_by)]
#![feature(vec_push_all)]
#![feature(custom_attribute)]
#![plugin(serde_macros, plugins)]
#![plugin(regex_macros)]
extern crate euclid;
extern crate hyper;
extern crate ipc_channel;
#[macro_use]
extern crate log;
extern crate png;
extern crate regex;
extern crate serde;
extern crate stb_image;
extern crate url;
extern crate util;
extern crate msg;
use hyper::header::{ContentType, Headers};
use hyper::http::RawStatus;
use hyper::method::Method;
use hyper::mime::{Mime, Attr};
use ipc_channel::ipc::{self, IpcReceiver, IpcSender};
use msg::constellation_msg::{PipelineId};
use regex::Regex;
use serde::{Deserializer, Serializer};
use url::Url;
use util::mem::HeapSizeOf;
use std::thread;
pub mod hosts;
pub mod image_cache_task;
pub mod net_error_list;
pub mod storage_task;
pub static IPV4_REGEX: Regex = regex!(
r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$");
pub static IPV6_REGEX: Regex = regex!(r"^([a-fA-F0-9]{0,4}[:]?){1,8}(/\d{1,3})?$");
/// Image handling.
///
/// It may be surprising that this goes in the network crate as opposed to the graphics crate.
/// However, image handling is generally very integrated with the network stack (especially where
/// caching is involved) and as a result it must live in here.
pub mod image {
pub mod base;
}
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct LoadData {
pub url: Url,
pub method: Method,
#[ignore_heap_size_of = "Defined in hyper"]
/// Headers that will apply to the initial request only
pub headers: Headers,
#[ignore_heap_size_of = "Defined in hyper"]
/// Headers that will apply to the initial request and any redirects
pub preserved_headers: Headers,
pub data: Option<Vec<u8>>,
pub cors: Option<ResourceCORSData>,
pub pipeline_id: Option<PipelineId>,
}
impl LoadData {
pub fn new(url: Url, id: Option<PipelineId>) -> LoadData {
LoadData {
url: url,
method: Method::Get,
headers: Headers::new(),
preserved_headers: Headers::new(),
data: None,
cors: None,
pipeline_id: id,
}
}
}
/// A listener for asynchronous network events. Cancelling the underlying request is unsupported.
pub trait AsyncResponseListener {
/// The response headers for a request have been received.
fn headers_available(&self, metadata: Metadata);
/// A portion of the response body has been received. This data is unavailable after
/// this method returns, and must be stored accordingly.
fn data_available(&self, payload: Vec<u8>);
/// The response is complete. If the provided status is an Err value, there is no guarantee
/// that the response body was completely read.
fn response_complete(&self, status: Result<(), String>);
}
/// Data for passing between threads/processes to indicate a particular action to
/// take on a provided network listener.
#[derive(Deserialize, Serialize)]
pub enum ResponseAction {
/// Invoke headers_available
HeadersAvailable(Metadata),
/// Invoke data_available
DataAvailable(Vec<u8>),
/// Invoke response_complete
ResponseComplete(Result<(), String>)
}
impl ResponseAction {
/// Execute the default action on a provided listener.
pub fn process(self, listener: &AsyncResponseListener) {
match self {
ResponseAction::HeadersAvailable(m) => listener.headers_available(m),
ResponseAction::DataAvailable(d) => listener.data_available(d),
ResponseAction::ResponseComplete(r) => listener.response_complete(r),
}
}
}
/// A target for async networking events. Commonly used to dispatch a runnable event to another
/// thread storing the wrapped closure for later execution.
#[derive(Deserialize, Serialize)]
pub struct AsyncResponseTarget {
pub sender: IpcSender<ResponseAction>,
}
impl AsyncResponseTarget {
pub fn invoke_with_listener(&self, action: ResponseAction) {
self.sender.send(action).unwrap()
}
}
/// A wrapper for a network load that can either be channel or event-based.
#[derive(Deserialize, Serialize)]
pub enum LoadConsumer {
Channel(IpcSender<LoadResponse>),
Listener(AsyncResponseTarget),
}
/// Handle to a resource task
pub type ResourceTask = IpcSender<ControlMsg>;
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum IncludeSubdomains {
Included,
NotIncluded
}
#[derive(Deserialize, Serialize)]
pub enum ControlMsg {
/// Request the data associated with a particular URL
Load(LoadData, LoadConsumer),
/// Store a set of cookies for a given originating URL
SetCookiesForUrl(Url, String, CookieSource),
/// Retrieve the stored cookies for a given URL
GetCookiesForUrl(Url, IpcSender<Option<String>>, CookieSource),
/// Store a domain's STS information
SetHSTSEntryForHost(String, IncludeSubdomains, u64),
Exit
}
/// Initialized but unsent request. Encapsulates everything necessary to instruct
/// the resource task to make a new request. The `load` method *must* be called before
/// destruction or the task will panic.
pub struct PendingAsyncLoad {
resource_task: ResourceTask,
url: Url,
pipeline: Option<PipelineId>,
guard: PendingLoadGuard,
}
struct PendingLoadGuard {
loaded: bool,
}
impl PendingLoadGuard {
fn neuter(&mut self) {
self.loaded = true;
}
}
impl Drop for PendingLoadGuard {
fn drop(&mut self) {
        if !thread::panicking() {
assert!(self.loaded)
}
}
}
impl PendingAsyncLoad {
pub fn new(resource_task: ResourceTask, url: Url, pipeline: Option<PipelineId>)
-> PendingAsyncLoad {
PendingAsyncLoad {
resource_task: resource_task,
url: url,
pipeline: pipeline,
guard: PendingLoadGuard { loaded: false, },
}
}
/// Initiate the network request associated with this pending load.
pub fn load(mut self) -> IpcReceiver<LoadResponse> {
self.guard.neuter();
let load_data = LoadData::new(self.url, self.pipeline);
let (sender, receiver) = ipc::channel().unwrap();
let consumer = LoadConsumer::Channel(sender);
self.resource_task.send(ControlMsg::Load(load_data, consumer)).unwrap();
receiver
}
/// Initiate the network request associated with this pending load, using the provided target.
pub fn load_async(mut self, listener: AsyncResponseTarget) {
self.guard.neuter();
let load_data = LoadData::new(self.url, self.pipeline);
let consumer = LoadConsumer::Listener(listener);
self.resource_task.send(ControlMsg::Load(load_data, consumer)).unwrap();
}
}
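// Hedged usage sketch (an assumption added for clarity, not upstream code):
// driving a PendingAsyncLoad through the channel-based path and collecting the
// whole body. The function name `example_blocking_load` is invented here.
#[allow(dead_code)]
fn example_blocking_load(resource_task: ResourceTask, url: Url) -> Vec<u8> {
    let pending = PendingAsyncLoad::new(resource_task, url, None);
    // `load` consumes the pending load and hands back the response channel.
    let response = pending.load().recv().unwrap();
    let mut body = vec!();
    loop {
        match response.progress_port.recv().unwrap() {
            ProgressMsg::Payload(data) => body.push_all(&data),
            ProgressMsg::Done(_) => return body,
        }
    }
}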
/// Message sent in response to `Load`. Contains metadata, and a port
/// for receiving the data.
///
/// Even if loading fails immediately, we send one of these and the
/// progress_port will provide the error.
#[derive(Serialize, Deserialize)]
pub struct LoadResponse {
/// Metadata, such as from HTTP headers.
pub metadata: Metadata,
/// Port for reading data.
pub progress_port: IpcReceiver<ProgressMsg>,
}
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct ResourceCORSData {
/// CORS Preflight flag
pub preflight: bool,
/// Origin of CORS Request
pub origin: Url,
}
/// Metadata about a loaded resource, such as is obtained from HTTP headers.
#[derive(Clone, Deserialize, Serialize, HeapSizeOf)]
pub struct Metadata {
/// Final URL after redirects.
pub final_url: Url,
/// MIME type / subtype.
pub content_type: Option<(ContentType)>,
/// Character set.
pub charset: Option<String>,
#[ignore_heap_size_of = "Defined in hyper"]
/// Headers
pub headers: Option<Headers>,
/// HTTP Status
pub status: Option<RawStatus>,
}
impl Metadata {
/// Metadata with defaults for everything optional.
pub fn default(url: Url) -> Self {
Metadata {
final_url: url,
content_type: None,
charset: None,
headers: None,
// https://fetch.spec.whatwg.org/#concept-response-status-message
status: Some(RawStatus(200, "OK".into())),
}
}
/// Extract the parts of a Mime that we care about.
pub fn set_content_type(&mut self, content_type: Option<&Mime>) {
match content_type {
None => (),
Some(mime) => {
self.content_type = Some(ContentType(mime.clone()));
let &Mime(_, _, ref parameters) = mime;
for &(ref k, ref v) in parameters {
if &Attr::Charset == k {
self.charset = Some(v.to_string());
}
}
}
}
}
}
/// The creator of a given cookie
#[derive(PartialEq, Copy, Clone, Deserialize, Serialize)]
pub enum CookieSource {
/// An HTTP API
HTTP,
/// A non-HTTP API
NonHTTP,
}
/// Messages sent in response to a `Load` message
#[derive(PartialEq, Debug, Deserialize, Serialize)]
pub enum ProgressMsg {
/// Binary data - there may be multiple of these
Payload(Vec<u8>),
/// Indicates loading is complete, either successfully or not
Done(Result<(), String>)
}
/// Convenience function for synchronously loading a whole resource.
pub fn load_whole_resource(resource_task: &ResourceTask, url: Url)
-> Result<(Metadata, Vec<u8>), String> {
let (start_chan, start_port) = ipc::channel().unwrap();
resource_task.send(ControlMsg::Load(LoadData::new(url, None),
LoadConsumer::Channel(start_chan))).unwrap();
let response = start_port.recv().unwrap();
let mut buf = vec!();
loop {
match response.progress_port.recv().unwrap() {
ProgressMsg::Payload(data) => buf.push_all(&data),
ProgressMsg::Done(Ok(())) => return Ok((response.metadata, buf)),
ProgressMsg::Done(Err(e)) => return Err(e)
}
}
}
/// Load a URL asynchronously and iterate over chunks of bytes from the response.
pub fn load_bytes_iter(pending: PendingAsyncLoad) -> (Metadata, ProgressMsgPortIterator) {
let input_port = pending.load();
let response = input_port.recv().unwrap();
let iter = ProgressMsgPortIterator {
progress_port: response.progress_port
};
(response.metadata, iter)
}
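// Hedged usage sketch (assumption, not upstream code): consuming the chunk
// iterator produced by load_bytes_iter above and summing the bytes received.
#[allow(dead_code)]
fn example_iterate_chunks(pending: PendingAsyncLoad) -> usize {
    let (_metadata, chunks) = load_bytes_iter(pending);
    let mut total = 0;
    for chunk in chunks {
        total += chunk.len();
    }
    total
}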
/// Iterator that reads chunks of bytes from a ProgressMsg port
pub struct ProgressMsgPortIterator {
progress_port: IpcReceiver<ProgressMsg>,
}
impl Iterator for ProgressMsgPortIterator {
type Item = Vec<u8>;
fn
|
(&mut self) -> Option<Vec<u8>> {
match self.progress_port.recv().unwrap() {
ProgressMsg::Payload(data) => Some(data),
ProgressMsg::Done(Ok(())) => None,
ProgressMsg::Done(Err(e)) => {
error!("error receiving bytes: {}", e);
None
}
}
}
}
|
next
|
identifier_name
|
link.rs
|
#![crate_name = "link"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use std::fs::hard_link;
use std::io::Write;
use std::path::Path;
static NAME: &'static str = "link";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(err) => panic!("{}", err),
};
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
    if matches.opt_present("help") || matches.free.len() != 2 {
let msg = format!("{0} {1}
Usage:
{0} [OPTIONS] FILE1 FILE2
Create a link named FILE2 to FILE1.", NAME, VERSION);
println!("{}", opts.usage(&msg));
        if matches.free.len() != 2 {
return 1;
}
return 0;
}
let old = Path::new(&matches.free[0]);
let new = Path::new(&matches.free[1]);
match hard_link(old, new) {
Ok(_) => 0,
Err(err) => {
show_error!("{}", err);
1
}
|
#[allow(dead_code)]
fn main() {
std::process::exit(uumain(std::env::args().collect()));
}
|
}
}
|
random_line_split
|
link.rs
|
#![crate_name = "link"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use std::fs::hard_link;
use std::io::Write;
use std::path::Path;
static NAME: &'static str = "link";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(err) => panic!("{}", err),
};
if matches.opt_present("version")
|
    if matches.opt_present("help") || matches.free.len() != 2 {
let msg = format!("{0} {1}
Usage:
{0} [OPTIONS] FILE1 FILE2
Create a link named FILE2 to FILE1.", NAME, VERSION);
println!("{}", opts.usage(&msg));
        if matches.free.len() != 2 {
return 1;
}
return 0;
}
let old = Path::new(&matches.free[0]);
let new = Path::new(&matches.free[1]);
match hard_link(old, new) {
Ok(_) => 0,
Err(err) => {
show_error!("{}", err);
1
}
}
}
#[allow(dead_code)]
fn main() {
std::process::exit(uumain(std::env::args().collect()));
}
|
{
println!("{} {}", NAME, VERSION);
return 0;
}
|
conditional_block
|
link.rs
|
#![crate_name = "link"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use std::fs::hard_link;
use std::io::Write;
use std::path::Path;
static NAME: &'static str = "link";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn
|
(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(err) => panic!("{}", err),
};
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
    if matches.opt_present("help") || matches.free.len() != 2 {
let msg = format!("{0} {1}
Usage:
{0} [OPTIONS] FILE1 FILE2
Create a link named FILE2 to FILE1.", NAME, VERSION);
println!("{}", opts.usage(&msg));
        if matches.free.len() != 2 {
return 1;
}
return 0;
}
let old = Path::new(&matches.free[0]);
let new = Path::new(&matches.free[1]);
match hard_link(old, new) {
Ok(_) => 0,
Err(err) => {
show_error!("{}", err);
1
}
}
}
#[allow(dead_code)]
fn main() {
std::process::exit(uumain(std::env::args().collect()));
}
|
uumain
|
identifier_name
|
link.rs
|
#![crate_name = "link"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use std::fs::hard_link;
use std::io::Write;
use std::path::Path;
static NAME: &'static str = "link";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32
|
{0} [OPTIONS] FILE1 FILE2
Create a link named FILE2 to FILE1.", NAME, VERSION);
println!("{}", opts.usage(&msg));
        if matches.free.len() != 2 {
return 1;
}
return 0;
}
let old = Path::new(&matches.free[0]);
let new = Path::new(&matches.free[1]);
match hard_link(old, new) {
Ok(_) => 0,
Err(err) => {
show_error!("{}", err);
1
}
}
}
#[allow(dead_code)]
fn main() {
std::process::exit(uumain(std::env::args().collect()));
}
|
{
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(err) => panic!("{}", err),
};
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
if matches.opt_present("help") || matches.free.len() != 2 {
let msg = format!("{0} {1}
Usage:
|
identifier_body
|
issue-14959.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unboxed_closures)]
use std::ops::Fn;
trait Response {}
trait Request {}
trait Ingot<R, S> {
fn enter(&mut self, _: &mut R, _: &mut S, a: &mut Alloy) -> Status;
}
#[allow(dead_code)]
struct HelloWorld;
struct SendFile<'a>;
struct Alloy;
enum Status {
Continue
}
|
}
}
impl<'a, 'b> Fn<(&'b mut Response+'b,),()> for SendFile<'a> {
extern "rust-call" fn call(&self, (_res,): (&'b mut Response+'b,)) {}
}
impl<Rq: Request, Rs: Response> Ingot<Rq, Rs> for HelloWorld {
fn enter(&mut self, _req: &mut Rq, res: &mut Rs, alloy: &mut Alloy) -> Status {
let send_file = alloy.find::<SendFile>().unwrap();
send_file(res);
Status::Continue
}
}
fn main() {}
|
impl Alloy {
fn find<T>(&self) -> Option<T> {
None
|
random_line_split
|
issue-14959.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unboxed_closures)]
use std::ops::Fn;
trait Response {}
trait Request {}
trait Ingot<R, S> {
fn enter(&mut self, _: &mut R, _: &mut S, a: &mut Alloy) -> Status;
}
#[allow(dead_code)]
struct HelloWorld;
struct SendFile<'a>;
struct Alloy;
enum Status {
Continue
}
impl Alloy {
fn find<T>(&self) -> Option<T> {
None
}
}
impl<'a, 'b> Fn<(&'b mut Response+'b,),()> for SendFile<'a> {
extern "rust-call" fn call(&self, (_res,): (&'b mut Response+'b,)) {}
}
impl<Rq: Request, Rs: Response> Ingot<Rq, Rs> for HelloWorld {
fn enter(&mut self, _req: &mut Rq, res: &mut Rs, alloy: &mut Alloy) -> Status
|
}
fn main() {}
|
{
let send_file = alloy.find::<SendFile>().unwrap();
send_file(res);
Status::Continue
}
|
identifier_body
|
issue-14959.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unboxed_closures)]
use std::ops::Fn;
trait Response {}
trait Request {}
trait Ingot<R, S> {
fn enter(&mut self, _: &mut R, _: &mut S, a: &mut Alloy) -> Status;
}
#[allow(dead_code)]
struct HelloWorld;
struct SendFile<'a>;
struct Alloy;
enum Status {
Continue
}
impl Alloy {
fn find<T>(&self) -> Option<T> {
None
}
}
impl<'a, 'b> Fn<(&'b mut Response+'b,),()> for SendFile<'a> {
extern "rust-call" fn call(&self, (_res,): (&'b mut Response+'b,)) {}
}
impl<Rq: Request, Rs: Response> Ingot<Rq, Rs> for HelloWorld {
fn enter(&mut self, _req: &mut Rq, res: &mut Rs, alloy: &mut Alloy) -> Status {
let send_file = alloy.find::<SendFile>().unwrap();
send_file(res);
Status::Continue
}
}
fn
|
() {}
|
main
|
identifier_name
|
net.rs
|
use super::abi;
use crate::{
cmp,
ffi::CStr,
io::{self, ErrorKind, IoSlice, IoSliceMut},
mem,
net::{Shutdown, SocketAddr},
ptr, str,
sys_common::net::{getsockopt, setsockopt, sockaddr_to_addr},
sys_common::{AsInner, FromInner, IntoInner},
time::Duration,
};
use self::netc::{sockaddr, socklen_t, MSG_PEEK};
use libc::{c_int, c_void, size_t};
pub mod netc {
pub use super::super::abi::sockets::*;
}
pub type wrlen_t = size_t;
const READ_LIMIT: usize = libc::ssize_t::MAX as usize;
const fn max_iov() -> usize {
// Judging by the source code, it's unlimited, but specify a lower
// value just in case.
1024
}
/// A file descriptor.
#[rustc_layout_scalar_valid_range_start(0)]
// libstd/os/raw/mod.rs assures me that every libstd-supported platform has a
// 32-bit c_int. Below is -2, in two's complement, but that only works out
// because c_int is 32 bits.
#[rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE)]
struct FileDesc {
fd: c_int,
}
impl FileDesc {
#[inline]
fn new(fd: c_int) -> FileDesc {
assert_ne!(fd, -1i32);
// Safety: we just asserted that the value is in the valid range and
// isn't `-1` (the only value bigger than `0xFF_FF_FF_FE` unsigned)
unsafe { FileDesc { fd } }
}
#[inline]
fn raw(&self) -> c_int {
self.fd
}
/// Extracts the actual file descriptor without closing it.
#[inline]
fn into_raw(self) -> c_int {
let fd = self.fd;
mem::forget(self);
fd
}
fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::read(self.fd, buf.as_mut_ptr() as *mut c_void, cmp::min(buf.len(), READ_LIMIT))
})?;
Ok(ret as usize)
}
fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::readv(
self.fd,
bufs.as_ptr() as *const netc::iovec,
cmp::min(bufs.len(), max_iov()) as c_int,
)
})?;
Ok(ret as usize)
}
#[inline]
fn is_read_vectored(&self) -> bool {
true
}
fn write(&self, buf: &[u8]) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::write(self.fd, buf.as_ptr() as *const c_void, cmp::min(buf.len(), READ_LIMIT))
})?;
Ok(ret as usize)
}
fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::writev(
self.fd,
bufs.as_ptr() as *const netc::iovec,
cmp::min(bufs.len(), max_iov()) as c_int,
)
})?;
Ok(ret as usize)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
fn duplicate(&self) -> io::Result<FileDesc> {
super::unsupported()
}
}
impl AsInner<c_int> for FileDesc {
fn as_inner(&self) -> &c_int {
&self.fd
}
}
impl Drop for FileDesc {
fn drop(&mut self) {
unsafe { netc::close(self.fd) };
}
}
#[doc(hidden)]
pub trait IsMinusOne {
fn is_minus_one(&self) -> bool;
}
macro_rules! impl_is_minus_one {
($($t:ident)*) => ($(impl IsMinusOne for $t {
fn is_minus_one(&self) -> bool {
*self == -1
}
})*)
}
impl_is_minus_one! { i8 i16 i32 i64 isize }
pub fn cvt<T: IsMinusOne>(t: T) -> io::Result<T> {
if t.is_minus_one() { Err(last_error()) } else { Ok(t) }
}
/// A variant of `cvt` for `getaddrinfo`, which returns 0 on success.
pub fn cvt_gai(err: c_int) -> io::Result<()>
|
/// Just to provide the same interface as sys/unix/net.rs
pub fn cvt_r<T, F>(mut f: F) -> io::Result<T>
where
T: IsMinusOne,
F: FnMut() -> T,
{
cvt(f())
}
/// Returns the last error from the network subsystem.
fn last_error() -> io::Error {
io::Error::from_raw_os_error(unsafe { netc::SOLID_NET_GetLastError() })
}
pub(super) fn error_name(er: abi::ER) -> Option<&'static str> {
unsafe { CStr::from_ptr(netc::strerror(er)) }.to_str().ok()
}
pub(super) fn decode_error_kind(er: abi::ER) -> ErrorKind {
let errno = netc::SOLID_NET_ERR_BASE - er;
match errno as libc::c_int {
libc::ECONNREFUSED => ErrorKind::ConnectionRefused,
libc::ECONNRESET => ErrorKind::ConnectionReset,
libc::EPERM | libc::EACCES => ErrorKind::PermissionDenied,
libc::EPIPE => ErrorKind::BrokenPipe,
libc::ENOTCONN => ErrorKind::NotConnected,
libc::ECONNABORTED => ErrorKind::ConnectionAborted,
libc::EADDRNOTAVAIL => ErrorKind::AddrNotAvailable,
libc::EADDRINUSE => ErrorKind::AddrInUse,
libc::ENOENT => ErrorKind::NotFound,
libc::EINTR => ErrorKind::Interrupted,
libc::EINVAL => ErrorKind::InvalidInput,
libc::ETIMEDOUT => ErrorKind::TimedOut,
libc::EEXIST => ErrorKind::AlreadyExists,
libc::ENOSYS => ErrorKind::Unsupported,
libc::ENOMEM => ErrorKind::OutOfMemory,
libc::EAGAIN => ErrorKind::WouldBlock,
_ => ErrorKind::Uncategorized,
}
}
pub fn init() {}
pub struct Socket(FileDesc);
impl Socket {
pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> {
let fam = match *addr {
SocketAddr::V4(..) => netc::AF_INET,
SocketAddr::V6(..) => netc::AF_INET6,
};
Socket::new_raw(fam, ty)
}
pub fn new_raw(fam: c_int, ty: c_int) -> io::Result<Socket> {
unsafe {
let fd = cvt(netc::socket(fam, ty, 0))?;
let fd = FileDesc::new(fd);
let socket = Socket(fd);
Ok(socket)
}
}
pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> {
self.set_nonblocking(true)?;
let r = unsafe {
let (addrp, len) = addr.into_inner();
cvt(netc::connect(self.0.raw(), addrp, len))
};
self.set_nonblocking(false)?;
match r {
Ok(_) => return Ok(()),
// there's no ErrorKind for EINPROGRESS
Err(ref e) if e.raw_os_error() == Some(netc::EINPROGRESS) => {}
Err(e) => return Err(e),
}
if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 {
return Err(io::Error::new_const(
io::ErrorKind::InvalidInput,
&"cannot set a 0 duration timeout",
));
}
let mut timeout =
netc::timeval { tv_sec: timeout.as_secs() as _, tv_usec: timeout.subsec_micros() as _ };
if timeout.tv_sec == 0 && timeout.tv_usec == 0 {
timeout.tv_usec = 1;
}
let fds = netc::fd_set { num_fds: 1, fds: [self.0.raw()] };
let mut writefds = fds;
let mut errorfds = fds;
let n = unsafe {
cvt(netc::select(
self.0.raw() + 1,
ptr::null_mut(),
&mut writefds,
&mut errorfds,
&mut timeout,
))?
};
match n {
0 => Err(io::Error::new_const(io::ErrorKind::TimedOut, &"connection timed out")),
_ => {
                let can_write = writefds.num_fds != 0;
                if !can_write {
if let Some(e) = self.take_error()? {
return Err(e);
}
}
Ok(())
}
}
}
pub fn accept(&self, storage: *mut sockaddr, len: *mut socklen_t) -> io::Result<Socket> {
let fd = cvt_r(|| unsafe { netc::accept(self.0.raw(), storage, len) })?;
let fd = FileDesc::new(fd);
Ok(Socket(fd))
}
pub fn duplicate(&self) -> io::Result<Socket> {
self.0.duplicate().map(Socket)
}
fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::recv(self.0.raw(), buf.as_mut_ptr() as *mut c_void, buf.len(), flags)
})?;
Ok(ret as usize)
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.recv_with_flags(buf, 0)
}
pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
self.recv_with_flags(buf, MSG_PEEK)
}
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.0.read_vectored(bufs)
}
#[inline]
pub fn is_read_vectored(&self) -> bool {
self.0.is_read_vectored()
}
fn recv_from_with_flags(
&self,
buf: &mut [u8],
flags: c_int,
) -> io::Result<(usize, SocketAddr)> {
let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() };
let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t;
let n = cvt(unsafe {
netc::recvfrom(
self.0.raw(),
buf.as_mut_ptr() as *mut c_void,
buf.len(),
flags,
&mut storage as *mut _ as *mut _,
&mut addrlen,
)
})?;
Ok((n as usize, sockaddr_to_addr(&storage, addrlen as usize)?))
}
pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.recv_from_with_flags(buf, 0)
}
pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.recv_from_with_flags(buf, MSG_PEEK)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.0.write_vectored(bufs)
}
#[inline]
pub fn is_write_vectored(&self) -> bool {
self.0.is_write_vectored()
}
pub fn set_timeout(&self, dur: Option<Duration>, kind: c_int) -> io::Result<()> {
let timeout = match dur {
Some(dur) => {
if dur.as_secs() == 0 && dur.subsec_nanos() == 0 {
return Err(io::Error::new_const(
io::ErrorKind::InvalidInput,
&"cannot set a 0 duration timeout",
));
}
let secs = if dur.as_secs() > netc::c_long::MAX as u64 {
netc::c_long::MAX
} else {
dur.as_secs() as netc::c_long
};
let mut timeout = netc::timeval { tv_sec: secs, tv_usec: dur.subsec_micros() as _ };
if timeout.tv_sec == 0 && timeout.tv_usec == 0 {
timeout.tv_usec = 1;
}
timeout
}
None => netc::timeval { tv_sec: 0, tv_usec: 0 },
};
setsockopt(self, netc::SOL_SOCKET, kind, timeout)
}
pub fn timeout(&self, kind: c_int) -> io::Result<Option<Duration>> {
let raw: netc::timeval = getsockopt(self, netc::SOL_SOCKET, kind)?;
if raw.tv_sec == 0 && raw.tv_usec == 0 {
Ok(None)
} else {
let sec = raw.tv_sec as u64;
let nsec = (raw.tv_usec as u32) * 1000;
Ok(Some(Duration::new(sec, nsec)))
}
}
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
let how = match how {
Shutdown::Write => netc::SHUT_WR,
Shutdown::Read => netc::SHUT_RD,
Shutdown::Both => netc::SHUT_RDWR,
};
cvt(unsafe { netc::shutdown(self.0.raw(), how) })?;
Ok(())
}
pub fn set_linger(&self, linger: Option<Duration>) -> io::Result<()> {
let linger = netc::linger {
l_onoff: linger.is_some() as netc::c_int,
l_linger: linger.unwrap_or_default().as_secs() as netc::c_int,
};
setsockopt(self, netc::SOL_SOCKET, netc::SO_LINGER, linger)
}
pub fn linger(&self) -> io::Result<Option<Duration>> {
let val: netc::linger = getsockopt(self, netc::SOL_SOCKET, netc::SO_LINGER)?;
        Ok((val.l_onoff != 0).then(|| Duration::from_secs(val.l_linger as u64)))
}
pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
setsockopt(self, netc::IPPROTO_TCP, netc::TCP_NODELAY, nodelay as c_int)
}
pub fn nodelay(&self) -> io::Result<bool> {
let raw: c_int = getsockopt(self, netc::IPPROTO_TCP, netc::TCP_NODELAY)?;
        Ok(raw != 0)
}
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
let mut nonblocking = nonblocking as c_int;
cvt(unsafe {
netc::ioctl(*self.as_inner(), netc::FIONBIO, (&mut nonblocking) as *mut c_int as _)
})
.map(drop)
}
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
let raw: c_int = getsockopt(self, netc::SOL_SOCKET, netc::SO_ERROR)?;
if raw == 0 { Ok(None) } else { Ok(Some(io::Error::from_raw_os_error(raw as i32))) }
}
// This method is used by sys_common code to abstract over targets.
pub fn as_raw(&self) -> c_int {
*self.as_inner()
}
}
impl AsInner<c_int> for Socket {
fn as_inner(&self) -> &c_int {
self.0.as_inner()
}
}
impl FromInner<c_int> for Socket {
fn from_inner(fd: c_int) -> Socket {
Socket(FileDesc::new(fd))
}
}
impl IntoInner<c_int> for Socket {
fn into_inner(self) -> c_int {
self.0.into_raw()
}
}
|
{
if err == 0 {
Ok(())
} else {
let msg: &dyn crate::fmt::Display = match err {
netc::EAI_NONAME => &"name or service not known",
netc::EAI_SERVICE => &"service not supported",
netc::EAI_FAIL => &"non-recoverable failure in name resolution",
netc::EAI_MEMORY => &"memory allocation failure",
netc::EAI_FAMILY => &"family not supported",
_ => &err,
};
Err(io::Error::new(
io::ErrorKind::Uncategorized,
&format!("failed to lookup address information: {}", msg)[..],
))
}
}
|
identifier_body
|
net.rs
|
use super::abi;
use crate::{
cmp,
ffi::CStr,
io::{self, ErrorKind, IoSlice, IoSliceMut},
mem,
net::{Shutdown, SocketAddr},
ptr, str,
sys_common::net::{getsockopt, setsockopt, sockaddr_to_addr},
sys_common::{AsInner, FromInner, IntoInner},
time::Duration,
};
use self::netc::{sockaddr, socklen_t, MSG_PEEK};
use libc::{c_int, c_void, size_t};
pub mod netc {
pub use super::super::abi::sockets::*;
}
pub type wrlen_t = size_t;
const READ_LIMIT: usize = libc::ssize_t::MAX as usize;
const fn max_iov() -> usize {
// Judging by the source code, it's unlimited, but specify a lower
// value just in case.
1024
}
/// A file descriptor.
#[rustc_layout_scalar_valid_range_start(0)]
// libstd/os/raw/mod.rs assures me that every libstd-supported platform has a
// 32-bit c_int. Below is -2, in two's complement, but that only works out
// because c_int is 32 bits.
#[rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE)]
struct FileDesc {
fd: c_int,
}
impl FileDesc {
#[inline]
fn new(fd: c_int) -> FileDesc {
assert_ne!(fd, -1i32);
// Safety: we just asserted that the value is in the valid range and
// isn't `-1` (the only value bigger than `0xFF_FF_FF_FE` unsigned)
unsafe { FileDesc { fd } }
}
#[inline]
fn raw(&self) -> c_int {
self.fd
}
/// Extracts the actual file descriptor without closing it.
#[inline]
fn into_raw(self) -> c_int {
let fd = self.fd;
mem::forget(self);
fd
}
fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::read(self.fd, buf.as_mut_ptr() as *mut c_void, cmp::min(buf.len(), READ_LIMIT))
})?;
Ok(ret as usize)
}
fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::readv(
self.fd,
bufs.as_ptr() as *const netc::iovec,
cmp::min(bufs.len(), max_iov()) as c_int,
)
})?;
Ok(ret as usize)
}
#[inline]
fn is_read_vectored(&self) -> bool {
true
}
fn write(&self, buf: &[u8]) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::write(self.fd, buf.as_ptr() as *const c_void, cmp::min(buf.len(), READ_LIMIT))
})?;
Ok(ret as usize)
}
fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::writev(
self.fd,
bufs.as_ptr() as *const netc::iovec,
cmp::min(bufs.len(), max_iov()) as c_int,
)
})?;
Ok(ret as usize)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
fn duplicate(&self) -> io::Result<FileDesc> {
super::unsupported()
}
}
impl AsInner<c_int> for FileDesc {
fn as_inner(&self) -> &c_int {
&self.fd
}
}
impl Drop for FileDesc {
fn drop(&mut self) {
unsafe { netc::close(self.fd) };
}
}
#[doc(hidden)]
pub trait IsMinusOne {
fn is_minus_one(&self) -> bool;
}
macro_rules! impl_is_minus_one {
($($t:ident)*) => ($(impl IsMinusOne for $t {
fn is_minus_one(&self) -> bool {
*self == -1
}
})*)
}
impl_is_minus_one! { i8 i16 i32 i64 isize }
pub fn cvt<T: IsMinusOne>(t: T) -> io::Result<T> {
if t.is_minus_one() { Err(last_error()) } else { Ok(t) }
}
/// A variant of `cvt` for `getaddrinfo`, which returns 0 on success.
pub fn cvt_gai(err: c_int) -> io::Result<()> {
if err == 0 {
Ok(())
} else {
let msg: &dyn crate::fmt::Display = match err {
netc::EAI_NONAME => &"name or service not known",
netc::EAI_SERVICE => &"service not supported",
netc::EAI_FAIL => &"non-recoverable failure in name resolution",
netc::EAI_MEMORY => &"memory allocation failure",
netc::EAI_FAMILY => &"family not supported",
_ => &err,
};
Err(io::Error::new(
io::ErrorKind::Uncategorized,
&format!("failed to lookup address information: {}", msg)[..],
))
}
}
|
F: FnMut() -> T,
{
cvt(f())
}
/// Returns the last error from the network subsystem.
fn last_error() -> io::Error {
io::Error::from_raw_os_error(unsafe { netc::SOLID_NET_GetLastError() })
}
pub(super) fn error_name(er: abi::ER) -> Option<&'static str> {
unsafe { CStr::from_ptr(netc::strerror(er)) }.to_str().ok()
}
pub(super) fn decode_error_kind(er: abi::ER) -> ErrorKind {
let errno = netc::SOLID_NET_ERR_BASE - er;
match errno as libc::c_int {
libc::ECONNREFUSED => ErrorKind::ConnectionRefused,
libc::ECONNRESET => ErrorKind::ConnectionReset,
libc::EPERM | libc::EACCES => ErrorKind::PermissionDenied,
libc::EPIPE => ErrorKind::BrokenPipe,
libc::ENOTCONN => ErrorKind::NotConnected,
libc::ECONNABORTED => ErrorKind::ConnectionAborted,
libc::EADDRNOTAVAIL => ErrorKind::AddrNotAvailable,
libc::EADDRINUSE => ErrorKind::AddrInUse,
libc::ENOENT => ErrorKind::NotFound,
libc::EINTR => ErrorKind::Interrupted,
libc::EINVAL => ErrorKind::InvalidInput,
libc::ETIMEDOUT => ErrorKind::TimedOut,
libc::EEXIST => ErrorKind::AlreadyExists,
libc::ENOSYS => ErrorKind::Unsupported,
libc::ENOMEM => ErrorKind::OutOfMemory,
libc::EAGAIN => ErrorKind::WouldBlock,
_ => ErrorKind::Uncategorized,
}
}
pub fn init() {}
pub struct Socket(FileDesc);
impl Socket {
pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> {
let fam = match *addr {
SocketAddr::V4(..) => netc::AF_INET,
SocketAddr::V6(..) => netc::AF_INET6,
};
Socket::new_raw(fam, ty)
}
pub fn new_raw(fam: c_int, ty: c_int) -> io::Result<Socket> {
unsafe {
let fd = cvt(netc::socket(fam, ty, 0))?;
let fd = FileDesc::new(fd);
let socket = Socket(fd);
Ok(socket)
}
}
pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> {
self.set_nonblocking(true)?;
let r = unsafe {
let (addrp, len) = addr.into_inner();
cvt(netc::connect(self.0.raw(), addrp, len))
};
self.set_nonblocking(false)?;
match r {
Ok(_) => return Ok(()),
// there's no ErrorKind for EINPROGRESS
Err(ref e) if e.raw_os_error() == Some(netc::EINPROGRESS) => {}
Err(e) => return Err(e),
}
if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 {
return Err(io::Error::new_const(
io::ErrorKind::InvalidInput,
&"cannot set a 0 duration timeout",
));
}
let mut timeout =
netc::timeval { tv_sec: timeout.as_secs() as _, tv_usec: timeout.subsec_micros() as _ };
if timeout.tv_sec == 0 && timeout.tv_usec == 0 {
timeout.tv_usec = 1;
}
let fds = netc::fd_set { num_fds: 1, fds: [self.0.raw()] };
let mut writefds = fds;
let mut errorfds = fds;
let n = unsafe {
cvt(netc::select(
self.0.raw() + 1,
ptr::null_mut(),
&mut writefds,
&mut errorfds,
&mut timeout,
))?
};
match n {
0 => Err(io::Error::new_const(io::ErrorKind::TimedOut, &"connection timed out")),
_ => {
                let can_write = writefds.num_fds != 0;
                if !can_write {
if let Some(e) = self.take_error()? {
return Err(e);
}
}
Ok(())
}
}
}
pub fn accept(&self, storage: *mut sockaddr, len: *mut socklen_t) -> io::Result<Socket> {
let fd = cvt_r(|| unsafe { netc::accept(self.0.raw(), storage, len) })?;
let fd = FileDesc::new(fd);
Ok(Socket(fd))
}
pub fn duplicate(&self) -> io::Result<Socket> {
self.0.duplicate().map(Socket)
}
fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::recv(self.0.raw(), buf.as_mut_ptr() as *mut c_void, buf.len(), flags)
})?;
Ok(ret as usize)
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.recv_with_flags(buf, 0)
}
pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
self.recv_with_flags(buf, MSG_PEEK)
}
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.0.read_vectored(bufs)
}
#[inline]
pub fn is_read_vectored(&self) -> bool {
self.0.is_read_vectored()
}
fn recv_from_with_flags(
&self,
buf: &mut [u8],
flags: c_int,
) -> io::Result<(usize, SocketAddr)> {
let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() };
let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t;
let n = cvt(unsafe {
netc::recvfrom(
self.0.raw(),
buf.as_mut_ptr() as *mut c_void,
buf.len(),
flags,
&mut storage as *mut _ as *mut _,
&mut addrlen,
)
})?;
Ok((n as usize, sockaddr_to_addr(&storage, addrlen as usize)?))
}
pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.recv_from_with_flags(buf, 0)
}
pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.recv_from_with_flags(buf, MSG_PEEK)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.0.write_vectored(bufs)
}
#[inline]
pub fn is_write_vectored(&self) -> bool {
self.0.is_write_vectored()
}
pub fn set_timeout(&self, dur: Option<Duration>, kind: c_int) -> io::Result<()> {
let timeout = match dur {
Some(dur) => {
if dur.as_secs() == 0 && dur.subsec_nanos() == 0 {
return Err(io::Error::new_const(
io::ErrorKind::InvalidInput,
&"cannot set a 0 duration timeout",
));
}
let secs = if dur.as_secs() > netc::c_long::MAX as u64 {
netc::c_long::MAX
} else {
dur.as_secs() as netc::c_long
};
let mut timeout = netc::timeval { tv_sec: secs, tv_usec: dur.subsec_micros() as _ };
if timeout.tv_sec == 0 && timeout.tv_usec == 0 {
timeout.tv_usec = 1;
}
timeout
}
None => netc::timeval { tv_sec: 0, tv_usec: 0 },
};
setsockopt(self, netc::SOL_SOCKET, kind, timeout)
}
pub fn timeout(&self, kind: c_int) -> io::Result<Option<Duration>> {
let raw: netc::timeval = getsockopt(self, netc::SOL_SOCKET, kind)?;
if raw.tv_sec == 0 && raw.tv_usec == 0 {
Ok(None)
} else {
let sec = raw.tv_sec as u64;
let nsec = (raw.tv_usec as u32) * 1000;
Ok(Some(Duration::new(sec, nsec)))
}
}
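    // Hedged worked example, added for clarity (not upstream documentation): a
    // Duration::new(2, 500_000_000) passed to set_timeout above is stored as
    // timeval { tv_sec: 2, tv_usec: 500_000 }; reading it back here yields
    // Duration::new(2, 500_000 * 1000), i.e. 2.5s again, while an all-zero
    // timeval is reported as None (meaning no timeout is set).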
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
let how = match how {
Shutdown::Write => netc::SHUT_WR,
Shutdown::Read => netc::SHUT_RD,
Shutdown::Both => netc::SHUT_RDWR,
};
cvt(unsafe { netc::shutdown(self.0.raw(), how) })?;
Ok(())
}
pub fn set_linger(&self, linger: Option<Duration>) -> io::Result<()> {
let linger = netc::linger {
l_onoff: linger.is_some() as netc::c_int,
l_linger: linger.unwrap_or_default().as_secs() as netc::c_int,
};
setsockopt(self, netc::SOL_SOCKET, netc::SO_LINGER, linger)
}
pub fn linger(&self) -> io::Result<Option<Duration>> {
let val: netc::linger = getsockopt(self, netc::SOL_SOCKET, netc::SO_LINGER)?;
        Ok((val.l_onoff != 0).then(|| Duration::from_secs(val.l_linger as u64)))
}
pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
setsockopt(self, netc::IPPROTO_TCP, netc::TCP_NODELAY, nodelay as c_int)
}
pub fn nodelay(&self) -> io::Result<bool> {
let raw: c_int = getsockopt(self, netc::IPPROTO_TCP, netc::TCP_NODELAY)?;
        Ok(raw != 0)
}
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
let mut nonblocking = nonblocking as c_int;
cvt(unsafe {
netc::ioctl(*self.as_inner(), netc::FIONBIO, (&mut nonblocking) as *mut c_int as _)
})
.map(drop)
}
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
let raw: c_int = getsockopt(self, netc::SOL_SOCKET, netc::SO_ERROR)?;
if raw == 0 { Ok(None) } else { Ok(Some(io::Error::from_raw_os_error(raw as i32))) }
}
// This method is used by sys_common code to abstract over targets.
pub fn as_raw(&self) -> c_int {
*self.as_inner()
}
}
impl AsInner<c_int> for Socket {
fn as_inner(&self) -> &c_int {
self.0.as_inner()
}
}
impl FromInner<c_int> for Socket {
fn from_inner(fd: c_int) -> Socket {
Socket(FileDesc::new(fd))
}
}
impl IntoInner<c_int> for Socket {
fn into_inner(self) -> c_int {
self.0.into_raw()
}
}
|
/// Just to provide the same interface as sys/unix/net.rs
pub fn cvt_r<T, F>(mut f: F) -> io::Result<T>
where
T: IsMinusOne,
|
random_line_split
|
net.rs
|
use super::abi;
use crate::{
cmp,
ffi::CStr,
io::{self, ErrorKind, IoSlice, IoSliceMut},
mem,
net::{Shutdown, SocketAddr},
ptr, str,
sys_common::net::{getsockopt, setsockopt, sockaddr_to_addr},
sys_common::{AsInner, FromInner, IntoInner},
time::Duration,
};
use self::netc::{sockaddr, socklen_t, MSG_PEEK};
use libc::{c_int, c_void, size_t};
pub mod netc {
pub use super::super::abi::sockets::*;
}
pub type wrlen_t = size_t;
const READ_LIMIT: usize = libc::ssize_t::MAX as usize;
const fn max_iov() -> usize {
// Judging by the source code, it's unlimited, but specify a lower
// value just in case.
1024
}
/// A file descriptor.
#[rustc_layout_scalar_valid_range_start(0)]
// libstd/os/raw/mod.rs assures me that every libstd-supported platform has a
// 32-bit c_int. Below is -2, in two's complement, but that only works out
// because c_int is 32 bits.
#[rustc_layout_scalar_valid_range_end(0xFF_FF_FF_FE)]
struct FileDesc {
fd: c_int,
}
impl FileDesc {
#[inline]
fn new(fd: c_int) -> FileDesc {
assert_ne!(fd, -1i32);
// Safety: we just asserted that the value is in the valid range and
// isn't `-1` (the only value bigger than `0xFF_FF_FF_FE` unsigned)
unsafe { FileDesc { fd } }
}
#[inline]
fn raw(&self) -> c_int {
self.fd
}
/// Extracts the actual file descriptor without closing it.
#[inline]
fn into_raw(self) -> c_int {
let fd = self.fd;
mem::forget(self);
fd
}
fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::read(self.fd, buf.as_mut_ptr() as *mut c_void, cmp::min(buf.len(), READ_LIMIT))
})?;
Ok(ret as usize)
}
fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::readv(
self.fd,
bufs.as_ptr() as *const netc::iovec,
cmp::min(bufs.len(), max_iov()) as c_int,
)
})?;
Ok(ret as usize)
}
#[inline]
fn is_read_vectored(&self) -> bool {
true
}
fn write(&self, buf: &[u8]) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::write(self.fd, buf.as_ptr() as *const c_void, cmp::min(buf.len(), READ_LIMIT))
})?;
Ok(ret as usize)
}
fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::writev(
self.fd,
bufs.as_ptr() as *const netc::iovec,
cmp::min(bufs.len(), max_iov()) as c_int,
)
})?;
Ok(ret as usize)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
fn duplicate(&self) -> io::Result<FileDesc> {
super::unsupported()
}
}
impl AsInner<c_int> for FileDesc {
fn as_inner(&self) -> &c_int {
&self.fd
}
}
impl Drop for FileDesc {
fn drop(&mut self) {
unsafe { netc::close(self.fd) };
}
}
#[doc(hidden)]
pub trait IsMinusOne {
fn is_minus_one(&self) -> bool;
}
macro_rules! impl_is_minus_one {
($($t:ident)*) => ($(impl IsMinusOne for $t {
fn is_minus_one(&self) -> bool {
*self == -1
}
})*)
}
impl_is_minus_one! { i8 i16 i32 i64 isize }
pub fn cvt<T: IsMinusOne>(t: T) -> io::Result<T> {
if t.is_minus_one() { Err(last_error()) } else { Ok(t) }
}
/// A variant of `cvt` for `getaddrinfo`, which returns 0 on success.
pub fn cvt_gai(err: c_int) -> io::Result<()> {
if err == 0 {
Ok(())
} else {
let msg: &dyn crate::fmt::Display = match err {
netc::EAI_NONAME => &"name or service not known",
netc::EAI_SERVICE => &"service not supported",
netc::EAI_FAIL => &"non-recoverable failure in name resolution",
netc::EAI_MEMORY => &"memory allocation failure",
netc::EAI_FAMILY => &"family not supported",
_ => &err,
};
Err(io::Error::new(
io::ErrorKind::Uncategorized,
&format!("failed to lookup address information: {}", msg)[..],
))
}
}
/// Just to provide the same interface as sys/unix/net.rs
pub fn cvt_r<T, F>(mut f: F) -> io::Result<T>
where
T: IsMinusOne,
F: FnMut() -> T,
{
cvt(f())
}
/// Returns the last error from the network subsystem.
fn last_error() -> io::Error {
io::Error::from_raw_os_error(unsafe { netc::SOLID_NET_GetLastError() })
}
pub(super) fn error_name(er: abi::ER) -> Option<&'static str> {
unsafe { CStr::from_ptr(netc::strerror(er)) }.to_str().ok()
}
pub(super) fn decode_error_kind(er: abi::ER) -> ErrorKind {
let errno = netc::SOLID_NET_ERR_BASE - er;
match errno as libc::c_int {
libc::ECONNREFUSED => ErrorKind::ConnectionRefused,
libc::ECONNRESET => ErrorKind::ConnectionReset,
libc::EPERM | libc::EACCES => ErrorKind::PermissionDenied,
libc::EPIPE => ErrorKind::BrokenPipe,
libc::ENOTCONN => ErrorKind::NotConnected,
libc::ECONNABORTED => ErrorKind::ConnectionAborted,
libc::EADDRNOTAVAIL => ErrorKind::AddrNotAvailable,
libc::EADDRINUSE => ErrorKind::AddrInUse,
libc::ENOENT => ErrorKind::NotFound,
libc::EINTR => ErrorKind::Interrupted,
libc::EINVAL => ErrorKind::InvalidInput,
libc::ETIMEDOUT => ErrorKind::TimedOut,
libc::EEXIST => ErrorKind::AlreadyExists,
libc::ENOSYS => ErrorKind::Unsupported,
libc::ENOMEM => ErrorKind::OutOfMemory,
libc::EAGAIN => ErrorKind::WouldBlock,
_ => ErrorKind::Uncategorized,
}
}
pub fn init() {}
pub struct Socket(FileDesc);
impl Socket {
pub fn new(addr: &SocketAddr, ty: c_int) -> io::Result<Socket> {
let fam = match *addr {
SocketAddr::V4(..) => netc::AF_INET,
SocketAddr::V6(..) => netc::AF_INET6,
};
Socket::new_raw(fam, ty)
}
pub fn new_raw(fam: c_int, ty: c_int) -> io::Result<Socket> {
unsafe {
let fd = cvt(netc::socket(fam, ty, 0))?;
let fd = FileDesc::new(fd);
let socket = Socket(fd);
Ok(socket)
}
}
pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> {
self.set_nonblocking(true)?;
let r = unsafe {
let (addrp, len) = addr.into_inner();
cvt(netc::connect(self.0.raw(), addrp, len))
};
self.set_nonblocking(false)?;
match r {
Ok(_) => return Ok(()),
// there's no ErrorKind for EINPROGRESS
Err(ref e) if e.raw_os_error() == Some(netc::EINPROGRESS) => {}
Err(e) => return Err(e),
}
if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 {
return Err(io::Error::new_const(
io::ErrorKind::InvalidInput,
&"cannot set a 0 duration timeout",
));
}
let mut timeout =
netc::timeval { tv_sec: timeout.as_secs() as _, tv_usec: timeout.subsec_micros() as _ };
if timeout.tv_sec == 0 && timeout.tv_usec == 0 {
timeout.tv_usec = 1;
}
let fds = netc::fd_set { num_fds: 1, fds: [self.0.raw()] };
let mut writefds = fds;
let mut errorfds = fds;
let n = unsafe {
cvt(netc::select(
self.0.raw() + 1,
ptr::null_mut(),
&mut writefds,
&mut errorfds,
&mut timeout,
))?
};
match n {
0 => Err(io::Error::new_const(io::ErrorKind::TimedOut, &"connection timed out")),
_ => {
                let can_write = writefds.num_fds != 0;
                if !can_write {
if let Some(e) = self.take_error()? {
return Err(e);
}
}
Ok(())
}
}
}
pub fn accept(&self, storage: *mut sockaddr, len: *mut socklen_t) -> io::Result<Socket> {
let fd = cvt_r(|| unsafe { netc::accept(self.0.raw(), storage, len) })?;
let fd = FileDesc::new(fd);
Ok(Socket(fd))
}
pub fn duplicate(&self) -> io::Result<Socket> {
self.0.duplicate().map(Socket)
}
fn recv_with_flags(&self, buf: &mut [u8], flags: c_int) -> io::Result<usize> {
let ret = cvt(unsafe {
netc::recv(self.0.raw(), buf.as_mut_ptr() as *mut c_void, buf.len(), flags)
})?;
Ok(ret as usize)
}
pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.recv_with_flags(buf, 0)
}
pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
self.recv_with_flags(buf, MSG_PEEK)
}
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.0.read_vectored(bufs)
}
#[inline]
pub fn is_read_vectored(&self) -> bool {
self.0.is_read_vectored()
}
fn recv_from_with_flags(
&self,
buf: &mut [u8],
flags: c_int,
) -> io::Result<(usize, SocketAddr)> {
let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() };
let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t;
let n = cvt(unsafe {
netc::recvfrom(
self.0.raw(),
buf.as_mut_ptr() as *mut c_void,
buf.len(),
flags,
&mut storage as *mut _ as *mut _,
&mut addrlen,
)
})?;
Ok((n as usize, sockaddr_to_addr(&storage, addrlen as usize)?))
}
pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.recv_from_with_flags(buf, 0)
}
pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.recv_from_with_flags(buf, MSG_PEEK)
}
pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.0.write_vectored(bufs)
}
#[inline]
pub fn is_write_vectored(&self) -> bool {
self.0.is_write_vectored()
}
pub fn set_timeout(&self, dur: Option<Duration>, kind: c_int) -> io::Result<()> {
let timeout = match dur {
Some(dur) => {
if dur.as_secs() == 0 && dur.subsec_nanos() == 0 {
return Err(io::Error::new_const(
io::ErrorKind::InvalidInput,
&"cannot set a 0 duration timeout",
));
}
let secs = if dur.as_secs() > netc::c_long::MAX as u64 {
netc::c_long::MAX
} else {
dur.as_secs() as netc::c_long
};
let mut timeout = netc::timeval { tv_sec: secs, tv_usec: dur.subsec_micros() as _ };
if timeout.tv_sec == 0 && timeout.tv_usec == 0 {
timeout.tv_usec = 1;
}
timeout
}
None => netc::timeval { tv_sec: 0, tv_usec: 0 },
};
setsockopt(self, netc::SOL_SOCKET, kind, timeout)
}
pub fn
|
(&self, kind: c_int) -> io::Result<Option<Duration>> {
let raw: netc::timeval = getsockopt(self, netc::SOL_SOCKET, kind)?;
if raw.tv_sec == 0 && raw.tv_usec == 0 {
Ok(None)
} else {
let sec = raw.tv_sec as u64;
let nsec = (raw.tv_usec as u32) * 1000;
Ok(Some(Duration::new(sec, nsec)))
}
}
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
let how = match how {
Shutdown::Write => netc::SHUT_WR,
Shutdown::Read => netc::SHUT_RD,
Shutdown::Both => netc::SHUT_RDWR,
};
cvt(unsafe { netc::shutdown(self.0.raw(), how) })?;
Ok(())
}
pub fn set_linger(&self, linger: Option<Duration>) -> io::Result<()> {
let linger = netc::linger {
l_onoff: linger.is_some() as netc::c_int,
l_linger: linger.unwrap_or_default().as_secs() as netc::c_int,
};
setsockopt(self, netc::SOL_SOCKET, netc::SO_LINGER, linger)
}
pub fn linger(&self) -> io::Result<Option<Duration>> {
let val: netc::linger = getsockopt(self, netc::SOL_SOCKET, netc::SO_LINGER)?;
        Ok((val.l_onoff != 0).then(|| Duration::from_secs(val.l_linger as u64)))
}
pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
setsockopt(self, netc::IPPROTO_TCP, netc::TCP_NODELAY, nodelay as c_int)
}
pub fn nodelay(&self) -> io::Result<bool> {
let raw: c_int = getsockopt(self, netc::IPPROTO_TCP, netc::TCP_NODELAY)?;
        Ok(raw != 0)
}
pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
let mut nonblocking = nonblocking as c_int;
cvt(unsafe {
netc::ioctl(*self.as_inner(), netc::FIONBIO, (&mut nonblocking) as *mut c_int as _)
})
.map(drop)
}
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
let raw: c_int = getsockopt(self, netc::SOL_SOCKET, netc::SO_ERROR)?;
if raw == 0 { Ok(None) } else { Ok(Some(io::Error::from_raw_os_error(raw as i32))) }
}
// This method is used by sys_common code to abstract over targets.
pub fn as_raw(&self) -> c_int {
*self.as_inner()
}
}
impl AsInner<c_int> for Socket {
fn as_inner(&self) -> &c_int {
self.0.as_inner()
}
}
impl FromInner<c_int> for Socket {
fn from_inner(fd: c_int) -> Socket {
Socket(FileDesc::new(fd))
}
}
impl IntoInner<c_int> for Socket {
fn into_inner(self) -> c_int {
self.0.into_raw()
}
}
|
timeout
|
identifier_name
|
color.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Specified color values.
use super::AllowQuirks;
#[cfg(feature = "gecko")]
use crate::gecko_bindings::structs::nscolor;
use crate::parser::{Parse, ParserContext};
#[cfg(feature = "gecko")]
use crate::properties::longhands::system_colors::SystemColor;
use crate::values::computed::{Color as ComputedColor, Context, ToComputedValue};
use crate::values::generics::color::{Color as GenericColor, ColorOrAuto as GenericColorOrAuto};
use crate::values::specified::calc::CalcNode;
use cssparser::{AngleOrNumber, Color as CSSParserColor, Parser, Token, RGBA};
use cssparser::{BasicParseErrorKind, NumberOrPercentage, ParseErrorKind};
use itoa;
use std::fmt::{self, Write};
use std::io::Write as IoWrite;
use style_traits::{CssType, CssWriter, KeywordsCollectFn, ParseError, StyleParseErrorKind};
use style_traits::{SpecifiedValueInfo, ToCss, ValueParseErrorKind};
/// Specified color value
#[derive(Clone, Debug, MallocSizeOf, PartialEq)]
pub enum Color {
/// The 'currentColor' keyword
CurrentColor,
/// A specific RGBA color
Numeric {
/// Parsed RGBA color
parsed: RGBA,
/// Authored representation
authored: Option<Box<str>>,
},
/// A complex color value from computed value
Complex(ComputedColor),
/// A system color
#[cfg(feature = "gecko")]
System(SystemColor),
/// A special color keyword value used in Gecko
#[cfg(feature = "gecko")]
Special(gecko::SpecialColorKeyword),
/// Quirksmode-only rule for inheriting color from the body
#[cfg(feature = "gecko")]
InheritFromBodyQuirk,
}
#[cfg(feature = "gecko")]
mod gecko {
#[derive(Clone, Copy, Debug, Eq, Hash, MallocSizeOf, Parse, PartialEq, ToCss)]
pub enum SpecialColorKeyword {
MozDefaultColor,
MozDefaultBackgroundColor,
MozHyperlinktext,
MozActivehyperlinktext,
MozVisitedhyperlinktext,
}
}
impl From<RGBA> for Color {
fn from(value: RGBA) -> Self {
Color::rgba(value)
}
}
struct ColorComponentParser<'a, 'b: 'a>(&'a ParserContext<'b>);
impl<'a, 'b: 'a, 'i: 'a> ::cssparser::ColorComponentParser<'i> for ColorComponentParser<'a, 'b> {
type Error = StyleParseErrorKind<'i>;
fn parse_angle_or_number<'t>(
&self,
input: &mut Parser<'i, 't>,
) -> Result<AngleOrNumber, ParseError<'i>> {
use crate::values::specified::Angle;
let location = input.current_source_location();
let token = input.next()?.clone();
match token {
Token::Dimension {
                value, ref unit, ..
} => {
let angle = Angle::parse_dimension(value, unit, /* from_calc = */ false);
let degrees = match angle {
Ok(angle) => angle.degrees(),
Err(()) => return Err(location.new_unexpected_token_error(token.clone())),
};
Ok(AngleOrNumber::Angle { degrees })
},
            Token::Number { value, .. } => Ok(AngleOrNumber::Number { value }),
Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {
input.parse_nested_block(|i| CalcNode::parse_angle_or_number(self.0, i))
},
t => return Err(location.new_unexpected_token_error(t)),
}
}
fn parse_percentage<'t>(&self, input: &mut Parser<'i, 't>) -> Result<f32, ParseError<'i>> {
use crate::values::specified::Percentage;
Ok(Percentage::parse(self.0, input)?.get())
}
fn parse_number<'t>(&self, input: &mut Parser<'i, 't>) -> Result<f32, ParseError<'i>> {
use crate::values::specified::Number;
Ok(Number::parse(self.0, input)?.get())
}
fn parse_number_or_percentage<'t>(
&self,
input: &mut Parser<'i, 't>,
) -> Result<NumberOrPercentage, ParseError<'i>> {
let location = input.current_source_location();
match input.next()?.clone() {
            Token::Number { value, .. } => Ok(NumberOrPercentage::Number { value }),
            Token::Percentage { unit_value, .. } => {
Ok(NumberOrPercentage::Percentage { unit_value })
},
Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {
input.parse_nested_block(|i| CalcNode::parse_number_or_percentage(self.0, i))
},
t => return Err(location.new_unexpected_token_error(t)),
}
}
}
impl Parse for Color {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
// Currently we only store authored value for color keywords,
// because all browsers serialize those values as keywords for
// specified value.
let start = input.state();
let authored = input.expect_ident_cloned().ok();
input.reset(&start);
let compontent_parser = ColorComponentParser(&*context);
match input.try(|i| CSSParserColor::parse_with(&compontent_parser, i)) {
Ok(value) => Ok(match value {
CSSParserColor::CurrentColor => Color::CurrentColor,
CSSParserColor::RGBA(rgba) => Color::Numeric {
parsed: rgba,
authored: authored.map(|s| s.to_ascii_lowercase().into_boxed_str()),
},
}),
Err(e) => {
#[cfg(feature = "gecko")]
{
if let Ok(ident) = input.expect_ident() {
if let Ok(system) = SystemColor::from_ident(ident) {
return Ok(Color::System(system));
}
if let Ok(c) = gecko::SpecialColorKeyword::from_ident(ident) {
return Ok(Color::Special(c));
}
}
}
match e.kind {
ParseErrorKind::Basic(BasicParseErrorKind::UnexpectedToken(t)) => {
Err(e.location.new_custom_error(StyleParseErrorKind::ValueError(
ValueParseErrorKind::InvalidColor(t),
)))
},
_ => Err(e),
}
},
}
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
match *self {
Color::CurrentColor => CSSParserColor::CurrentColor.to_css(dest),
Color::Numeric {
authored: Some(ref authored),
..
} => dest.write_str(authored),
Color::Numeric {
                parsed: ref rgba, ..
} => rgba.to_css(dest),
Color::Complex(_) => Ok(()),
#[cfg(feature = "gecko")]
Color::System(system) => system.to_css(dest),
#[cfg(feature = "gecko")]
Color::Special(special) => special.to_css(dest),
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => Ok(()),
}
}
}
/// A wrapper of cssparser::Color::parse_hash.
///
/// That function should never return CurrentColor, so it makes no sense to
/// handle a cssparser::Color here. This should really be done in cssparser
/// directly rather than here.
fn parse_hash_color(value: &[u8]) -> Result<RGBA, ()> {
CSSParserColor::parse_hash(value).map(|color| match color {
CSSParserColor::RGBA(rgba) => rgba,
CSSParserColor::CurrentColor => unreachable!("parse_hash should never return currentcolor"),
})
}
impl Color {
/// Returns currentcolor value.
#[inline]
pub fn currentcolor() -> Color {
Color::CurrentColor
}
/// Returns transparent value.
#[inline]
pub fn transparent() -> Color {
// We should probably set authored to "transparent", but maybe it doesn't matter.
Color::rgba(RGBA::transparent())
}
/// Returns a numeric RGBA color value.
#[inline]
pub fn rgba(rgba: RGBA) -> Self {
Color::Numeric {
parsed: rgba,
authored: None,
}
}
/// Parse a color, with quirks.
///
/// <https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk>
pub fn parse_quirky<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
allow_quirks: AllowQuirks,
) -> Result<Self, ParseError<'i>> {
input.try(|i| Self::parse(context, i)).or_else(|e| {
            if !allow_quirks.allowed(context.quirks_mode) {
return Err(e);
}
Color::parse_quirky_color(input)
.map(Color::rgba)
.map_err(|_| e)
})
}
/// Parse a <quirky-color> value.
///
/// <https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk>
fn parse_quirky_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<RGBA, ParseError<'i>> {
let location = input.current_source_location();
let (value, unit) = match *input.next()? {
Token::Number {
int_value: Some(integer),
..
} => (integer, None),
Token::Dimension {
int_value: Some(integer),
ref unit,
..
} => (integer, Some(unit)),
Token::Ident(ref ident) => {
if ident.len() != 3 && ident.len() != 6 {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
}
return parse_hash_color(ident.as_bytes()).map_err(|()| {
location.new_custom_error(StyleParseErrorKind::UnspecifiedError)
});
},
ref t => {
return Err(location.new_unexpected_token_error(t.clone()));
},
};
if value < 0 {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
}
let length = if value <= 9 {
1
} else if value <= 99 {
2
} else if value <= 999 {
3
} else if value <= 9999 {
4
} else if value <= 99999 {
5
} else if value <= 999999 {
6
} else {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
};
let total = length + unit.as_ref().map_or(0, |d| d.len());
if total > 6 {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
}
let mut serialization = [b'0'; 6];
let space_padding = 6 - total;
let mut written = space_padding;
written += itoa::write(&mut serialization[written..], value).unwrap();
if let Some(unit) = unit {
written += (&mut serialization[written..])
.write(unit.as_bytes())
.unwrap();
}
debug_assert_eq!(written, 6);
parse_hash_color(&serialization)
.map_err(|()| location.new_custom_error(StyleParseErrorKind::UnspecifiedError))
}
/// Returns true if the color is completely transparent, and false
/// otherwise.
pub fn is_transparent(&self) -> bool {
match *self {
Color::Numeric { ref parsed, .. } => parsed.alpha == 0,
_ => false,
}
}
}
#[cfg(feature = "gecko")]
fn convert_nscolor_to_computedcolor(color: nscolor) -> ComputedColor {
use crate::gecko::values::convert_nscolor_to_rgba;
ComputedColor::rgba(convert_nscolor_to_rgba(color))
}
impl Color {
/// Converts this Color into a ComputedColor.
///
/// If `context` is `None`, and the specified color requires data from
/// the context to resolve, then `None` is returned.
pub fn to_computed_color(&self, _context: Option<&Context>) -> Option<ComputedColor> {
match *self {
Color::CurrentColor => Some(ComputedColor::currentcolor()),
Color::Numeric { ref parsed, .. } => Some(ComputedColor::rgba(*parsed)),
Color::Complex(ref complex) => Some(*complex),
#[cfg(feature = "gecko")]
Color::System(system) => _context
.map(|context| convert_nscolor_to_computedcolor(system.to_computed_value(context))),
#[cfg(feature = "gecko")]
Color::Special(special) => {
use self::gecko::SpecialColorKeyword as Keyword;
_context.map(|context| {
let prefs = context.device().pref_sheet_prefs();
convert_nscolor_to_computedcolor(match special {
Keyword::MozDefaultColor => prefs.mDefaultColor,
Keyword::MozDefaultBackgroundColor => prefs.mDefaultBackgroundColor,
Keyword::MozHyperlinktext => prefs.mLinkColor,
Keyword::MozActivehyperlinktext => prefs.mActiveLinkColor,
Keyword::MozVisitedhyperlinktext => prefs.mVisitedLinkColor,
})
})
},
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => {
|
_context.map(|context| ComputedColor::rgba(context.device().body_text_color()))
},
}
}
}
impl ToComputedValue for Color {
type ComputedValue = ComputedColor;
fn to_computed_value(&self, context: &Context) -> ComputedColor {
let result = self.to_computed_color(Some(context)).unwrap();
if !result.is_numeric() {
if let Some(longhand) = context.for_non_inherited_property {
if longhand.stores_complex_colors_lossily() {
context.rule_cache_conditions.borrow_mut().set_uncacheable();
}
}
}
result
}
fn from_computed_value(computed: &ComputedColor) -> Self {
match *computed {
GenericColor::Numeric(color) => Color::rgba(color),
GenericColor::Foreground => Color::currentcolor(),
GenericColor::Complex(..) => Color::Complex(*computed),
}
}
}
/// Specified color value, but resolved to just RGBA for computed value
/// with value from color property at the same context.
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToCss)]
pub struct RGBAColor(pub Color);
impl Parse for RGBAColor {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
Color::parse(context, input).map(RGBAColor)
}
}
impl ToComputedValue for RGBAColor {
type ComputedValue = RGBA;
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0
.to_computed_value(context)
.to_rgba(context.style().get_color().clone_color())
}
fn from_computed_value(computed: &RGBA) -> Self {
RGBAColor(Color::rgba(*computed))
}
}
impl From<Color> for RGBAColor {
fn from(color: Color) -> RGBAColor {
RGBAColor(color)
}
}
impl SpecifiedValueInfo for Color {
const SUPPORTED_TYPES: u8 = CssType::COLOR;
fn collect_completion_keywords(f: KeywordsCollectFn) {
// We are not going to insert all the color names here. Caller and
// devtools should take care of them. XXX Actually, transparent
// should probably be handled that way as well.
// XXX `currentColor` should really be `currentcolor`. But let's
// keep it consistent with the old system for now.
f(&["rgb", "rgba", "hsl", "hsla", "currentColor", "transparent"]);
}
}
/// Specified value for the "color" property, which resolves the `currentcolor`
/// keyword to the parent color instead of self's color.
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[derive(Clone, Debug, PartialEq, SpecifiedValueInfo, ToCss)]
pub struct ColorPropertyValue(pub Color);
impl ToComputedValue for ColorPropertyValue {
type ComputedValue = RGBA;
#[inline]
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0
.to_computed_value(context)
.to_rgba(context.builder.get_parent_color().clone_color())
}
#[inline]
fn from_computed_value(computed: &RGBA) -> Self {
ColorPropertyValue(Color::rgba(*computed).into())
}
}
impl Parse for ColorPropertyValue {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
Color::parse_quirky(context, input, AllowQuirks::Yes).map(ColorPropertyValue)
}
}
/// auto | <color>
pub type ColorOrAuto = GenericColorOrAuto<Color>;
|
random_line_split
|
|
color.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Specified color values.
use super::AllowQuirks;
#[cfg(feature = "gecko")]
use crate::gecko_bindings::structs::nscolor;
use crate::parser::{Parse, ParserContext};
#[cfg(feature = "gecko")]
use crate::properties::longhands::system_colors::SystemColor;
use crate::values::computed::{Color as ComputedColor, Context, ToComputedValue};
use crate::values::generics::color::{Color as GenericColor, ColorOrAuto as GenericColorOrAuto};
use crate::values::specified::calc::CalcNode;
use cssparser::{AngleOrNumber, Color as CSSParserColor, Parser, Token, RGBA};
use cssparser::{BasicParseErrorKind, NumberOrPercentage, ParseErrorKind};
use itoa;
use std::fmt::{self, Write};
use std::io::Write as IoWrite;
use style_traits::{CssType, CssWriter, KeywordsCollectFn, ParseError, StyleParseErrorKind};
use style_traits::{SpecifiedValueInfo, ToCss, ValueParseErrorKind};
/// Specified color value
#[derive(Clone, Debug, MallocSizeOf, PartialEq)]
pub enum Color {
/// The 'currentColor' keyword
CurrentColor,
/// A specific RGBA color
Numeric {
/// Parsed RGBA color
parsed: RGBA,
/// Authored representation
authored: Option<Box<str>>,
},
/// A complex color value from computed value
Complex(ComputedColor),
/// A system color
#[cfg(feature = "gecko")]
System(SystemColor),
/// A special color keyword value used in Gecko
#[cfg(feature = "gecko")]
Special(gecko::SpecialColorKeyword),
/// Quirksmode-only rule for inheriting color from the body
#[cfg(feature = "gecko")]
InheritFromBodyQuirk,
}
#[cfg(feature = "gecko")]
mod gecko {
#[derive(Clone, Copy, Debug, Eq, Hash, MallocSizeOf, Parse, PartialEq, ToCss)]
pub enum SpecialColorKeyword {
MozDefaultColor,
MozDefaultBackgroundColor,
MozHyperlinktext,
MozActivehyperlinktext,
MozVisitedhyperlinktext,
}
}
impl From<RGBA> for Color {
fn from(value: RGBA) -> Self {
Color::rgba(value)
}
}
struct ColorComponentParser<'a, 'b: 'a>(&'a ParserContext<'b>);
impl<'a, 'b: 'a, 'i: 'a> ::cssparser::ColorComponentParser<'i> for ColorComponentParser<'a, 'b> {
type Error = StyleParseErrorKind<'i>;
fn parse_angle_or_number<'t>(
&self,
input: &mut Parser<'i, 't>,
) -> Result<AngleOrNumber, ParseError<'i>> {
use crate::values::specified::Angle;
let location = input.current_source_location();
let token = input.next()?.clone();
match token {
Token::Dimension {
value, ref unit, ..
} => {
let angle = Angle::parse_dimension(value, unit, /* from_calc = */ false);
let degrees = match angle {
Ok(angle) => angle.degrees(),
Err(()) => return Err(location.new_unexpected_token_error(token.clone())),
};
Ok(AngleOrNumber::Angle { degrees })
},
Token::Number { value, .. } => Ok(AngleOrNumber::Number { value }),
Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {
input.parse_nested_block(|i| CalcNode::parse_angle_or_number(self.0, i))
},
t => return Err(location.new_unexpected_token_error(t)),
}
}
fn parse_percentage<'t>(&self, input: &mut Parser<'i, 't>) -> Result<f32, ParseError<'i>> {
use crate::values::specified::Percentage;
Ok(Percentage::parse(self.0, input)?.get())
}
fn parse_number<'t>(&self, input: &mut Parser<'i, 't>) -> Result<f32, ParseError<'i>> {
use crate::values::specified::Number;
Ok(Number::parse(self.0, input)?.get())
}
fn parse_number_or_percentage<'t>(
&self,
input: &mut Parser<'i, 't>,
) -> Result<NumberOrPercentage, ParseError<'i>> {
let location = input.current_source_location();
match input.next()?.clone() {
Token::Number { value, .. } => Ok(NumberOrPercentage::Number { value }),
Token::Percentage { unit_value, .. } => {
Ok(NumberOrPercentage::Percentage { unit_value })
},
Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {
input.parse_nested_block(|i| CalcNode::parse_number_or_percentage(self.0, i))
},
t => return Err(location.new_unexpected_token_error(t)),
}
}
}
impl Parse for Color {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
// Currently we only store authored value for color keywords,
// because all browsers serialize those values as keywords for
// specified value.
let start = input.state();
let authored = input.expect_ident_cloned().ok();
input.reset(&start);
let compontent_parser = ColorComponentParser(&*context);
match input.try(|i| CSSParserColor::parse_with(&compontent_parser, i)) {
Ok(value) => Ok(match value {
CSSParserColor::CurrentColor => Color::CurrentColor,
CSSParserColor::RGBA(rgba) => Color::Numeric {
parsed: rgba,
authored: authored.map(|s| s.to_ascii_lowercase().into_boxed_str()),
},
}),
Err(e) => {
#[cfg(feature = "gecko")]
{
if let Ok(ident) = input.expect_ident() {
if let Ok(system) = SystemColor::from_ident(ident) {
return Ok(Color::System(system));
}
if let Ok(c) = gecko::SpecialColorKeyword::from_ident(ident) {
return Ok(Color::Special(c));
}
}
}
match e.kind {
ParseErrorKind::Basic(BasicParseErrorKind::UnexpectedToken(t)) => {
Err(e.location.new_custom_error(StyleParseErrorKind::ValueError(
ValueParseErrorKind::InvalidColor(t),
)))
},
_ => Err(e),
}
},
}
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
|
}
/// A wrapper of cssparser::Color::parse_hash.
///
/// That function should never return CurrentColor, so it makes no sense to
/// handle a cssparser::Color here. This should really be done in cssparser
/// directly rather than here.
fn parse_hash_color(value: &[u8]) -> Result<RGBA, ()> {
CSSParserColor::parse_hash(value).map(|color| match color {
CSSParserColor::RGBA(rgba) => rgba,
CSSParserColor::CurrentColor => unreachable!("parse_hash should never return currentcolor"),
})
}
impl Color {
/// Returns currentcolor value.
#[inline]
pub fn currentcolor() -> Color {
Color::CurrentColor
}
/// Returns transparent value.
#[inline]
pub fn transparent() -> Color {
// We should probably set authored to "transparent", but maybe it doesn't matter.
Color::rgba(RGBA::transparent())
}
/// Returns a numeric RGBA color value.
#[inline]
pub fn rgba(rgba: RGBA) -> Self {
Color::Numeric {
parsed: rgba,
authored: None,
}
}
/// Parse a color, with quirks.
///
/// <https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk>
pub fn parse_quirky<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
allow_quirks: AllowQuirks,
) -> Result<Self, ParseError<'i>> {
input.try(|i| Self::parse(context, i)).or_else(|e| {
if !allow_quirks.allowed(context.quirks_mode) {
return Err(e);
}
Color::parse_quirky_color(input)
.map(Color::rgba)
.map_err(|_| e)
})
}
/// Parse a <quirky-color> value.
///
/// <https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk>
fn parse_quirky_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<RGBA, ParseError<'i>> {
let location = input.current_source_location();
let (value, unit) = match *input.next()? {
Token::Number {
int_value: Some(integer),
..
} => (integer, None),
Token::Dimension {
int_value: Some(integer),
ref unit,
..
} => (integer, Some(unit)),
Token::Ident(ref ident) => {
if ident.len() != 3 && ident.len() != 6 {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
}
return parse_hash_color(ident.as_bytes()).map_err(|()| {
location.new_custom_error(StyleParseErrorKind::UnspecifiedError)
});
},
ref t => {
return Err(location.new_unexpected_token_error(t.clone()));
},
};
if value < 0 {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
}
let length = if value <= 9 {
1
} else if value <= 99 {
2
} else if value <= 999 {
3
} else if value <= 9999 {
4
} else if value <= 99999 {
5
} else if value <= 999999 {
6
} else {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
};
let total = length + unit.as_ref().map_or(0, |d| d.len());
if total > 6 {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
}
let mut serialization = [b'0'; 6];
let space_padding = 6 - total;
let mut written = space_padding;
written += itoa::write(&mut serialization[written..], value).unwrap();
if let Some(unit) = unit {
written += (&mut serialization[written..])
.write(unit.as_bytes())
.unwrap();
}
debug_assert_eq!(written, 6);
parse_hash_color(&serialization)
.map_err(|()| location.new_custom_error(StyleParseErrorKind::UnspecifiedError))
}
/// Returns true if the color is completely transparent, and false
/// otherwise.
pub fn is_transparent(&self) -> bool {
match *self {
Color::Numeric { ref parsed, .. } => parsed.alpha == 0,
_ => false,
}
}
}
#[cfg(feature = "gecko")]
fn convert_nscolor_to_computedcolor(color: nscolor) -> ComputedColor {
use crate::gecko::values::convert_nscolor_to_rgba;
ComputedColor::rgba(convert_nscolor_to_rgba(color))
}
impl Color {
/// Converts this Color into a ComputedColor.
///
/// If `context` is `None`, and the specified color requires data from
/// the context to resolve, then `None` is returned.
pub fn to_computed_color(&self, _context: Option<&Context>) -> Option<ComputedColor> {
match *self {
Color::CurrentColor => Some(ComputedColor::currentcolor()),
Color::Numeric { ref parsed, .. } => Some(ComputedColor::rgba(*parsed)),
Color::Complex(ref complex) => Some(*complex),
#[cfg(feature = "gecko")]
Color::System(system) => _context
.map(|context| convert_nscolor_to_computedcolor(system.to_computed_value(context))),
#[cfg(feature = "gecko")]
Color::Special(special) => {
use self::gecko::SpecialColorKeyword as Keyword;
_context.map(|context| {
let prefs = context.device().pref_sheet_prefs();
convert_nscolor_to_computedcolor(match special {
Keyword::MozDefaultColor => prefs.mDefaultColor,
Keyword::MozDefaultBackgroundColor => prefs.mDefaultBackgroundColor,
Keyword::MozHyperlinktext => prefs.mLinkColor,
Keyword::MozActivehyperlinktext => prefs.mActiveLinkColor,
Keyword::MozVisitedhyperlinktext => prefs.mVisitedLinkColor,
})
})
},
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => {
_context.map(|context| ComputedColor::rgba(context.device().body_text_color()))
},
}
}
}
impl ToComputedValue for Color {
type ComputedValue = ComputedColor;
fn to_computed_value(&self, context: &Context) -> ComputedColor {
let result = self.to_computed_color(Some(context)).unwrap();
if !result.is_numeric() {
if let Some(longhand) = context.for_non_inherited_property {
if longhand.stores_complex_colors_lossily() {
context.rule_cache_conditions.borrow_mut().set_uncacheable();
}
}
}
result
}
fn from_computed_value(computed: &ComputedColor) -> Self {
match *computed {
GenericColor::Numeric(color) => Color::rgba(color),
GenericColor::Foreground => Color::currentcolor(),
GenericColor::Complex(..) => Color::Complex(*computed),
}
}
}
/// Specified color value, but resolved to just RGBA for computed value
/// with value from color property at the same context.
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToCss)]
pub struct RGBAColor(pub Color);
impl Parse for RGBAColor {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
Color::parse(context, input).map(RGBAColor)
}
}
impl ToComputedValue for RGBAColor {
type ComputedValue = RGBA;
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0
.to_computed_value(context)
.to_rgba(context.style().get_color().clone_color())
}
fn from_computed_value(computed: &RGBA) -> Self {
RGBAColor(Color::rgba(*computed))
}
}
impl From<Color> for RGBAColor {
fn from(color: Color) -> RGBAColor {
RGBAColor(color)
}
}
impl SpecifiedValueInfo for Color {
const SUPPORTED_TYPES: u8 = CssType::COLOR;
fn collect_completion_keywords(f: KeywordsCollectFn) {
// We are not going to insert all the color names here. Caller and
// devtools should take care of them. XXX Actually, transparent
// should probably be handled that way as well.
// XXX `currentColor` should really be `currentcolor`. But let's
// keep it consistent with the old system for now.
f(&["rgb", "rgba", "hsl", "hsla", "currentColor", "transparent"]);
}
}
/// Specified value for the "color" property, which resolves the `currentcolor`
/// keyword to the parent color instead of self's color.
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[derive(Clone, Debug, PartialEq, SpecifiedValueInfo, ToCss)]
pub struct ColorPropertyValue(pub Color);
impl ToComputedValue for ColorPropertyValue {
type ComputedValue = RGBA;
#[inline]
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0
.to_computed_value(context)
.to_rgba(context.builder.get_parent_color().clone_color())
}
#[inline]
fn from_computed_value(computed: &RGBA) -> Self {
ColorPropertyValue(Color::rgba(*computed).into())
}
}
impl Parse for ColorPropertyValue {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
Color::parse_quirky(context, input, AllowQuirks::Yes).map(ColorPropertyValue)
}
}
/// auto | <color>
pub type ColorOrAuto = GenericColorOrAuto<Color>;
|
{
match *self {
Color::CurrentColor => CSSParserColor::CurrentColor.to_css(dest),
Color::Numeric {
authored: Some(ref authored),
..
} => dest.write_str(authored),
Color::Numeric {
parsed: ref rgba, ..
} => rgba.to_css(dest),
Color::Complex(_) => Ok(()),
#[cfg(feature = "gecko")]
Color::System(system) => system.to_css(dest),
#[cfg(feature = "gecko")]
Color::Special(special) => special.to_css(dest),
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => Ok(()),
}
}
|
identifier_body
|
color.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Specified color values.
use super::AllowQuirks;
#[cfg(feature = "gecko")]
use crate::gecko_bindings::structs::nscolor;
use crate::parser::{Parse, ParserContext};
#[cfg(feature = "gecko")]
use crate::properties::longhands::system_colors::SystemColor;
use crate::values::computed::{Color as ComputedColor, Context, ToComputedValue};
use crate::values::generics::color::{Color as GenericColor, ColorOrAuto as GenericColorOrAuto};
use crate::values::specified::calc::CalcNode;
use cssparser::{AngleOrNumber, Color as CSSParserColor, Parser, Token, RGBA};
use cssparser::{BasicParseErrorKind, NumberOrPercentage, ParseErrorKind};
use itoa;
use std::fmt::{self, Write};
use std::io::Write as IoWrite;
use style_traits::{CssType, CssWriter, KeywordsCollectFn, ParseError, StyleParseErrorKind};
use style_traits::{SpecifiedValueInfo, ToCss, ValueParseErrorKind};
/// Specified color value
#[derive(Clone, Debug, MallocSizeOf, PartialEq)]
pub enum Color {
/// The 'currentColor' keyword
CurrentColor,
/// A specific RGBA color
Numeric {
/// Parsed RGBA color
parsed: RGBA,
/// Authored representation
authored: Option<Box<str>>,
},
/// A complex color value from computed value
Complex(ComputedColor),
/// A system color
#[cfg(feature = "gecko")]
System(SystemColor),
/// A special color keyword value used in Gecko
#[cfg(feature = "gecko")]
Special(gecko::SpecialColorKeyword),
/// Quirksmode-only rule for inheriting color from the body
#[cfg(feature = "gecko")]
InheritFromBodyQuirk,
}
#[cfg(feature = "gecko")]
mod gecko {
#[derive(Clone, Copy, Debug, Eq, Hash, MallocSizeOf, Parse, PartialEq, ToCss)]
pub enum SpecialColorKeyword {
MozDefaultColor,
MozDefaultBackgroundColor,
MozHyperlinktext,
MozActivehyperlinktext,
MozVisitedhyperlinktext,
}
}
impl From<RGBA> for Color {
fn from(value: RGBA) -> Self {
Color::rgba(value)
}
}
struct ColorComponentParser<'a, 'b: 'a>(&'a ParserContext<'b>);
impl<'a, 'b: 'a, 'i: 'a> ::cssparser::ColorComponentParser<'i> for ColorComponentParser<'a, 'b> {
type Error = StyleParseErrorKind<'i>;
fn parse_angle_or_number<'t>(
&self,
input: &mut Parser<'i, 't>,
) -> Result<AngleOrNumber, ParseError<'i>> {
use crate::values::specified::Angle;
let location = input.current_source_location();
let token = input.next()?.clone();
match token {
Token::Dimension {
value, ref unit, ..
} => {
let angle = Angle::parse_dimension(value, unit, /* from_calc = */ false);
let degrees = match angle {
Ok(angle) => angle.degrees(),
Err(()) => return Err(location.new_unexpected_token_error(token.clone())),
};
Ok(AngleOrNumber::Angle { degrees })
},
Token::Number { value, .. } => Ok(AngleOrNumber::Number { value }),
Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {
input.parse_nested_block(|i| CalcNode::parse_angle_or_number(self.0, i))
},
t => return Err(location.new_unexpected_token_error(t)),
}
}
fn parse_percentage<'t>(&self, input: &mut Parser<'i, 't>) -> Result<f32, ParseError<'i>> {
use crate::values::specified::Percentage;
Ok(Percentage::parse(self.0, input)?.get())
}
fn parse_number<'t>(&self, input: &mut Parser<'i, 't>) -> Result<f32, ParseError<'i>> {
use crate::values::specified::Number;
Ok(Number::parse(self.0, input)?.get())
}
fn parse_number_or_percentage<'t>(
&self,
input: &mut Parser<'i, 't>,
) -> Result<NumberOrPercentage, ParseError<'i>> {
let location = input.current_source_location();
match input.next()?.clone() {
Token::Number { value, .. } => Ok(NumberOrPercentage::Number { value }),
Token::Percentage { unit_value, .. } => {
Ok(NumberOrPercentage::Percentage { unit_value })
},
Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {
input.parse_nested_block(|i| CalcNode::parse_number_or_percentage(self.0, i))
},
t => return Err(location.new_unexpected_token_error(t)),
}
}
}
impl Parse for Color {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
// Currently we only store authored value for color keywords,
// because all browsers serialize those values as keywords for
// specified value.
let start = input.state();
let authored = input.expect_ident_cloned().ok();
input.reset(&start);
let compontent_parser = ColorComponentParser(&*context);
match input.try(|i| CSSParserColor::parse_with(&compontent_parser, i)) {
Ok(value) => Ok(match value {
CSSParserColor::CurrentColor => Color::CurrentColor,
CSSParserColor::RGBA(rgba) => Color::Numeric {
parsed: rgba,
authored: authored.map(|s| s.to_ascii_lowercase().into_boxed_str()),
},
}),
Err(e) => {
#[cfg(feature = "gecko")]
{
if let Ok(ident) = input.expect_ident() {
if let Ok(system) = SystemColor::from_ident(ident) {
return Ok(Color::System(system));
}
if let Ok(c) = gecko::SpecialColorKeyword::from_ident(ident) {
return Ok(Color::Special(c));
}
}
}
match e.kind {
ParseErrorKind::Basic(BasicParseErrorKind::UnexpectedToken(t)) => {
Err(e.location.new_custom_error(StyleParseErrorKind::ValueError(
ValueParseErrorKind::InvalidColor(t),
)))
},
_ => Err(e),
}
},
}
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
match *self {
Color::CurrentColor => CSSParserColor::CurrentColor.to_css(dest),
Color::Numeric {
authored: Some(ref authored),
..
} => dest.write_str(authored),
Color::Numeric {
parsed: ref rgba, ..
} => rgba.to_css(dest),
Color::Complex(_) => Ok(()),
#[cfg(feature = "gecko")]
Color::System(system) => system.to_css(dest),
#[cfg(feature = "gecko")]
Color::Special(special) => special.to_css(dest),
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => Ok(()),
}
}
}
/// A wrapper of cssparser::Color::parse_hash.
///
/// That function should never return CurrentColor, so it makes no sense to
/// handle a cssparser::Color here. This should really be done in cssparser
/// directly rather than here.
fn parse_hash_color(value: &[u8]) -> Result<RGBA, ()> {
CSSParserColor::parse_hash(value).map(|color| match color {
CSSParserColor::RGBA(rgba) => rgba,
CSSParserColor::CurrentColor => unreachable!("parse_hash should never return currentcolor"),
})
}
impl Color {
/// Returns currentcolor value.
#[inline]
pub fn currentcolor() -> Color {
Color::CurrentColor
}
/// Returns transparent value.
#[inline]
pub fn transparent() -> Color {
// We should probably set authored to "transparent", but maybe it doesn't matter.
Color::rgba(RGBA::transparent())
}
/// Returns a numeric RGBA color value.
#[inline]
pub fn rgba(rgba: RGBA) -> Self {
Color::Numeric {
parsed: rgba,
authored: None,
}
}
/// Parse a color, with quirks.
///
/// <https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk>
pub fn
|
<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
allow_quirks: AllowQuirks,
) -> Result<Self, ParseError<'i>> {
input.try(|i| Self::parse(context, i)).or_else(|e| {
if !allow_quirks.allowed(context.quirks_mode) {
return Err(e);
}
Color::parse_quirky_color(input)
.map(Color::rgba)
.map_err(|_| e)
})
}
/// Parse a <quirky-color> value.
///
/// <https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk>
fn parse_quirky_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<RGBA, ParseError<'i>> {
let location = input.current_source_location();
let (value, unit) = match *input.next()? {
Token::Number {
int_value: Some(integer),
..
} => (integer, None),
Token::Dimension {
int_value: Some(integer),
ref unit,
..
} => (integer, Some(unit)),
Token::Ident(ref ident) => {
if ident.len() != 3 && ident.len() != 6 {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
}
return parse_hash_color(ident.as_bytes()).map_err(|()| {
location.new_custom_error(StyleParseErrorKind::UnspecifiedError)
});
},
ref t => {
return Err(location.new_unexpected_token_error(t.clone()));
},
};
if value < 0 {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
}
let length = if value <= 9 {
1
} else if value <= 99 {
2
} else if value <= 999 {
3
} else if value <= 9999 {
4
} else if value <= 99999 {
5
} else if value <= 999999 {
6
} else {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
};
let total = length + unit.as_ref().map_or(0, |d| d.len());
if total > 6 {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
}
let mut serialization = [b'0'; 6];
let space_padding = 6 - total;
let mut written = space_padding;
written += itoa::write(&mut serialization[written..], value).unwrap();
if let Some(unit) = unit {
written += (&mut serialization[written..])
.write(unit.as_bytes())
.unwrap();
}
debug_assert_eq!(written, 6);
parse_hash_color(&serialization)
.map_err(|()| location.new_custom_error(StyleParseErrorKind::UnspecifiedError))
}
/// Returns true if the color is completely transparent, and false
/// otherwise.
pub fn is_transparent(&self) -> bool {
match *self {
Color::Numeric { ref parsed, .. } => parsed.alpha == 0,
_ => false,
}
}
}
#[cfg(feature = "gecko")]
fn convert_nscolor_to_computedcolor(color: nscolor) -> ComputedColor {
use crate::gecko::values::convert_nscolor_to_rgba;
ComputedColor::rgba(convert_nscolor_to_rgba(color))
}
impl Color {
/// Converts this Color into a ComputedColor.
///
/// If `context` is `None`, and the specified color requires data from
/// the context to resolve, then `None` is returned.
pub fn to_computed_color(&self, _context: Option<&Context>) -> Option<ComputedColor> {
match *self {
Color::CurrentColor => Some(ComputedColor::currentcolor()),
Color::Numeric { ref parsed, .. } => Some(ComputedColor::rgba(*parsed)),
Color::Complex(ref complex) => Some(*complex),
#[cfg(feature = "gecko")]
Color::System(system) => _context
.map(|context| convert_nscolor_to_computedcolor(system.to_computed_value(context))),
#[cfg(feature = "gecko")]
Color::Special(special) => {
use self::gecko::SpecialColorKeyword as Keyword;
_context.map(|context| {
let prefs = context.device().pref_sheet_prefs();
convert_nscolor_to_computedcolor(match special {
Keyword::MozDefaultColor => prefs.mDefaultColor,
Keyword::MozDefaultBackgroundColor => prefs.mDefaultBackgroundColor,
Keyword::MozHyperlinktext => prefs.mLinkColor,
Keyword::MozActivehyperlinktext => prefs.mActiveLinkColor,
Keyword::MozVisitedhyperlinktext => prefs.mVisitedLinkColor,
})
})
},
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => {
_context.map(|context| ComputedColor::rgba(context.device().body_text_color()))
},
}
}
}
impl ToComputedValue for Color {
type ComputedValue = ComputedColor;
fn to_computed_value(&self, context: &Context) -> ComputedColor {
let result = self.to_computed_color(Some(context)).unwrap();
if !result.is_numeric() {
if let Some(longhand) = context.for_non_inherited_property {
if longhand.stores_complex_colors_lossily() {
context.rule_cache_conditions.borrow_mut().set_uncacheable();
}
}
}
result
}
fn from_computed_value(computed: &ComputedColor) -> Self {
match *computed {
GenericColor::Numeric(color) => Color::rgba(color),
GenericColor::Foreground => Color::currentcolor(),
GenericColor::Complex(..) => Color::Complex(*computed),
}
}
}
/// Specified color value, but resolved to just RGBA for computed value
/// with value from color property at the same context.
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToCss)]
pub struct RGBAColor(pub Color);
impl Parse for RGBAColor {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
Color::parse(context, input).map(RGBAColor)
}
}
impl ToComputedValue for RGBAColor {
type ComputedValue = RGBA;
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0
.to_computed_value(context)
.to_rgba(context.style().get_color().clone_color())
}
fn from_computed_value(computed: &RGBA) -> Self {
RGBAColor(Color::rgba(*computed))
}
}
impl From<Color> for RGBAColor {
fn from(color: Color) -> RGBAColor {
RGBAColor(color)
}
}
impl SpecifiedValueInfo for Color {
const SUPPORTED_TYPES: u8 = CssType::COLOR;
fn collect_completion_keywords(f: KeywordsCollectFn) {
// We are not going to insert all the color names here. Caller and
// devtools should take care of them. XXX Actually, transparent
// should probably be handled that way as well.
// XXX `currentColor` should really be `currentcolor`. But let's
// keep it consistent with the old system for now.
f(&["rgb", "rgba", "hsl", "hsla", "currentColor", "transparent"]);
}
}
/// Specified value for the "color" property, which resolves the `currentcolor`
/// keyword to the parent color instead of self's color.
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[derive(Clone, Debug, PartialEq, SpecifiedValueInfo, ToCss)]
pub struct ColorPropertyValue(pub Color);
impl ToComputedValue for ColorPropertyValue {
type ComputedValue = RGBA;
#[inline]
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0
.to_computed_value(context)
.to_rgba(context.builder.get_parent_color().clone_color())
}
#[inline]
fn from_computed_value(computed: &RGBA) -> Self {
ColorPropertyValue(Color::rgba(*computed).into())
}
}
impl Parse for ColorPropertyValue {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
Color::parse_quirky(context, input, AllowQuirks::Yes).map(ColorPropertyValue)
}
}
/// auto | <color>
pub type ColorOrAuto = GenericColorOrAuto<Color>;
|
parse_quirky
|
identifier_name
|
color.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Specified color values.
use super::AllowQuirks;
#[cfg(feature = "gecko")]
use crate::gecko_bindings::structs::nscolor;
use crate::parser::{Parse, ParserContext};
#[cfg(feature = "gecko")]
use crate::properties::longhands::system_colors::SystemColor;
use crate::values::computed::{Color as ComputedColor, Context, ToComputedValue};
use crate::values::generics::color::{Color as GenericColor, ColorOrAuto as GenericColorOrAuto};
use crate::values::specified::calc::CalcNode;
use cssparser::{AngleOrNumber, Color as CSSParserColor, Parser, Token, RGBA};
use cssparser::{BasicParseErrorKind, NumberOrPercentage, ParseErrorKind};
use itoa;
use std::fmt::{self, Write};
use std::io::Write as IoWrite;
use style_traits::{CssType, CssWriter, KeywordsCollectFn, ParseError, StyleParseErrorKind};
use style_traits::{SpecifiedValueInfo, ToCss, ValueParseErrorKind};
/// Specified color value
#[derive(Clone, Debug, MallocSizeOf, PartialEq)]
pub enum Color {
/// The 'currentColor' keyword
CurrentColor,
/// A specific RGBA color
Numeric {
/// Parsed RGBA color
parsed: RGBA,
/// Authored representation
authored: Option<Box<str>>,
},
/// A complex color value from computed value
Complex(ComputedColor),
/// A system color
#[cfg(feature = "gecko")]
System(SystemColor),
/// A special color keyword value used in Gecko
#[cfg(feature = "gecko")]
Special(gecko::SpecialColorKeyword),
/// Quirksmode-only rule for inheriting color from the body
#[cfg(feature = "gecko")]
InheritFromBodyQuirk,
}
#[cfg(feature = "gecko")]
mod gecko {
#[derive(Clone, Copy, Debug, Eq, Hash, MallocSizeOf, Parse, PartialEq, ToCss)]
pub enum SpecialColorKeyword {
MozDefaultColor,
MozDefaultBackgroundColor,
MozHyperlinktext,
MozActivehyperlinktext,
MozVisitedhyperlinktext,
}
}
impl From<RGBA> for Color {
fn from(value: RGBA) -> Self {
Color::rgba(value)
}
}
struct ColorComponentParser<'a, 'b: 'a>(&'a ParserContext<'b>);
impl<'a, 'b: 'a, 'i: 'a> ::cssparser::ColorComponentParser<'i> for ColorComponentParser<'a, 'b> {
type Error = StyleParseErrorKind<'i>;
fn parse_angle_or_number<'t>(
&self,
input: &mut Parser<'i, 't>,
) -> Result<AngleOrNumber, ParseError<'i>> {
use crate::values::specified::Angle;
let location = input.current_source_location();
let token = input.next()?.clone();
match token {
Token::Dimension {
value, ref unit, ..
} => {
let angle = Angle::parse_dimension(value, unit, /* from_calc = */ false);
let degrees = match angle {
Ok(angle) => angle.degrees(),
Err(()) => return Err(location.new_unexpected_token_error(token.clone())),
};
Ok(AngleOrNumber::Angle { degrees })
},
Token::Number { value, .. } => Ok(AngleOrNumber::Number { value }),
Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {
input.parse_nested_block(|i| CalcNode::parse_angle_or_number(self.0, i))
},
t => return Err(location.new_unexpected_token_error(t)),
}
}
fn parse_percentage<'t>(&self, input: &mut Parser<'i, 't>) -> Result<f32, ParseError<'i>> {
use crate::values::specified::Percentage;
Ok(Percentage::parse(self.0, input)?.get())
}
fn parse_number<'t>(&self, input: &mut Parser<'i, 't>) -> Result<f32, ParseError<'i>> {
use crate::values::specified::Number;
Ok(Number::parse(self.0, input)?.get())
}
fn parse_number_or_percentage<'t>(
&self,
input: &mut Parser<'i, 't>,
) -> Result<NumberOrPercentage, ParseError<'i>> {
let location = input.current_source_location();
match input.next()?.clone() {
Token::Number { value, .. } => Ok(NumberOrPercentage::Number { value }),
Token::Percentage { unit_value, .. } => {
Ok(NumberOrPercentage::Percentage { unit_value })
},
Token::Function(ref name) if name.eq_ignore_ascii_case("calc") => {
input.parse_nested_block(|i| CalcNode::parse_number_or_percentage(self.0, i))
},
t => return Err(location.new_unexpected_token_error(t)),
}
}
}
impl Parse for Color {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
// Currently we only store authored value for color keywords,
// because all browsers serialize those values as keywords for
// specified value.
let start = input.state();
let authored = input.expect_ident_cloned().ok();
input.reset(&start);
let compontent_parser = ColorComponentParser(&*context);
match input.try(|i| CSSParserColor::parse_with(&compontent_parser, i)) {
Ok(value) => Ok(match value {
CSSParserColor::CurrentColor => Color::CurrentColor,
CSSParserColor::RGBA(rgba) => Color::Numeric {
parsed: rgba,
authored: authored.map(|s| s.to_ascii_lowercase().into_boxed_str()),
},
}),
Err(e) => {
#[cfg(feature = "gecko")]
{
if let Ok(ident) = input.expect_ident() {
if let Ok(system) = SystemColor::from_ident(ident) {
return Ok(Color::System(system));
}
if let Ok(c) = gecko::SpecialColorKeyword::from_ident(ident) {
return Ok(Color::Special(c));
}
}
}
match e.kind {
ParseErrorKind::Basic(BasicParseErrorKind::UnexpectedToken(t)) => {
Err(e.location.new_custom_error(StyleParseErrorKind::ValueError(
ValueParseErrorKind::InvalidColor(t),
)))
},
_ => Err(e),
}
},
}
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
match *self {
Color::CurrentColor => CSSParserColor::CurrentColor.to_css(dest),
Color::Numeric {
authored: Some(ref authored),
..
} => dest.write_str(authored),
Color::Numeric {
parsed: ref rgba, ..
} => rgba.to_css(dest),
Color::Complex(_) => Ok(()),
#[cfg(feature = "gecko")]
Color::System(system) => system.to_css(dest),
#[cfg(feature = "gecko")]
Color::Special(special) => special.to_css(dest),
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => Ok(()),
}
}
}
/// A wrapper of cssparser::Color::parse_hash.
///
/// That function should never return CurrentColor, so it makes no sense to
/// handle a cssparser::Color here. This should really be done in cssparser
/// directly rather than here.
fn parse_hash_color(value: &[u8]) -> Result<RGBA, ()> {
CSSParserColor::parse_hash(value).map(|color| match color {
CSSParserColor::RGBA(rgba) => rgba,
CSSParserColor::CurrentColor => unreachable!("parse_hash should never return currentcolor"),
})
}
impl Color {
/// Returns currentcolor value.
#[inline]
pub fn currentcolor() -> Color {
Color::CurrentColor
}
/// Returns transparent value.
#[inline]
pub fn transparent() -> Color {
// We should probably set authored to "transparent", but maybe it doesn't matter.
Color::rgba(RGBA::transparent())
}
/// Returns a numeric RGBA color value.
#[inline]
pub fn rgba(rgba: RGBA) -> Self {
Color::Numeric {
parsed: rgba,
authored: None,
}
}
/// Parse a color, with quirks.
///
/// <https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk>
pub fn parse_quirky<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
allow_quirks: AllowQuirks,
) -> Result<Self, ParseError<'i>> {
input.try(|i| Self::parse(context, i)).or_else(|e| {
if !allow_quirks.allowed(context.quirks_mode) {
return Err(e);
}
Color::parse_quirky_color(input)
.map(Color::rgba)
.map_err(|_| e)
})
}
/// Parse a <quirky-color> value.
///
/// <https://quirks.spec.whatwg.org/#the-hashless-hex-color-quirk>
fn parse_quirky_color<'i, 't>(input: &mut Parser<'i, 't>) -> Result<RGBA, ParseError<'i>> {
let location = input.current_source_location();
let (value, unit) = match *input.next()? {
Token::Number {
int_value: Some(integer),
..
} => (integer, None),
Token::Dimension {
int_value: Some(integer),
ref unit,
..
} => (integer, Some(unit)),
Token::Ident(ref ident) => {
if ident.len() != 3 && ident.len() != 6 {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
}
return parse_hash_color(ident.as_bytes()).map_err(|()| {
location.new_custom_error(StyleParseErrorKind::UnspecifiedError)
});
},
ref t => {
return Err(location.new_unexpected_token_error(t.clone()));
},
};
if value < 0 {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
}
let length = if value <= 9
|
else if value <= 99 {
2
} else if value <= 999 {
3
} else if value <= 9999 {
4
} else if value <= 99999 {
5
} else if value <= 999999 {
6
} else {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
};
let total = length + unit.as_ref().map_or(0, |d| d.len());
if total > 6 {
return Err(location.new_custom_error(StyleParseErrorKind::UnspecifiedError));
}
let mut serialization = [b'0'; 6];
let space_padding = 6 - total;
let mut written = space_padding;
written += itoa::write(&mut serialization[written..], value).unwrap();
if let Some(unit) = unit {
written += (&mut serialization[written..])
.write(unit.as_bytes())
.unwrap();
}
debug_assert_eq!(written, 6);
parse_hash_color(&serialization)
.map_err(|()| location.new_custom_error(StyleParseErrorKind::UnspecifiedError))
}
/// Returns true if the color is completely transparent, and false
/// otherwise.
pub fn is_transparent(&self) -> bool {
match *self {
Color::Numeric { ref parsed, .. } => parsed.alpha == 0,
_ => false,
}
}
}
#[cfg(feature = "gecko")]
fn convert_nscolor_to_computedcolor(color: nscolor) -> ComputedColor {
use crate::gecko::values::convert_nscolor_to_rgba;
ComputedColor::rgba(convert_nscolor_to_rgba(color))
}
impl Color {
/// Converts this Color into a ComputedColor.
///
/// If `context` is `None`, and the specified color requires data from
/// the context to resolve, then `None` is returned.
pub fn to_computed_color(&self, _context: Option<&Context>) -> Option<ComputedColor> {
match *self {
Color::CurrentColor => Some(ComputedColor::currentcolor()),
Color::Numeric { ref parsed, .. } => Some(ComputedColor::rgba(*parsed)),
Color::Complex(ref complex) => Some(*complex),
#[cfg(feature = "gecko")]
Color::System(system) => _context
.map(|context| convert_nscolor_to_computedcolor(system.to_computed_value(context))),
#[cfg(feature = "gecko")]
Color::Special(special) => {
use self::gecko::SpecialColorKeyword as Keyword;
_context.map(|context| {
let prefs = context.device().pref_sheet_prefs();
convert_nscolor_to_computedcolor(match special {
Keyword::MozDefaultColor => prefs.mDefaultColor,
Keyword::MozDefaultBackgroundColor => prefs.mDefaultBackgroundColor,
Keyword::MozHyperlinktext => prefs.mLinkColor,
Keyword::MozActivehyperlinktext => prefs.mActiveLinkColor,
Keyword::MozVisitedhyperlinktext => prefs.mVisitedLinkColor,
})
})
},
#[cfg(feature = "gecko")]
Color::InheritFromBodyQuirk => {
_context.map(|context| ComputedColor::rgba(context.device().body_text_color()))
},
}
}
}
impl ToComputedValue for Color {
type ComputedValue = ComputedColor;
fn to_computed_value(&self, context: &Context) -> ComputedColor {
let result = self.to_computed_color(Some(context)).unwrap();
if !result.is_numeric() {
if let Some(longhand) = context.for_non_inherited_property {
if longhand.stores_complex_colors_lossily() {
context.rule_cache_conditions.borrow_mut().set_uncacheable();
}
}
}
result
}
fn from_computed_value(computed: &ComputedColor) -> Self {
match *computed {
GenericColor::Numeric(color) => Color::rgba(color),
GenericColor::Foreground => Color::currentcolor(),
GenericColor::Complex(..) => Color::Complex(*computed),
}
}
}
/// Specified color value, but resolved to just RGBA for computed value
/// with value from color property at the same context.
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToCss)]
pub struct RGBAColor(pub Color);
impl Parse for RGBAColor {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
Color::parse(context, input).map(RGBAColor)
}
}
impl ToComputedValue for RGBAColor {
type ComputedValue = RGBA;
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0
.to_computed_value(context)
.to_rgba(context.style().get_color().clone_color())
}
fn from_computed_value(computed: &RGBA) -> Self {
RGBAColor(Color::rgba(*computed))
}
}
impl From<Color> for RGBAColor {
fn from(color: Color) -> RGBAColor {
RGBAColor(color)
}
}
impl SpecifiedValueInfo for Color {
const SUPPORTED_TYPES: u8 = CssType::COLOR;
fn collect_completion_keywords(f: KeywordsCollectFn) {
// We are not going to insert all the color names here. Caller and
// devtools should take care of them. XXX Actually, transparent
// should probably be handled that way as well.
// XXX `currentColor` should really be `currentcolor`. But let's
// keep it consistent with the old system for now.
f(&["rgb", "rgba", "hsl", "hsla", "currentColor", "transparent"]);
}
}
/// Specified value for the "color" property, which resolves the `currentcolor`
/// keyword to the parent color instead of self's color.
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[derive(Clone, Debug, PartialEq, SpecifiedValueInfo, ToCss)]
pub struct ColorPropertyValue(pub Color);
impl ToComputedValue for ColorPropertyValue {
type ComputedValue = RGBA;
#[inline]
fn to_computed_value(&self, context: &Context) -> RGBA {
self.0
.to_computed_value(context)
.to_rgba(context.builder.get_parent_color().clone_color())
}
#[inline]
fn from_computed_value(computed: &RGBA) -> Self {
ColorPropertyValue(Color::rgba(*computed).into())
}
}
impl Parse for ColorPropertyValue {
fn parse<'i, 't>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
) -> Result<Self, ParseError<'i>> {
Color::parse_quirky(context, input, AllowQuirks::Yes).map(ColorPropertyValue)
}
}
/// auto | <color>
pub type ColorOrAuto = GenericColorOrAuto<Color>;
|
{
1
}
|
conditional_block
|
sort.rs
|
#![deny(warnings)]
extern crate coreutils;
extern crate extra;
use std::env;
use std::io::{stdout, stderr, stdin, Error, Write, BufRead, BufReader};
use std::process::exit;
use std::cmp::Ordering;
use coreutils::ArgParser;
use std::fs::File;
use extra::option::OptionalExt;
const MAN_PAGE: &'static str = r#"
NAME
sort - sort lines of text files
SYNOPSIS
sort [ -h | --help | -n ] [FILE]...
DESCRIPTION
Write sorted concatenation of FILE(s) to standard output.
With no FILE, read standard input.
OPTIONS
-h
--help
display this help and exit
-n
--numeric-sort
sort numerically
"#; /* @MANEND */
fn get_first_f64(a: &str) -> f64 {
for s in a.split_whitespace() {
match s.parse::<f64>() {
Ok(a) => return a,
Err(_) => (),
}
}
return std::f64::NEG_INFINITY;
}
fn numeric_compare(a: &String, b: &String) -> Ordering {
let fa = get_first_f64(a);
let fb = get_first_f64(b);
if fa > fb {
Ordering::Greater
} else if fa < fb {
Ordering::Less
} else {
Ordering::Equal
}
}
fn lines_from_stdin() -> Result<Vec<String>, Error>
|
fn lines_from_files(paths: &Vec<&String>) -> Result<Vec<String>, Error> {
let mut lines = Vec::new();
for path in paths {
let f = try!(File::open(path));
let f = BufReader::new(f);
for line in f.lines() {
match line {
Ok(l) => lines.push(l),
Err(e) => return Err(e),
}
}
}
Ok(lines)
}
fn main() {
let stdout = stdout();
let mut stdout = stdout.lock();
let mut stderr = stderr();
let mut parser = ArgParser::new(2)
.add_flag(&["n", "numeric-sort"])
.add_flag(&["u", "unique"])
.add_flag(&["h", "help"]);
parser.parse(env::args());
if parser.found("help") {
stdout.write(MAN_PAGE.as_bytes()).try(&mut stderr);
stdout.flush().try(&mut stderr);
exit(0);
}
let lines = match parser.args.is_empty() {
true => lines_from_stdin(),
false => {
let mut paths = Vec::new();
for dir in parser.args.iter() {
paths.push(dir);
}
lines_from_files(&paths)
}
};
match lines {
Ok(mut l) => {
if parser.found("numeric-sort") {
l.sort_by(numeric_compare);
} else {
l.sort();
}
if parser.found("unique") {
l.dedup();
}
for x in l {
println!("{}", x);
}
}
Err(e) => {
let _ = write!(stderr, "{}", e);
}
}
}
|
{
let stdin = stdin();
let mut lines = Vec::new();
let f = BufReader::new(stdin.lock());
for line in f.lines() {
match line {
Ok(l) => lines.push(l),
Err(e) => return Err(e),
}
}
Ok(lines)
}
|
identifier_body
|
sort.rs
|
#![deny(warnings)]
extern crate coreutils;
extern crate extra;
use std::env;
use std::io::{stdout, stderr, stdin, Error, Write, BufRead, BufReader};
use std::process::exit;
use std::cmp::Ordering;
use coreutils::ArgParser;
use std::fs::File;
use extra::option::OptionalExt;
const MAN_PAGE: &'static str = r#"
NAME
sort - sort lines of text files
SYNOPSIS
sort [ -h | --help | -n ] [FILE]...
DESCRIPTION
Write sorted concatenation of FILE(s) to standard output.
With no FILE, read standard input.
OPTIONS
-h
--help
display this help and exit
-n
--numeric-sort
sort numerically
"#; /* @MANEND */
fn get_first_f64(a: &str) -> f64 {
for s in a.split_whitespace() {
match s.parse::<f64>() {
Ok(a) => return a,
Err(_) => (),
}
}
return std::f64::NEG_INFINITY;
}
fn numeric_compare(a: &String, b: &String) -> Ordering {
let fa = get_first_f64(a);
let fb = get_first_f64(b);
if fa > fb {
Ordering::Greater
} else if fa < fb {
Ordering::Less
} else {
Ordering::Equal
}
}
fn lines_from_stdin() -> Result<Vec<String>, Error> {
let stdin = stdin();
let mut lines = Vec::new();
let f = BufReader::new(stdin.lock());
for line in f.lines() {
match line {
Ok(l) => lines.push(l),
Err(e) => return Err(e),
}
}
Ok(lines)
}
fn lines_from_files(paths: &Vec<&String>) -> Result<Vec<String>, Error> {
let mut lines = Vec::new();
for path in paths {
let f = try!(File::open(path));
let f = BufReader::new(f);
for line in f.lines() {
match line {
Ok(l) => lines.push(l),
Err(e) => return Err(e),
}
}
}
Ok(lines)
}
fn
|
() {
let stdout = stdout();
let mut stdout = stdout.lock();
let mut stderr = stderr();
let mut parser = ArgParser::new(2)
.add_flag(&["n", "numeric-sort"])
.add_flag(&["u", "unique"])
.add_flag(&["h", "help"]);
parser.parse(env::args());
if parser.found("help") {
stdout.write(MAN_PAGE.as_bytes()).try(&mut stderr);
stdout.flush().try(&mut stderr);
exit(0);
}
let lines = match parser.args.is_empty() {
true => lines_from_stdin(),
false => {
let mut paths = Vec::new();
for dir in parser.args.iter() {
paths.push(dir);
}
lines_from_files(&paths)
}
};
match lines {
Ok(mut l) => {
if parser.found("numeric-sort") {
l.sort_by(numeric_compare);
} else {
l.sort();
}
if parser.found("unique") {
l.dedup();
}
for x in l {
println!("{}", x);
}
}
Err(e) => {
let _ = write!(stderr, "{}", e);
}
}
}
|
main
|
identifier_name
|
sort.rs
|
#![deny(warnings)]
extern crate coreutils;
extern crate extra;
use std::env;
use std::io::{stdout, stderr, stdin, Error, Write, BufRead, BufReader};
use std::process::exit;
use std::cmp::Ordering;
use coreutils::ArgParser;
use std::fs::File;
use extra::option::OptionalExt;
const MAN_PAGE: &'static str = r#"
NAME
sort - sort lines of text files
SYNOPSIS
sort [ -h | --help | -n ] [FILE]...
DESCRIPTION
Write sorted concatenation of FILE(s) to standard output.
With no FILE, read standard input.
OPTIONS
-h
--help
display this help and exit
-n
--numeric-sort
sort numerically
"#; /* @MANEND */
fn get_first_f64(a: &str) -> f64 {
for s in a.split_whitespace() {
match s.parse::<f64>() {
Ok(a) => return a,
Err(_) => (),
}
}
return std::f64::NEG_INFINITY;
}
fn numeric_compare(a: &String, b: &String) -> Ordering {
let fa = get_first_f64(a);
let fb = get_first_f64(b);
if fa > fb {
Ordering::Greater
} else if fa < fb {
Ordering::Less
} else {
Ordering::Equal
}
}
fn lines_from_stdin() -> Result<Vec<String>, Error> {
let stdin = stdin();
let mut lines = Vec::new();
let f = BufReader::new(stdin.lock());
for line in f.lines() {
match line {
Ok(l) => lines.push(l),
Err(e) => return Err(e),
}
}
Ok(lines)
}
fn lines_from_files(paths: &Vec<&String>) -> Result<Vec<String>, Error> {
let mut lines = Vec::new();
for path in paths {
let f = try!(File::open(path));
let f = BufReader::new(f);
for line in f.lines() {
match line {
Ok(l) => lines.push(l),
Err(e) => return Err(e),
}
}
}
Ok(lines)
}
fn main() {
let stdout = stdout();
let mut stdout = stdout.lock();
let mut stderr = stderr();
let mut parser = ArgParser::new(2)
.add_flag(&["n", "numeric-sort"])
.add_flag(&["u", "unique"])
.add_flag(&["h", "help"]);
parser.parse(env::args());
if parser.found("help") {
stdout.write(MAN_PAGE.as_bytes()).try(&mut stderr);
stdout.flush().try(&mut stderr);
exit(0);
}
let lines = match parser.args.is_empty() {
true => lines_from_stdin(),
false => {
let mut paths = Vec::new();
for dir in parser.args.iter() {
paths.push(dir);
}
lines_from_files(&paths)
}
};
match lines {
Ok(mut l) => {
if parser.found("numeric-sort") {
|
}
if parser.found("unique") {
l.dedup();
}
for x in l {
println!("{}", x);
}
}
Err(e) => {
let _ = write!(stderr, "{}", e);
}
}
}
|
l.sort_by(numeric_compare);
} else {
l.sort();
|
random_line_split
|
sort.rs
|
#![deny(warnings)]
extern crate coreutils;
extern crate extra;
use std::env;
use std::io::{stdout, stderr, stdin, Error, Write, BufRead, BufReader};
use std::process::exit;
use std::cmp::Ordering;
use coreutils::ArgParser;
use std::fs::File;
use extra::option::OptionalExt;
const MAN_PAGE: &'static str = r#"
NAME
sort - sort lines of text files
SYNOPSIS
sort [ -h | --help | -n ] [FILE]...
DESCRIPTION
Write sorted concatenation of FILE(s) to standard output.
With no FILE, read standard input.
OPTIONS
-h
--help
display this help and exit
-n
--numeric-sort
sort numerically
"#; /* @MANEND */
fn get_first_f64(a: &str) -> f64 {
for s in a.split_whitespace() {
match s.parse::<f64>() {
Ok(a) => return a,
Err(_) => (),
}
}
return std::f64::NEG_INFINITY;
}
fn numeric_compare(a: &String, b: &String) -> Ordering {
let fa = get_first_f64(a);
let fb = get_first_f64(b);
if fa > fb {
Ordering::Greater
} else if fa < fb {
Ordering::Less
} else {
Ordering::Equal
}
}
fn lines_from_stdin() -> Result<Vec<String>, Error> {
let stdin = stdin();
let mut lines = Vec::new();
let f = BufReader::new(stdin.lock());
for line in f.lines() {
match line {
Ok(l) => lines.push(l),
Err(e) => return Err(e),
}
}
Ok(lines)
}
fn lines_from_files(paths: &Vec<&String>) -> Result<Vec<String>, Error> {
let mut lines = Vec::new();
for path in paths {
let f = try!(File::open(path));
let f = BufReader::new(f);
for line in f.lines() {
match line {
Ok(l) => lines.push(l),
Err(e) => return Err(e),
}
}
}
Ok(lines)
}
fn main() {
let stdout = stdout();
let mut stdout = stdout.lock();
let mut stderr = stderr();
let mut parser = ArgParser::new(2)
.add_flag(&["n", "numeric-sort"])
.add_flag(&["u", "unique"])
.add_flag(&["h", "help"]);
parser.parse(env::args());
if parser.found("help")
|
let lines = match parser.args.is_empty() {
true => lines_from_stdin(),
false => {
let mut paths = Vec::new();
for dir in parser.args.iter() {
paths.push(dir);
}
lines_from_files(&paths)
}
};
match lines {
Ok(mut l) => {
if parser.found("numeric-sort") {
l.sort_by(numeric_compare);
} else {
l.sort();
}
if parser.found("unique") {
l.dedup();
}
for x in l {
println!("{}", x);
}
}
Err(e) => {
let _ = write!(stderr, "{}", e);
}
}
}
|
{
stdout.write(MAN_PAGE.as_bytes()).try(&mut stderr);
stdout.flush().try(&mut stderr);
exit(0);
}
|
conditional_block
|
lib.rs
|
extern crate num;
pub use base64::{u8_to_base64, base64_to_u8, big_to_base64, chunk_to_bytes};
pub use buffer::{Buff};
pub use english::{english_dict, lowercase, levenshtein, score_fn, count};
pub use xor_cipher::{single_xor, hamming};
use std::io::{File, BufferedReader, IoError};
pub mod base64;
pub mod buffer;
pub mod english;
pub mod xor_cipher;
fn unwrap_and_noline(x: Result<String, IoError>) -> String {
let mut unwrapped = x.unwrap();
let n = unwrapped.len();
unwrapped.truncate(n-1);
unwrapped
}
pub fn read_file(input: &str) -> Vec<String>
|
{
let path = Path::new(input);
let file = match File::open(&path) {
Ok(t) => t,
Err(e) => fail!("Couldn't open {}, got error {}", input, e)
};
let mut reader = BufferedReader::new(file);
reader.lines().map(unwrap_and_noline).collect()
}
|
identifier_body
|
|
lib.rs
|
extern crate num;
pub use base64::{u8_to_base64, base64_to_u8, big_to_base64, chunk_to_bytes};
pub use buffer::{Buff};
pub use english::{english_dict, lowercase, levenshtein, score_fn, count};
pub use xor_cipher::{single_xor, hamming};
use std::io::{File, BufferedReader, IoError};
pub mod base64;
pub mod buffer;
pub mod english;
pub mod xor_cipher;
fn
|
(x: Result<String, IoError>) -> String {
let mut unwrapped = x.unwrap();
let n = unwrapped.len();
unwrapped.truncate(n-1);
unwrapped
}
pub fn read_file(input: &str) -> Vec<String> {
let path = Path::new(input);
let file = match File::open(&path) {
Ok(t) => t,
Err(e) => fail!("Couldn't open {}, got error {}", input, e)
};
let mut reader = BufferedReader::new(file);
reader.lines().map(unwrap_and_noline).collect()
}
|
unwrap_and_noline
|
identifier_name
|
lib.rs
|
extern crate num;
pub use base64::{u8_to_base64, base64_to_u8, big_to_base64, chunk_to_bytes};
pub use buffer::{Buff};
pub use english::{english_dict, lowercase, levenshtein, score_fn, count};
pub use xor_cipher::{single_xor, hamming};
use std::io::{File, BufferedReader, IoError};
pub mod base64;
pub mod buffer;
pub mod english;
pub mod xor_cipher;
fn unwrap_and_noline(x: Result<String, IoError>) -> String {
let mut unwrapped = x.unwrap();
let n = unwrapped.len();
unwrapped.truncate(n-1);
unwrapped
}
pub fn read_file(input: &str) -> Vec<String> {
let path = Path::new(input);
let file = match File::open(&path) {
Ok(t) => t,
Err(e) => fail!("Couldn't open {}, got error {}", input, e)
|
};
let mut reader = BufferedReader::new(file);
reader.lines().map(unwrap_and_noline).collect()
}
|
random_line_split
|
|
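// The lib.rs rows above target a pre-1.0 std::io (File, BufferedReader, fail!). A sketch
// of the same file-to-lines routine against current std; `read_lines` is an assumed name,
// not the crate's API. Note that BufRead::lines() already strips the line terminator,
// which is exactly what `unwrap_and_noline` does by hand with truncate.
use std::fs::File;
use std::io::{BufRead, BufReader};

fn read_lines(path: &str) -> Vec<String> {
    let file = File::open(path)
        .unwrap_or_else(|e| panic!("Couldn't open {}, got error {}", path, e));
    BufReader::new(file).lines().map(|l| l.unwrap()).collect()
}

fn main() {
    // "Cargo.toml" is only a placeholder path for the example.
    println!("{} lines", read_lines("Cargo.toml").len());
}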
htmlolistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLOListElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLOListElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct HTMLOListElement {
htmlelement: HTMLElement,
}
impl HTMLOListElementDerived for EventTarget {
fn is_htmlolistelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLOListElement)))
}
}
impl HTMLOListElement {
fn
|
(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLOListElement {
HTMLOListElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLOListElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLOListElement> {
let element = HTMLOListElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLOListElementBinding::Wrap)
}
}
|
new_inherited
|
identifier_name
|
htmlolistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLOListElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLOListElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct HTMLOListElement {
htmlelement: HTMLElement,
}
impl HTMLOListElementDerived for EventTarget {
fn is_htmlolistelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLOListElement)))
}
}
impl HTMLOListElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLOListElement {
HTMLOListElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLOListElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLOListElement>
|
}
|
{
let element = HTMLOListElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLOListElementBinding::Wrap)
}
|
identifier_body
|
htmlolistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLOListElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLOListElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct HTMLOListElement {
htmlelement: HTMLElement,
}
impl HTMLOListElementDerived for EventTarget {
fn is_htmlolistelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLOListElement)))
}
}
impl HTMLOListElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLOListElement {
HTMLOListElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLOListElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
|
document: &Document) -> Root<HTMLOListElement> {
let element = HTMLOListElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLOListElementBinding::Wrap)
}
}
|
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
|
random_line_split
|
random_gen.rs
|
extern crate time;
use std::sync::RwLock;
pub trait PRNG {
fn get(&mut self) -> u32;
fn set_seed(&mut self, seed: u32);
}
pub fn set_seed_time(prng: &mut PRNG) {
prng.set_seed((time::precise_time_ns() as f64 / 1000.0) as u32);
}
pub fn get_target(prng: &mut PRNG, t: u32) -> u32 {
let v = get_01(prng);
(v * t as f64) as u32
}
pub fn get_range(prng: &mut PRNG, low: u32, high: u32) -> u32 {
let (low, high) = if high < low { (high, low) } else { (low, high) };
let rg = ((high - low) + 1) as f64;
let val = low as f64 + get_01(prng) * rg;
val as u32
}
pub fn get_01(prng: &mut PRNG) -> f64 {
prng.get() as f64 / 4294967295f64
}
pub struct LCG {
m_state: u32,
}
impl LCG {
pub fn new() -> LCG {
let mut lcg = LCG { m_state: 0 };
lcg.set_seed(10000);
lcg
}
}
impl PRNG for LCG {
fn get(&mut self) -> u32 {
use std::num::Wrapping;
self.m_state = (Wrapping(69069) * Wrapping(self.m_state) + Wrapping(362437)).0;
self.m_state
}
fn set_seed(&mut self, seed: u32) {
self.m_state = seed;
}
}
// Setup a static, global LCG for seeding other generators.
lazy_static! {
static ref _LCG: RwLock<LCG> = RwLock::new(LCG::new());
}
// The following generators are based on generators created by George Marsaglia
// They use the static lcg created above for seeding, to initialize various
// state and tables. Seeding them is a bit more involved than an LCG.
pub struct Xorshift {
m_x: u32,
m_y: u32,
m_z: u32,
m_w: u32,
m_v: u32,
}
impl Xorshift {
pub fn new() -> Xorshift
|
}
impl PRNG for Xorshift {
fn get(&mut self) -> u32 {
let t = self.m_x ^ (self.m_x >> 7);
self.m_x = self.m_y;
self.m_y = self.m_z;
self.m_z = self.m_w;
self.m_w = self.m_v;
self.m_v = (self.m_v ^ (self.m_v << 6)) ^ (t ^ (t << 13));
(self.m_y + self.m_y + 1) * self.m_v
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
self.m_x = lcg.get();
self.m_y = lcg.get();
self.m_z = lcg.get();
self.m_w = lcg.get();
self.m_v = lcg.get();
}
}
pub struct MWC256 {
m_q: [u32; 256],
c: u32,
}
lazy_static! {
static ref _MWC256: RwLock<usize> = RwLock::new(255);
}
impl MWC256 {
pub fn new() -> MWC256 {
let mut m = MWC256 {
m_q: [0; 256],
c: 0,
};
m.set_seed(10000);
m
}
}
impl PRNG for MWC256 {
fn get(&mut self) -> u32 {
let t: u64;
let a = 809430660u64;
let mut i = _MWC256.write().unwrap();
t = a * self.m_q[*i] as u64 + self.c as u64;
*i = (*i + 1) & 255; // wrap the ring index (255 -> 0) so m_q[256] is never touched
self.c = (t >> 32) as u32;
self.m_q[*i] = t as u32;
self.m_q[*i]
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
for i in 0..256 {
self.m_q[i] = lcg.get();
}
self.c = get_target(&mut *lcg, 809430660);
}
}
pub struct CMWC4096 {
m_q: [u32; 4096],
c: u32,
}
lazy_static! {
static ref _CMWC4096: RwLock<usize> = RwLock::new(2095);
}
impl CMWC4096 {
pub fn new() -> CMWC4096 {
let mut m = CMWC4096 {
m_q: [0; 4096],
c: 0,
};
m.set_seed(10000);
m
}
}
impl PRNG for CMWC4096 {
fn get(&mut self) -> u32 {
let mut t: u64;
let a = 18782u64;
let b = 4294967295u64;
let r = b - 1;
let mut i = _CMWC4096.write().unwrap();
*i = (*i + 1) & 4095;
t = a * self.m_q[*i] as u64 + self.c as u64;
self.c = (t >> 32) as u32;
t = (t & b) + self.c as u64;
if t > r {
self.c += 1;
t = t - b;
}
self.m_q[*i] = (r - t) as u32;
self.m_q[*i]
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
for i in 0..4096 {
self.m_q[i] = lcg.get()
}
self.c = get_target(&mut *lcg, 18781)
}
}
pub struct KISS {
z: u32,
w: u32,
jsr: u32,
jcong: u32,
}
impl KISS {
pub fn new() -> KISS {
let mut k = KISS {
z: 0,
w: 0,
jsr: 0,
jcong: 0,
};
k.set_seed(10000);
k
}
}
impl PRNG for KISS {
fn get(&mut self) -> u32 {
self.z = 36969 * (self.z & 65535) + (self.z >> 16);
self.w = 18000 * (self.w & 65535) + (self.w >> 16);
let mwc = (self.z << 16) + self.w;
self.jcong = 69069 * self.jcong + 1234567;
self.jsr ^= self.jsr << 17;
self.jsr ^= self.jsr >> 13;
self.jsr ^= self.jsr << 5;
(mwc ^ self.jcong) + self.jsr
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
self.z = lcg.get();
self.w = lcg.get();
self.jsr = lcg.get();
self.jcong = lcg.get();
}
}
|
{
let mut x = Xorshift {
m_x: 0,
m_y: 0,
m_z: 0,
m_w: 0,
m_v: 0,
};
x.set_seed(10000);
x
}
|
identifier_body
|
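// A self-contained miniature of the LCG recurrence the random_gen rows build on
// (constants 69069 / 362437 as in the sample). `TinyLcg` and its methods are
// illustrative names, not the crate's API.
use std::num::Wrapping;

struct TinyLcg {
    state: u32,
}

impl TinyLcg {
    fn new(seed: u32) -> Self {
        TinyLcg { state: seed }
    }
    fn next_u32(&mut self) -> u32 {
        // Same wrapping multiply-add used by LCG::get above.
        self.state = (Wrapping(69069u32) * Wrapping(self.state) + Wrapping(362437)).0;
        self.state
    }
    // Uniform value in [0, 1], matching get_01 in the sample.
    fn next_01(&mut self) -> f64 {
        self.next_u32() as f64 / 4294967295f64
    }
}

fn main() {
    let mut rng = TinyLcg::new(10000);
    let x = rng.next_01();
    assert!((0.0..=1.0).contains(&x));
}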
random_gen.rs
|
extern crate time;
use std::sync::RwLock;
pub trait PRNG {
fn get(&mut self) -> u32;
fn set_seed(&mut self, seed: u32);
}
pub fn set_seed_time(prng: &mut PRNG) {
prng.set_seed((time::precise_time_ns() as f64 / 1000.0) as u32);
}
pub fn get_target(prng: &mut PRNG, t: u32) -> u32 {
let v = get_01(prng);
(v * t as f64) as u32
}
pub fn get_range(prng: &mut PRNG, low: u32, high: u32) -> u32 {
let (low, high) = if high < low { (high, low) } else { (low, high) };
let rg = ((high - low) + 1) as f64;
let val = low as f64 + get_01(prng) * rg;
val as u32
}
pub fn get_01(prng: &mut PRNG) -> f64 {
prng.get() as f64 / 4294967295f64
}
pub struct LCG {
m_state: u32,
}
impl LCG {
pub fn new() -> LCG {
let mut lcg = LCG { m_state: 0 };
lcg.set_seed(10000);
lcg
}
}
impl PRNG for LCG {
fn get(&mut self) -> u32 {
use std::num::Wrapping;
self.m_state = (Wrapping(69069) * Wrapping(self.m_state) + Wrapping(362437)).0;
self.m_state
}
fn set_seed(&mut self, seed: u32) {
self.m_state = seed;
}
}
// Setup a static, global LCG for seeding other generators.
lazy_static! {
static ref _LCG: RwLock<LCG> = RwLock::new(LCG::new());
}
// The following generators are based on generators created by George Marsaglia
// They use the static lcg created above for seeding, to initialize various
// state and tables. Seeding them is a bit more involved than an LCG.
pub struct Xorshift {
m_x: u32,
m_y: u32,
m_z: u32,
m_w: u32,
m_v: u32,
}
impl Xorshift {
pub fn new() -> Xorshift {
let mut x = Xorshift {
m_x: 0,
m_y: 0,
m_z: 0,
m_w: 0,
m_v: 0,
};
x.set_seed(10000);
x
}
}
impl PRNG for Xorshift {
fn get(&mut self) -> u32 {
let t = self.m_x ^ (self.m_x >> 7);
self.m_x = self.m_y;
self.m_y = self.m_z;
self.m_z = self.m_w;
self.m_w = self.m_v;
self.m_v = (self.m_v ^ (self.m_v << 6)) ^ (t ^ (t << 13));
(self.m_y + self.m_y + 1) * self.m_v
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
self.m_x = lcg.get();
self.m_y = lcg.get();
self.m_z = lcg.get();
self.m_w = lcg.get();
self.m_v = lcg.get();
}
}
pub struct MWC256 {
m_q: [u32; 256],
c: u32,
}
lazy_static! {
static ref _MWC256: RwLock<usize> = RwLock::new(255);
}
impl MWC256 {
pub fn new() -> MWC256 {
let mut m = MWC256 {
m_q: [0; 256],
c: 0,
};
m.set_seed(10000);
m
}
}
impl PRNG for MWC256 {
fn get(&mut self) -> u32 {
let t: u64;
let a = 809430660u64;
let mut i = _MWC256.write().unwrap();
t = a * self.m_q[*i] as u64 + self.c as u64;
*i = (*i + 1) & 255; // wrap the ring index (255 -> 0) so m_q[256] is never touched
self.c = (t >> 32) as u32;
self.m_q[*i] = t as u32;
self.m_q[*i]
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
for i in 0..256 {
self.m_q[i] = lcg.get();
}
self.c = get_target(&mut *lcg, 809430660);
}
}
pub struct CMWC4096 {
m_q: [u32; 4096],
c: u32,
}
lazy_static! {
static ref _CMWC4096: RwLock<usize> = RwLock::new(2095);
}
impl CMWC4096 {
pub fn
|
() -> CMWC4096 {
let mut m = CMWC4096 {
m_q: [0; 4096],
c: 0,
};
m.set_seed(10000);
m
}
}
impl PRNG for CMWC4096 {
fn get(&mut self) -> u32 {
let mut t: u64;
let a = 18782u64;
let b = 4294967295u64;
let r = b - 1;
let mut i = _CMWC4096.write().unwrap();
*i = (*i + 1) & 4095;
t = a * self.m_q[*i] as u64 + self.c as u64;
self.c = (t >> 32) as u32;
t = (t & b) + self.c as u64;
if t > r {
self.c += 1;
t = t - b;
}
self.m_q[*i] = (r - t) as u32;
self.m_q[*i]
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
for i in 0..4096 {
self.m_q[i] = lcg.get()
}
self.c = get_target(&mut *lcg, 18781)
}
}
pub struct KISS {
z: u32,
w: u32,
jsr: u32,
jcong: u32,
}
impl KISS {
pub fn new() -> KISS {
let mut k = KISS {
z: 0,
w: 0,
jsr: 0,
jcong: 0,
};
k.set_seed(10000);
k
}
}
impl PRNG for KISS {
fn get(&mut self) -> u32 {
self.z = 36969 * (self.z & 65535) + (self.z >> 16);
self.w = 18000 * (self.w & 65535) + (self.w >> 16);
let mwc = (self.z << 16) + self.w;
self.jcong = 69069 * self.jcong + 1234567;
self.jsr ^= self.jsr << 17;
self.jsr ^= self.jsr >> 13;
self.jsr ^= self.jsr << 5;
(mwc ^ self.jcong) + self.jsr
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
self.z = lcg.get();
self.w = lcg.get();
self.jsr = lcg.get();
self.jcong = lcg.get();
}
}
|
new
|
identifier_name
|
random_gen.rs
|
extern crate time;
use std::sync::RwLock;
pub trait PRNG {
fn get(&mut self) -> u32;
fn set_seed(&mut self, seed: u32);
}
pub fn set_seed_time(prng: &mut PRNG) {
prng.set_seed((time::precise_time_ns() as f64 / 1000.0) as u32);
}
pub fn get_target(prng: &mut PRNG, t: u32) -> u32 {
let v = get_01(prng);
(v * t as f64) as u32
}
pub fn get_range(prng: &mut PRNG, low: u32, high: u32) -> u32 {
let (low, high) = if high < low { (high, low) } else
|
;
let rg = ((high - low) + 1) as f64;
let val = low as f64 + get_01(prng) * rg;
val as u32
}
pub fn get_01(prng: &mut PRNG) -> f64 {
prng.get() as f64 / 4294967295f64
}
pub struct LCG {
m_state: u32,
}
impl LCG {
pub fn new() -> LCG {
let mut lcg = LCG { m_state: 0 };
lcg.set_seed(10000);
lcg
}
}
impl PRNG for LCG {
fn get(&mut self) -> u32 {
use std::num::Wrapping;
self.m_state = (Wrapping(69069) * Wrapping(self.m_state) + Wrapping(362437)).0;
self.m_state
}
fn set_seed(&mut self, seed: u32) {
self.m_state = seed;
}
}
// Setup a static, global LCG for seeding other generators.
lazy_static! {
static ref _LCG: RwLock<LCG> = RwLock::new(LCG::new());
}
// The following generators are based on generators created by George Marsaglia
// They use the static lcg created above for seeding, to initialize various
// state and tables. Seeding them is a bit more involved than an LCG.
pub struct Xorshift {
m_x: u32,
m_y: u32,
m_z: u32,
m_w: u32,
m_v: u32,
}
impl Xorshift {
pub fn new() -> Xorshift {
let mut x = Xorshift {
m_x: 0,
m_y: 0,
m_z: 0,
m_w: 0,
m_v: 0,
};
x.set_seed(10000);
x
}
}
impl PRNG for Xorshift {
fn get(&mut self) -> u32 {
let t = self.m_x ^ (self.m_x >> 7);
self.m_x = self.m_y;
self.m_y = self.m_z;
self.m_z = self.m_w;
self.m_w = self.m_v;
self.m_v = (self.m_v ^ (self.m_v << 6)) ^ (t ^ (t << 13));
(self.m_y + self.m_y + 1) * self.m_v
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
self.m_x = lcg.get();
self.m_y = lcg.get();
self.m_z = lcg.get();
self.m_w = lcg.get();
self.m_v = lcg.get();
}
}
pub struct MWC256 {
m_q: [u32; 256],
c: u32,
}
lazy_static! {
static ref _MWC256: RwLock<usize> = RwLock::new(255);
}
impl MWC256 {
pub fn new() -> MWC256 {
let mut m = MWC256 {
m_q: [0; 256],
c: 0,
};
m.set_seed(10000);
m
}
}
impl PRNG for MWC256 {
fn get(&mut self) -> u32 {
let t: u64;
let a = 809430660u64;
let mut i = _MWC256.write().unwrap();
t = a * self.m_q[*i] as u64 + self.c as u64;
*i = (*i + 1) & 255; // wrap the ring index (255 -> 0) so m_q[256] is never touched
self.c = (t >> 32) as u32;
self.m_q[*i] = t as u32;
self.m_q[*i]
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
for i in 0..256 {
self.m_q[i] = lcg.get();
}
self.c = get_target(&mut *lcg, 809430660);
}
}
pub struct CMWC4096 {
m_q: [u32; 4096],
c: u32,
}
lazy_static! {
static ref _CMWC4096: RwLock<usize> = RwLock::new(2095);
}
impl CMWC4096 {
pub fn new() -> CMWC4096 {
let mut m = CMWC4096 {
m_q: [0; 4096],
c: 0,
};
m.set_seed(10000);
m
}
}
impl PRNG for CMWC4096 {
fn get(&mut self) -> u32 {
let mut t: u64;
let a = 18782u64;
let b = 4294967295u64;
let r = b - 1;
let mut i = _CMWC4096.write().unwrap();
*i = (*i + 1) & 4095;
t = a * self.m_q[*i] as u64 + self.c as u64;
self.c = (t >> 32) as u32;
t = (t & b) + self.c as u64;
if t > r {
self.c += 1;
t = t - b;
}
self.m_q[*i] = (r - t) as u32;
self.m_q[*i]
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
for i in 0..4096 {
self.m_q[i] = lcg.get()
}
self.c = get_target(&mut *lcg, 18781)
}
}
pub struct KISS {
z: u32,
w: u32,
jsr: u32,
jcong: u32,
}
impl KISS {
pub fn new() -> KISS {
let mut k = KISS {
z: 0,
w: 0,
jsr: 0,
jcong: 0,
};
k.set_seed(10000);
k
}
}
impl PRNG for KISS {
fn get(&mut self) -> u32 {
self.z = 36969 * (self.z & 65535) + (self.z >> 16);
self.w = 18000 * (self.w & 65535) + (self.w >> 16);
let mwc = (self.z << 16) + self.w;
self.jcong = 69069 * self.jcong + 1234567;
self.jsr ^= self.jsr << 17;
self.jsr ^= self.jsr >> 13;
self.jsr ^= self.jsr << 5;
(mwc ^ self.jcong) + self.jsr
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
self.z = lcg.get();
self.w = lcg.get();
self.jsr = lcg.get();
self.jcong = lcg.get();
}
}
|
{ (low, high) }
|
conditional_block
|
random_gen.rs
|
extern crate time;
use std::sync::RwLock;
pub trait PRNG {
fn get(&mut self) -> u32;
fn set_seed(&mut self, seed: u32);
}
pub fn set_seed_time(prng: &mut PRNG) {
prng.set_seed((time::precise_time_ns() as f64 / 1000.0) as u32);
}
pub fn get_target(prng: &mut PRNG, t: u32) -> u32 {
let v = get_01(prng);
(v * t as f64) as u32
}
pub fn get_range(prng: &mut PRNG, low: u32, high: u32) -> u32 {
let (low, high) = if high < low { (high, low) } else { (low, high) };
let rg = ((high - low) + 1) as f64;
let val = low as f64 + get_01(prng) * rg;
val as u32
}
pub fn get_01(prng: &mut PRNG) -> f64 {
prng.get() as f64 / 4294967295f64
}
pub struct LCG {
m_state: u32,
}
impl LCG {
pub fn new() -> LCG {
let mut lcg = LCG { m_state: 0 };
lcg.set_seed(10000);
lcg
}
}
impl PRNG for LCG {
fn get(&mut self) -> u32 {
use std::num::Wrapping;
self.m_state = (Wrapping(69069) * Wrapping(self.m_state) + Wrapping(362437)).0;
self.m_state
}
fn set_seed(&mut self, seed: u32) {
self.m_state = seed;
}
}
// Setup a static, global LCG for seeding other generators.
lazy_static! {
static ref _LCG: RwLock<LCG> = RwLock::new(LCG::new());
}
// The following generators are based on generators created by George Marsaglia
// They use the static lcg created above for seeding, to initialize various
// state and tables. Seeding them is a bit more involved than an LCG.
pub struct Xorshift {
m_x: u32,
m_y: u32,
m_z: u32,
m_w: u32,
m_v: u32,
}
impl Xorshift {
pub fn new() -> Xorshift {
let mut x = Xorshift {
m_x: 0,
m_y: 0,
m_z: 0,
m_w: 0,
m_v: 0,
};
x.set_seed(10000);
x
}
}
|
fn get(&mut self) -> u32 {
let t = self.m_x ^ (self.m_x >> 7);
self.m_x = self.m_y;
self.m_y = self.m_z;
self.m_z = self.m_w;
self.m_w = self.m_v;
self.m_v = (self.m_v ^ (self.m_v << 6)) ^ (t ^ (t << 13));
(self.m_y + self.m_y + 1) * self.m_v
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
self.m_x = lcg.get();
self.m_y = lcg.get();
self.m_z = lcg.get();
self.m_w = lcg.get();
self.m_v = lcg.get();
}
}
pub struct MWC256 {
m_q: [u32; 256],
c: u32,
}
lazy_static! {
static ref _MWC256: RwLock<usize> = RwLock::new(255);
}
impl MWC256 {
pub fn new() -> MWC256 {
let mut m = MWC256 {
m_q: [0; 256],
c: 0,
};
m.set_seed(10000);
m
}
}
impl PRNG for MWC256 {
fn get(&mut self) -> u32 {
let t: u64;
let a = 809430660u64;
let mut i = _MWC256.write().unwrap();
t = a * self.m_q[*i] as u64 + self.c as u64;
*i = (*i + 1) & 255; // wrap the ring index (255 -> 0) so m_q[256] is never touched
self.c = (t >> 32) as u32;
self.m_q[*i] = t as u32;
self.m_q[*i]
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
for i in 0..256 {
self.m_q[i] = lcg.get();
}
self.c = get_target(&mut *lcg, 809430660);
}
}
pub struct CMWC4096 {
m_q: [u32; 4096],
c: u32,
}
lazy_static! {
static ref _CMWC4096: RwLock<usize> = RwLock::new(2095);
}
impl CMWC4096 {
pub fn new() -> CMWC4096 {
let mut m = CMWC4096 {
m_q: [0; 4096],
c: 0,
};
m.set_seed(10000);
m
}
}
impl PRNG for CMWC4096 {
fn get(&mut self) -> u32 {
let mut t: u64;
let a = 18782u64;
let b = 4294967295u64;
let r = b - 1;
let mut i = _CMWC4096.write().unwrap();
*i = (*i + 1) & 4095;
t = a * self.m_q[*i] as u64 + self.c as u64;
self.c = (t >> 32) as u32;
t = (t & b) + self.c as u64;
if t > r {
self.c += 1;
t = t - b;
}
self.m_q[*i] = (r - t) as u32;
self.m_q[*i]
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
for i in 0..4096 {
self.m_q[i] = lcg.get()
}
self.c = get_target(&mut *lcg, 18781)
}
}
pub struct KISS {
z: u32,
w: u32,
jsr: u32,
jcong: u32,
}
impl KISS {
pub fn new() -> KISS {
let mut k = KISS {
z: 0,
w: 0,
jsr: 0,
jcong: 0,
};
k.set_seed(10000);
k
}
}
impl PRNG for KISS {
fn get(&mut self) -> u32 {
self.z = 36969 * (self.z & 65535) + (self.z >> 16);
self.w = 18000 * (self.w & 65535) + (self.w >> 16);
let mwc = (self.z << 16) + self.w;
self.jcong = 69069 * self.jcong + 1234567;
self.jsr ^= self.jsr << 17;
self.jsr ^= self.jsr >> 13;
self.jsr ^= self.jsr << 5;
(mwc ^ self.jcong) + self.jsr
}
fn set_seed(&mut self, seed: u32) {
let mut lcg = _LCG.write().unwrap();
lcg.set_seed(seed);
self.z = lcg.get();
self.w = lcg.get();
self.jsr = lcg.get();
self.jcong = lcg.get();
}
}
|
impl PRNG for Xorshift {
|
random_line_split
|
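// Quick check of the range mapping used by get_range above: a uniform value in [0, 1]
// scaled by (high - low + 1) and offset by low lands in [low, high]. `map_to_range` is an
// assumed name, and the final clamp is an addition for the u01 == 1.0 edge case (reached
// in the sample only when the raw u32 equals u32::MAX).
fn map_to_range(u01: f64, low: u32, high: u32) -> u32 {
    let (low, high) = if high < low { (high, low) } else { (low, high) };
    let width = (high - low + 1) as f64;
    (low as f64 + u01 * width).min(high as f64) as u32
}

fn main() {
    assert_eq!(map_to_range(0.0, 3, 7), 3);
    assert_eq!(map_to_range(0.999, 3, 7), 7);
}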
extract_bits.rs
|
use word::{Word, ToWord};
/// Extract bits [`start`, `start + length`) from `x` into the lower bits
/// of the result.
///
/// # Keywords:
///
/// Gather bit range.
///
/// # Intrinsics:
/// - BMI 1.0: bextr.
///
/// # Examples
///
/// ```
/// use bitwise::word::*;
///
/// let n = 0b1011_1110_1001_0011u16;
///
/// assert_eq!(n.extract_bits(1u8, 4u8), 0b1001);
/// ```
#[inline]
pub fn extract_bits<T: Word, U: Word>(x: T, start: U, length: U) -> T {
x.bextr(start.to(), length.to())
}
/// Method version of [`extract_bits`](fn.extract_bits.html).
pub trait ExtractBits {
#[inline]
fn extract_bits<U: Word>(self, U, U) -> Self;
}
impl<T: Word> ExtractBits for T {
#[inline]
fn
|
<U: Word>(self, start: U, length: U) -> Self {
extract_bits(self, start, length)
}
}
|
extract_bits
|
identifier_name
|
extract_bits.rs
|
use word::{Word, ToWord};
/// Extract bits [`start`, `start + length`) from `x` into the lower bits
/// of the result.
///
/// # Keywords:
///
/// Gather bit range.
///
/// # Intrinsics:
/// - BMI 1.0: bextr.
///
/// # Examples
///
/// ```
/// use bitwise::word::*;
///
/// let n = 0b1011_1110_1001_0011u16;
///
/// assert_eq!(n.extract_bits(1u8, 4u8), 0b1001);
/// ```
#[inline]
pub fn extract_bits<T: Word, U: Word>(x: T, start: U, length: U) -> T {
x.bextr(start.to(), length.to())
}
/// Method version of [`extract_bits`](fn.extract_bits.html).
pub trait ExtractBits {
#[inline]
fn extract_bits<U: Word>(self, U, U) -> Self;
}
impl<T: Word> ExtractBits for T {
#[inline]
fn extract_bits<U: Word>(self, start: U, length: U) -> Self
|
}
|
{
extract_bits(self, start, length)
}
|
identifier_body
|
extract_bits.rs
|
use word::{Word, ToWord};
/// Extract bits [`start`, `start + length`) from `x` into the lower bits
/// of the result.
///
/// # Keywords:
///
/// Gather bit range.
///
/// # Intrinsics:
/// - BMI 1.0: bextr.
///
/// # Examples
///
/// ```
|
///
/// assert_eq!(n.extract_bits(1u8, 4u8), 0b1001);
/// ```
#[inline]
pub fn extract_bits<T: Word, U: Word>(x: T, start: U, length: U) -> T {
x.bextr(start.to(), length.to())
}
/// Method version of [`extract_bits`](fn.extract_bits.html).
pub trait ExtractBits {
#[inline]
fn extract_bits<U: Word>(self, U, U) -> Self;
}
impl<T: Word> ExtractBits for T {
#[inline]
fn extract_bits<U: Word>(self, start: U, length: U) -> Self {
extract_bits(self, start, length)
}
}
|
/// use bitwise::word::*;
///
/// let n = 0b1011_1110_1001_0011u16;
|
random_line_split
|
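// The bextr semantics documented in extract_bits.rs above reduce to a shift plus a mask.
// A hand-rolled u16 check of the doc example; `bextr_u16` is an assumed name, not part of
// the bitwise crate.
fn bextr_u16(x: u16, start: u32, length: u32) -> u16 {
    (x >> start) & (((1u32 << length) - 1) as u16)
}

fn main() {
    let n = 0b1011_1110_1001_0011u16;
    assert_eq!(bextr_u16(n, 1, 4), 0b1001);
}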
lr0.rs
|
//! This module builds the LR(0) state machine for a given grammar.
//!
//! The state machine represents the state of the parser of the grammar, as tokens are produced by
//! the lexer. (The details of the lexer are out of scope; for our purposes, all that is relevant is
//! a sequence of tokens.)
//!
//! For a given sequence of tokens (not yet terminated by EOF; in other words, a prefix of a
//! possibly-valid complete input), there may be any number of rules (productions) which may match
//! that sequence of tokens. The lr0 module builds a state machine which has one state for each
//! unique set of rules that may match the current sequence of tokens. (This is somewhat analogous
//! to the NFA to DFA transform for regular expressions.) More precisely, each state consists of a
//! unique set of _items_, where each item is a position within a rule.
//!
//! All of this is well-described in the literature, especially the Dragon Book, i.e.
//! _Compilers: Principles, Techniques, and Tools, Edition 2_.
use crate::grammar::Grammar;
use crate::tvec::TVec;
use crate::util::{word_size, Bitmat, Bitv32};
use crate::warshall::reflexive_transitive_closure;
use crate::Symbol;
use crate::{Item, Rule, State, Var};
use log::{debug, log_enabled, trace};
use ramp_table::RampTable;
use std::fmt::Write;
pub(crate) const INITIAL_STATE_SYMBOL: Symbol = Symbol(0);
pub(crate) struct LR0Output {
/// The number of states produced by LR(0) analysis.
pub nstates: usize,
/// For each state, this gives the symbol that created this state.
// index: State
// value: Symbol
pub accessing_symbol: TVec<State, Symbol>,
/// Contains (State -> [State]) mappings for shifts. For each state, this gives the
/// set of states that this state can transition to.
pub shifts: RampTable<State>,
/// Contains State -> [Rule]. For each state, this gives the rules that can be
/// reduced in this state.
pub reductions: RampTable<Rule>,
/// Contains Var -> [Rule]
/// Each key is a variable (nonterminal). The values for each key are the rules
/// that derive (produce) this nonterminal.
pub derives: RampTable<Rule>,
/// Contains State -> [Item]
/// The items that make up a given state.
/// This is used only for debugging, not for actual analysis.
pub state_items: RampTable<Item>,
}
impl LR0Output {
pub fn nstates(&self) -> usize {
self.nstates
}
}
pub(crate) fn compute_lr0(gram: &Grammar) -> LR0Output {
let derives = set_derives(gram);
// was: allocate_item_sets()
// This defines: kernel_base, kernel_items, kernel_end, shift_symbol
// The kernel_* fields are allocated to well-defined sizes, but their contents are
// not well-defined yet.
let mut kernel_items_count: usize = 0;
let mut symbol_count: Vec<usize> = vec![0; gram.nsyms];
for &symbol in gram.ritem.iter() {
if symbol.is_symbol() {
let symbol = symbol.as_symbol();
kernel_items_count += 1;
symbol_count[symbol.index()] += 1;
}
}
let mut kernel_base: Vec<usize> = vec![0; gram.nsyms];
let mut count: usize = 0;
for i in 0..gram.nsyms {
kernel_base[i] = count;
count += symbol_count[i];
}
let mut kernel_items: Vec<Item> = vec![Item(0); kernel_items_count];
// values in this array are indexes into kernel_base
let mut kernel_end: Vec<usize> = vec![0; gram.nsyms];
// The item sets for each state.
let mut states: RampTable<Item> = RampTable::new();
let mut accessing_symbol: TVec<State, Symbol> = TVec::new();
// This function creates the initial state, using the DERIVES relation for
// the start symbol. From this initial state, we will discover / create all
// other states, by examining a state, the next variables that could be
// encountered in those states, and finding the transitive closure over same.
// Initializes the state table.
states.push_entry_extend(
derives[gram.symbol_to_var(gram.start()).index()]
.iter()
.map(|&item| gram.rrhs(item)),
);
accessing_symbol.push(INITIAL_STATE_SYMBOL);
// Contains the set of states that are relevant for each item. Each entry in this
// table corresponds to an item, so state_set.len() = nitems. The contents of each
// entry is a list of state indices (into LR0Output.states).
// Item -> [State]
let mut state_set: Vec<Vec<State>> = vec![vec![]; gram.nitems()];
let first_derives = set_first_derives(gram, &derives);
// These vectors are used for building tables during each state.
// It is inefficient to allocate and free these vectors within
// the scope of processing each state.
let mut item_set: Vec<Item> = Vec::with_capacity(gram.nitems());
let mut rule_set: Bitv32 = Bitv32::from_elem(gram.nrules, false);
let mut shift_symbol: Vec<Symbol> = Vec::new();
// this_state represents our position within our work list. The output.states
// array represents both our final output, and this_state is the next state
// within that array, where we need to generate new states from. New states
// are added to output.states within find_or_create_state() (called below).
let mut this_state: usize = 0;
// State which becomes the output
let mut reductions = RampTable::<Rule>::new();
let mut shifts = RampTable::<State>::new();
while this_state < states.len() {
assert!(item_set.len() == 0);
trace!("computing closure for state s{}:", this_state);
// The output of closure() is stored in item_set.
// rule_set is used only as temporary storage.
closure(
gram,
&states[this_state],
&first_derives,
&mut rule_set,
&mut item_set,
);
// The output of save_reductions() is stored in reductions.
save_reductions(gram, &item_set, &mut reductions);
new_item_sets(
&kernel_base,
&mut kernel_items,
&mut kernel_end,
gram,
&item_set,
&mut shift_symbol,
);
// Find or create states for shifts in the current state. This can potentially add new
// states to 'states'. Then record the resulting shifts in 'shifts'.
shift_symbol.sort();
for symbol in shift_symbol.iter().copied() {
// Search for an existing state that has the same items.
let symbol_items =
&kernel_items[kernel_base[symbol.index()]..kernel_end[symbol.index()]];
let this_state_set: &mut Vec<State> = &mut state_set[symbol_items[0].index()];
let shift_state = if let Some(&existing_state) = this_state_set
.iter()
.find(|state| *symbol_items == states[state.index()])
{
existing_state
} else {
// No match. Create a new state for this unique set of items.
let new_state: State = states.len().into();
states.push_entry_copy(symbol_items);
accessing_symbol.push(symbol);
// Add the new state to the state set for this item.
this_state_set.push(new_state);
new_state
};
shifts.push_value(shift_state);
}
shifts.finish_key();
item_set.clear();
shift_symbol.clear();
this_state += 1;
}
let output = LR0Output {
nstates: states.len(),
accessing_symbol,
reductions,
shifts,
derives,
state_items: states,
};
dump_lr0_output(gram, &output);
output
}
fn dump_lr0_output(gram: &Grammar, output: &LR0Output) {
if !log_enabled!(log::Level::Debug) {
return;
}
debug!("States: (nstates: {})", output.nstates);
for istate in 0..output.nstates {
let state = State(istate as i16);
debug!(
"s{}: (accessing_symbol {})",
state,
gram.name(output.accessing_symbol[state])
);
let items = &output.state_items[istate];
let mut line = String::new();
for i in 0..items.len() {
let rhs = items[i].index();
line.push_str(&format!("item {:4} : ", rhs));
// back up to start of this rule
let mut rhs_first = rhs;
while rhs_first > 0 && gram.ritem[rhs_first - 1].is_symbol() {
rhs_first -= 1;
}
// loop through rhs
let mut j = rhs_first;
while gram.ritem[j].is_symbol() {
let s = gram.ritem[j].as_symbol();
if j == rhs {
line.push_str(".");
}
line.push_str(&format!(" {}", gram.name(s)));
j += 1;
}
if j == rhs {
line.push_str(".");
}
// Is this item a reduction? In other words, is the "." at the end of the RHS?
if gram.ritem[rhs].is_rule() {
let r = gram.ritem[rhs].as_rule();
write!(
line,
" -> reduction (r{}) {}",
r.index(),
gram.name(gram.rlhs(r)),
)
.unwrap();
}
debug!(" {}", line);
line.clear();
}
for &r in &output.reductions[istate] {
debug!(" reduction: {}", gram.rule_to_str(r));
}
for &s in output.shifts[istate].iter() {
debug!(
" shift: {:-20} --> s{}",
gram.name(output.accessing_symbol[s]),
s.index()
);
}
}
}
// fills shift_symbol with shifts
// kernel_base: Symbol -> index into kernel_items
// kernel_end: Symbol -> index into kernel_items
fn new_item_sets(
kernel_base: &[usize],
kernel_items: &mut [Item],
kernel_end: &mut [usize],
gram: &Grammar,
item_set: &[Item],
shift_symbol: &mut Vec<Symbol>,
) {
assert!(shift_symbol.len() == 0);
// reset kernel_end
kernel_end.copy_from_slice(kernel_base);
for &item in item_set.iter() {
let symbol = gram.ritem(item);
if symbol.is_symbol() {
let symbol = symbol.as_symbol();
let base = kernel_base[symbol.index()];
let end = &mut kernel_end[symbol.index()];
if *end == base {
shift_symbol.push(symbol);
}
kernel_items[*end] = item + 1;
*end += 1;
}
}
}
/// Examine the items in the given item set. If any of the items have reached the
/// end of the rhs list for a particular rule, then add that rule to the reduction set.
/// We discover this by testing the sign of the next symbol in the item; if it is
/// negative, then we have reached the end of the symbols on the rhs of a rule. See
/// the code in reader::pack_grammar(), where this information is set up.
fn save_reductions(gram: &Grammar, item_set: &[Item], rules: &mut RampTable<Rule>) {
for &item in item_set {
let sr = gram.ritem(item);
if sr.is_rule() {
rules.push_value(sr.as_rule());
}
}
rules.finish_key();
}
/// Computes the `DERIVES` table. The `DERIVES` table maps `Var -> [Rule]`, where each `Var`
/// is a nonterminal and `[Rule]` contains all of the rules that have `Var` as their left-hand
/// side. In other words, this table allows you to lookup the set of rules that produce
/// (derive) a particular nonterminal.
fn set_derives(gram: &Grammar) -> RampTable<Rule>
|
}
derives
}
/// Builds a vector of symbols which are nullable. A nullable symbol is one which can be
/// reduced from an empty sequence of tokens.
pub(crate) fn set_nullable(gram: &Grammar) -> TVec<Symbol, bool> {
let mut nullable: TVec<Symbol, bool> = TVec::from_vec(vec![false; gram.nsyms]);
loop {
let mut done = true;
let mut i = 1;
while i < gram.ritem.len() {
let mut empty = true;
let rule = loop {
let sr = gram.ritem[i];
if sr.is_rule() {
break sr.as_rule();
}
let sym = sr.as_symbol();
if !nullable[sym] {
empty = false;
}
i += 1;
};
if empty {
let sym = gram.rlhs(rule);
if !nullable[sym] {
nullable[sym] = true;
done = false;
}
}
i += 1;
}
if done {
break;
}
}
if log_enabled!(log::Level::Debug) {
debug!("Nullable symbols:");
for sym in gram.iter_var_syms() {
if nullable[sym] {
debug!("{}", gram.name(sym));
}
}
}
nullable
}
/// Computes the "epsilon-free firsts" (EFF) relation.
/// The EFF is a bit matrix [nvars, nvars].
fn set_eff(gram: &Grammar, derives: &RampTable<Rule>) -> Bitmat {
let nvars = gram.nvars;
let mut eff: Bitmat = Bitmat::new(nvars, nvars);
for row in 0..nvars {
for &rule in &derives[row] {
let derived_rule_or_symbol = gram.ritem(gram.rrhs(rule));
if derived_rule_or_symbol.is_rule() {
continue;
}
let symbol = derived_rule_or_symbol.as_symbol();
if gram.is_var(symbol) {
eff.set(row, gram.symbol_to_var(symbol).index());
}
}
}
reflexive_transitive_closure(&mut eff);
print_eff(gram, &eff);
eff
}
fn print_eff(gram: &Grammar, eff: &Bitmat) {
debug!("Epsilon Free Firsts");
for i in 0..eff.rows {
let var = Var(i as i16);
debug!("{}", gram.name(gram.var_to_symbol(var)));
for j in eff.iter_ones_in_row(i) {
debug!(" {}", gram.name(gram.var_to_symbol(Var(j as i16))));
}
}
}
/// Computes the `first_derives` relation, which is a bit matrix of size [nvars, nrules].
/// Each row corresponds to a variable, and each column corresponds to a rule.
///
/// Note: Because this relation is only relevant to variables (non-terminals), the table
/// does not waste space on tokens. That is, row 0 is assigned to the first non-terminal
/// (Grammar.start_symbol). So when indexing using a symbol value, you have to subtract
/// start_symbol (or, equivalently, ntokens) first.
///
/// This implementation processes bits in groups of 32, for the sake of efficiency.
/// It is not clear whether this complexity is still justifiable, but it is preserved.
pub(crate) fn set_first_derives(gram: &Grammar, derives: &RampTable<Rule>) -> Bitmat {
let eff = set_eff(gram, derives);
assert!(eff.rows == gram.nvars);
assert!(eff.cols == gram.nvars);
let mut first_derives = Bitmat::new(gram.nvars, gram.nrules);
for (i, j) in eff.iter_ones() {
for &rule in &derives[j] {
first_derives.set(i, rule.index());
}
}
print_first_derives(gram, &first_derives);
first_derives
}
/// Computes the closure of a set of item sets, and writes the result into 'item_set'.
/// nucleus contains a set of items, that is, positions within reductions that are possible
/// in the current state. The closure() function looks at the next symbol in each item, and
/// if the next symbol is a variable, the first_derives relation is consulted in order to see
/// which other rules need to be added to the closure.
///
/// The caller provides a mutable rule_set array, which is guaranteed to hold enough space for
/// a bit vector of size nrules. The caller does not otherwise use rule_set; the caller provides
/// rule_set only to avoid frequently allocating and destroying an array.
///
/// Similarly, the item_set is passed as a mutable vector. However, the caller guarantees that
/// item_set will be empty on call to closure(), and closure() writes its output into item_set.
///
/// * rule_set: bit vector, size=nrules; temporary data, written and read by this fn
///
/// TODO: Consider changing item_set from Vec<Item> to a bitmap, whose length is nitems.
/// Then the 'states' table becomes a Bitmat.
pub(crate) fn closure(
gram: &Grammar,
nucleus: &[Item],
first_derives: &Bitmat,
rule_set: &mut Bitv32,
item_set: &mut Vec<Item>,
) {
assert!(item_set.len() == 0);
let rulesetsize = word_size(rule_set.nbits);
// clear rule_set
rule_set.set_all(false);
// For each item in the nucleus, examine the next symbol in the item.
// If the next symbol is a non-terminal, then find the corresponding
// row in the first_derives table, and merge that row into the rule_set
// bit vector. The result is that rule_set will contain a bit vector
// that identifies the rules need to be added to the closure of the
// current state. Keep in mind that we process bit vectors in u32 chunks.
for &item in nucleus.iter() {
let symbol_or_rule = gram.ritem(item);
if symbol_or_rule.is_symbol() {
let symbol = symbol_or_rule.as_symbol();
if gram.is_var(symbol) {
let var = gram.symbol_to_var(symbol);
let dsp = var.index() * first_derives.rowsize;
for i in 0..rulesetsize {
rule_set.data[i] |= first_derives.data[dsp + i];
}
}
}
}
// Scan the rule_set that we just constructed. The rule_set tells us which
// items need to be merged into the item set for the item set. Thus,
// new_items = nucleus merged with rule_set.iter_ones().
//
// This code relies on this invariant:
// for all r: gram.rrhs[r + 1] > gram.rrhs[r]
let mut i: usize = 0; // index into nucleus
for rule in rule_set.iter_ones() {
let item = gram.rrhs[rule];
while i < nucleus.len() && nucleus[i] < item {
item_set.push(nucleus[i]);
i += 1;
}
item_set.push(item);
while i < nucleus.len() && nucleus[i] == item {
i += 1;
}
}
while i < nucleus.len() {
item_set.push(nucleus[i]);
i += 1;
}
}
// first_derives: cols = nrules
fn print_first_derives(gram: &Grammar, first_derives: &Bitmat) {
debug!("");
debug!("First Derives");
debug!("");
for i in 0..gram.nvars {
let var = Var(i as i16);
debug!("{} derives:", gram.name(gram.var_to_symbol(var)));
for j in first_derives.iter_ones_in_row(i) {
debug!(" {}", gram.rule_to_str(Rule(j as i16)));
}
}
}
|
{
let mut derives = RampTable::<Rule>::with_capacity(gram.nsyms, gram.nrules);
for lhs in gram.iter_var_syms() {
for rule in gram.iter_rules() {
if gram.rlhs(rule) == lhs {
derives.push_value(rule as Rule);
}
}
derives.finish_key();
}
if log_enabled!(log::Level::Debug) {
debug!("DERIVES:");
for lhs in gram.iter_vars() {
let lhs_sym = gram.var_to_symbol(lhs);
debug!(" {} derives rules: ", gram.name(lhs_sym));
for &rule in &derives[lhs.index()] {
debug!(" {}", &gram.rule_to_str(rule));
}
}
|
identifier_body
|
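// The set_nullable routine above is a fixpoint: a nonterminal is nullable if some rule
// lets it derive the empty string, directly or through other nullable nonterminals. A toy
// restatement over plain chars (independent of the crate's Grammar/TVec types; all names
// here are illustrative).
use std::collections::{HashMap, HashSet};

fn nullable(rules: &HashMap<char, Vec<Vec<char>>>) -> HashSet<char> {
    let mut out = HashSet::new();
    loop {
        let mut changed = false;
        for (&lhs, bodies) in rules {
            if out.contains(&lhs) {
                continue;
            }
            // A body of all-nullable symbols (or an empty body) makes the lhs nullable.
            if bodies.iter().any(|b| b.iter().all(|s| out.contains(s))) {
                out.insert(lhs);
                changed = true;
            }
        }
        if !changed {
            return out;
        }
    }
}

fn main() {
    // A -> ε | a;  B -> A A;  C -> c
    let mut g = HashMap::new();
    g.insert('A', vec![vec![], vec!['a']]);
    g.insert('B', vec![vec!['A', 'A']]);
    g.insert('C', vec![vec!['c']]);
    let n = nullable(&g);
    assert!(n.contains(&'A') && n.contains(&'B') && !n.contains(&'C'));
}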
lr0.rs
|
//! This module builds the LR(0) state machine for a given grammar.
//!
//! The state machine represents the state of the parser of the grammar, as tokens are produced by
//! the lexer. (The details of the lexer are out of scope; for our purposes, all that is relevant is
//! a sequence of tokens.)
//!
//! For a given sequence of tokens (not yet terminated by EOF; in other words, a prefix of a
//! possibly-valid complete input), there may be any number of rules (productions) which may match
//! that sequence of tokens. The lr0 module builds a state machine which has one state for each
//! unique set of rules that may match the current sequence of tokens. (This is somewhat analogous
//! to the NFA to DFA transform for regular expressions.) More precisely, each state consists of a
//! unique set of _items_, where each item is a position within a rule.
//!
//! All of this is well-described in the literature, especially the Dragon Book, i.e.
//! _Compilers: Principles, Techniques, and Tools, Edition 2_.
use crate::grammar::Grammar;
use crate::tvec::TVec;
use crate::util::{word_size, Bitmat, Bitv32};
use crate::warshall::reflexive_transitive_closure;
use crate::Symbol;
use crate::{Item, Rule, State, Var};
use log::{debug, log_enabled, trace};
use ramp_table::RampTable;
use std::fmt::Write;
pub(crate) const INITIAL_STATE_SYMBOL: Symbol = Symbol(0);
pub(crate) struct LR0Output {
/// The number of states produced by LR(0) analysis.
pub nstates: usize,
/// For each state, this gives the symbol that created this state.
// index: State
// value: Symbol
pub accessing_symbol: TVec<State, Symbol>,
/// Contains (State -> [State]) mappings for shifts. For each state, this gives the
/// set of states that this state can transition to.
pub shifts: RampTable<State>,
/// Contains State -> [Rule]. For each state, this gives the rules that can be
/// reduced in this state.
pub reductions: RampTable<Rule>,
/// Contains Var -> [Rule]
/// Each key is a variable (nonterminal). The values for each key are the rules
/// that derive (produce) this nonterminal.
pub derives: RampTable<Rule>,
/// Contains State -> [Item]
/// The items that make up a given state.
/// This is used only for debugging, not for actual analysis.
pub state_items: RampTable<Item>,
}
impl LR0Output {
pub fn nstates(&self) -> usize {
self.nstates
}
}
pub(crate) fn compute_lr0(gram: &Grammar) -> LR0Output {
let derives = set_derives(gram);
// was: allocate_item_sets()
// This defines: kernel_base, kernel_items, kernel_end, shift_symbol
// The kernel_* fields are allocated to well-defined sizes, but their contents are
// not well-defined yet.
let mut kernel_items_count: usize = 0;
let mut symbol_count: Vec<usize> = vec![0; gram.nsyms];
for &symbol in gram.ritem.iter() {
if symbol.is_symbol() {
let symbol = symbol.as_symbol();
kernel_items_count += 1;
symbol_count[symbol.index()] += 1;
}
}
let mut kernel_base: Vec<usize> = vec![0; gram.nsyms];
let mut count: usize = 0;
for i in 0..gram.nsyms {
kernel_base[i] = count;
count += symbol_count[i];
}
let mut kernel_items: Vec<Item> = vec![Item(0); kernel_items_count];
// values in this array are indexes into kernel_base
let mut kernel_end: Vec<usize> = vec![0; gram.nsyms];
// The item sets for each state.
let mut states: RampTable<Item> = RampTable::new();
let mut accessing_symbol: TVec<State, Symbol> = TVec::new();
// This function creates the initial state, using the DERIVES relation for
// the start symbol. From this initial state, we will discover / create all
// other states, by examining a state, the next variables that could be
// encountered in those states, and finding the transitive closure over same.
// Initializes the state table.
states.push_entry_extend(
derives[gram.symbol_to_var(gram.start()).index()]
.iter()
.map(|&item| gram.rrhs(item)),
);
accessing_symbol.push(INITIAL_STATE_SYMBOL);
// Contains the set of states that are relevant for each item. Each entry in this
// table corresponds to an item, so state_set.len() = nitems. The contents of each
// entry is a list of state indices (into LR0Output.states).
// Item -> [State]
let mut state_set: Vec<Vec<State>> = vec![vec![]; gram.nitems()];
let first_derives = set_first_derives(gram, &derives);
// These vectors are used for building tables during each state.
// It is inefficient to allocate and free these vectors within
// the scope of processing each state.
let mut item_set: Vec<Item> = Vec::with_capacity(gram.nitems());
let mut rule_set: Bitv32 = Bitv32::from_elem(gram.nrules, false);
let mut shift_symbol: Vec<Symbol> = Vec::new();
// this_state represents our position within our work list. The output.states
// array represents both our final output, and this_state is the next state
// within that array, where we need to generate new states from. New states
// are added to output.states within find_or_create_state() (called below).
let mut this_state: usize = 0;
// State which becomes the output
let mut reductions = RampTable::<Rule>::new();
let mut shifts = RampTable::<State>::new();
while this_state < states.len() {
assert!(item_set.len() == 0);
trace!("computing closure for state s{}:", this_state);
// The output of closure() is stored in item_set.
// rule_set is used only as temporary storage.
closure(
gram,
&states[this_state],
&first_derives,
&mut rule_set,
&mut item_set,
);
// The output of save_reductions() is stored in reductions.
save_reductions(gram, &item_set, &mut reductions);
new_item_sets(
&kernel_base,
&mut kernel_items,
&mut kernel_end,
gram,
&item_set,
&mut shift_symbol,
);
// Find or create states for shifts in the current state. This can potentially add new
// states to 'states'. Then record the resulting shifts in 'shifts'.
shift_symbol.sort();
for symbol in shift_symbol.iter().copied() {
// Search for an existing state that has the same items.
let symbol_items =
&kernel_items[kernel_base[symbol.index()]..kernel_end[symbol.index()]];
let this_state_set: &mut Vec<State> = &mut state_set[symbol_items[0].index()];
let shift_state = if let Some(&existing_state) = this_state_set
.iter()
.find(|state| *symbol_items == states[state.index()])
{
existing_state
} else {
// No match. Create a new state for this unique set of items.
let new_state: State = states.len().into();
states.push_entry_copy(symbol_items);
accessing_symbol.push(symbol);
// Add the new state to the state set for this item.
this_state_set.push(new_state);
new_state
};
shifts.push_value(shift_state);
}
shifts.finish_key();
item_set.clear();
shift_symbol.clear();
this_state += 1;
}
let output = LR0Output {
nstates: states.len(),
accessing_symbol,
reductions,
shifts,
derives,
state_items: states,
};
dump_lr0_output(gram, &output);
output
}
fn dump_lr0_output(gram: &Grammar, output: &LR0Output) {
if !log_enabled!(log::Level::Debug) {
return;
}
debug!("States: (nstates: {})", output.nstates);
for istate in 0..output.nstates {
let state = State(istate as i16);
debug!(
"s{}: (accessing_symbol {})",
state,
gram.name(output.accessing_symbol[state])
);
let items = &output.state_items[istate];
let mut line = String::new();
for i in 0..items.len() {
let rhs = items[i].index();
line.push_str(&format!("item {:4} : ", rhs));
// back up to start of this rule
let mut rhs_first = rhs;
while rhs_first > 0 && gram.ritem[rhs_first - 1].is_symbol() {
rhs_first -= 1;
}
// loop through rhs
let mut j = rhs_first;
while gram.ritem[j].is_symbol() {
let s = gram.ritem[j].as_symbol();
if j == rhs {
line.push_str(".");
}
line.push_str(&format!(" {}", gram.name(s)));
j += 1;
}
if j == rhs {
line.push_str(".");
}
// Is this item a reduction? In other words, is the "." at the end of the RHS?
if gram.ritem[rhs].is_rule() {
let r = gram.ritem[rhs].as_rule();
write!(
line,
" -> reduction (r{}) {}",
r.index(),
gram.name(gram.rlhs(r)),
)
.unwrap();
}
debug!(" {}", line);
line.clear();
}
for &r in &output.reductions[istate] {
debug!(" reduction: {}", gram.rule_to_str(r));
}
for &s in output.shifts[istate].iter() {
debug!(
" shift: {:-20} --> s{}",
gram.name(output.accessing_symbol[s]),
s.index()
);
}
}
}
// fills shift_symbol with shifts
// kernel_base: Symbol -> index into kernel_items
// kernel_end: Symbol -> index into kernel_items
fn new_item_sets(
kernel_base: &[usize],
kernel_items: &mut [Item],
kernel_end: &mut [usize],
gram: &Grammar,
item_set: &[Item],
shift_symbol: &mut Vec<Symbol>,
) {
assert!(shift_symbol.len() == 0);
// reset kernel_end
kernel_end.copy_from_slice(kernel_base);
for &item in item_set.iter() {
let symbol = gram.ritem(item);
if symbol.is_symbol() {
let symbol = symbol.as_symbol();
let base = kernel_base[symbol.index()];
let end = &mut kernel_end[symbol.index()];
if *end == base {
shift_symbol.push(symbol);
}
kernel_items[*end] = item + 1;
*end += 1;
}
}
}
/// Examine the items in the given item set. If any of the items have reached the
/// end of the rhs list for a particular rule, then add that rule to the reduction set.
/// We discover this by testing the sign of the next symbol in the item; if it is
/// negative, then we have reached the end of the symbols on the rhs of a rule. See
/// the code in reader::pack_grammar(), where this information is set up.
fn save_reductions(gram: &Grammar, item_set: &[Item], rules: &mut RampTable<Rule>) {
for &item in item_set {
let sr = gram.ritem(item);
if sr.is_rule() {
rules.push_value(sr.as_rule());
}
}
rules.finish_key();
}
/// Computes the `DERIVES` table. The `DERIVES` table maps `Var -> [Rule]`, where each `Var`
/// is a nonterminal and `[Rule]` contains all of the rules that have `Var` as their left-hand
/// side. In other words, this table allows you to lookup the set of rules that produce
/// (derive) a particular nonterminal.
fn
|
(gram: &Grammar) -> RampTable<Rule> {
let mut derives = RampTable::<Rule>::with_capacity(gram.nsyms, gram.nrules);
for lhs in gram.iter_var_syms() {
for rule in gram.iter_rules() {
if gram.rlhs(rule) == lhs {
derives.push_value(rule as Rule);
}
}
derives.finish_key();
}
if log_enabled!(log::Level::Debug) {
debug!("DERIVES:");
for lhs in gram.iter_vars() {
let lhs_sym = gram.var_to_symbol(lhs);
debug!(" {} derives rules: ", gram.name(lhs_sym));
for &rule in &derives[lhs.index()] {
debug!(" {}", &gram.rule_to_str(rule));
}
}
}
derives
}
/// Builds a vector of symbols which are nullable. A nullable symbol is one which can be
/// reduced from an empty sequence of tokens.
pub(crate) fn set_nullable(gram: &Grammar) -> TVec<Symbol, bool> {
let mut nullable: TVec<Symbol, bool> = TVec::from_vec(vec![false; gram.nsyms]);
loop {
let mut done = true;
let mut i = 1;
while i < gram.ritem.len() {
let mut empty = true;
let rule = loop {
let sr = gram.ritem[i];
if sr.is_rule() {
break sr.as_rule();
}
let sym = sr.as_symbol();
if !nullable[sym] {
empty = false;
}
i += 1;
};
if empty {
let sym = gram.rlhs(rule);
if !nullable[sym] {
nullable[sym] = true;
done = false;
}
}
i += 1;
}
if done {
break;
}
}
if log_enabled!(log::Level::Debug) {
debug!("Nullable symbols:");
for sym in gram.iter_var_syms() {
if nullable[sym] {
debug!("{}", gram.name(sym));
}
}
}
nullable
}
/// Computes the "epsilon-free firsts" (EFF) relation.
/// The EFF is a bit matrix [nvars, nvars].
fn set_eff(gram: &Grammar, derives: &RampTable<Rule>) -> Bitmat {
let nvars = gram.nvars;
let mut eff: Bitmat = Bitmat::new(nvars, nvars);
for row in 0..nvars {
for &rule in &derives[row] {
let derived_rule_or_symbol = gram.ritem(gram.rrhs(rule));
if derived_rule_or_symbol.is_rule() {
continue;
}
let symbol = derived_rule_or_symbol.as_symbol();
if gram.is_var(symbol) {
eff.set(row, gram.symbol_to_var(symbol).index());
}
}
}
reflexive_transitive_closure(&mut eff);
print_eff(gram, &eff);
eff
}
fn print_eff(gram: &Grammar, eff: &Bitmat) {
debug!("Epsilon Free Firsts");
for i in 0..eff.rows {
let var = Var(i as i16);
debug!("{}", gram.name(gram.var_to_symbol(var)));
for j in eff.iter_ones_in_row(i) {
debug!(" {}", gram.name(gram.var_to_symbol(Var(j as i16))));
}
}
}
/// Computes the `first_derives` relation, which is a bit matrix of size [nvars, nrules].
/// Each row corresponds to a variable, and each column corresponds to a rule.
///
/// Note: Because this relation is only relevant to variables (non-terminals), the table
/// does not waste space on tokens. That is, row 0 is assigned to the first non-terminal
/// (Grammar.start_symbol). So when indexing using a symbol value, you have to subtract
/// start_symbol (or, equivalently, ntokens) first.
///
/// This implementation processes bits in groups of 32, for the sake of efficiency.
/// It is not clear whether this complexity is still justifiable, but it is preserved.
pub(crate) fn set_first_derives(gram: &Grammar, derives: &RampTable<Rule>) -> Bitmat {
let eff = set_eff(gram, derives);
assert!(eff.rows == gram.nvars);
assert!(eff.cols == gram.nvars);
let mut first_derives = Bitmat::new(gram.nvars, gram.nrules);
for (i, j) in eff.iter_ones() {
for &rule in &derives[j] {
first_derives.set(i, rule.index());
}
}
print_first_derives(gram, &first_derives);
first_derives
}
/// Computes the closure of a set of item sets, and writes the result into 'item_set'.
/// nucleus contains a set of items, that is, positions within reductions that are possible
/// in the current state. The closure() function looks at the next symbol in each item, and
/// if the next symbol is a variable, the first_derives relation is consulted in order to see
/// which other rules need to be added to the closure.
///
/// The caller provides a mutable rule_set array, which is guaranteed to hold enough space for
/// a bit vector of size nrules. The caller does not otherwise use rule_set; the caller provides
/// rule_set only to avoid frequently allocating and destroying an array.
///
/// Similarly, the item_set is passed as a mutable vector. However, the caller guarantees that
/// item_set will be empty on call to closure(), and closure() writes its output into item_set.
///
/// * rule_set: bit vector, size=nrules; temporary data, written and read by this fn
///
/// TODO: Consider changing item_set from Vec<Item> to a bitmap, whose length is nitems.
/// Then the'states' table becomes a Bitmat.
pub(crate) fn closure(
gram: &Grammar,
nucleus: &[Item],
first_derives: &Bitmat,
rule_set: &mut Bitv32,
item_set: &mut Vec<Item>,
) {
assert!(item_set.len() == 0);
let rulesetsize = word_size(rule_set.nbits);
// clear rule_set
rule_set.set_all(false);
// For each item in the nucleus, examine the next symbol in the item.
// If the next symbol is a non-terminal, then find the corresponding
// row in the first_derives table, and merge that row into the rule_set
// bit vector. The result is that rule_set will contain a bit vector
// that identifies the rules need to be added to the closure of the
// current state. Keep in mind that we process bit vectors in u32 chunks.
for &item in nucleus.iter() {
let symbol_or_rule = gram.ritem(item);
if symbol_or_rule.is_symbol() {
let symbol = symbol_or_rule.as_symbol();
if gram.is_var(symbol) {
let var = gram.symbol_to_var(symbol);
let dsp = var.index() * first_derives.rowsize;
for i in 0..rulesetsize {
rule_set.data[i] |= first_derives.data[dsp + i];
}
}
}
}
// Scan the rule_set that we just constructed. The rule_set tells us which
// items need to be merged into the item set for the item set. Thus,
// new_items = nucleus merged with rule_set.iter_ones().
//
// This code relies on this invariant:
// for all r: gram.rrhs[r + 1] > gram.rrhs[r]
let mut i: usize = 0; // index into nucleus
for rule in rule_set.iter_ones() {
let item = gram.rrhs[rule];
while i < nucleus.len() && nucleus[i] < item {
item_set.push(nucleus[i]);
i += 1;
}
item_set.push(item);
while i < nucleus.len() && nucleus[i] == item {
i += 1;
}
}
while i < nucleus.len() {
item_set.push(nucleus[i]);
i += 1;
}
}
// first_derives: cols = nrules
fn print_first_derives(gram: &Grammar, first_derives: &Bitmat) {
debug!("");
debug!("First Derives");
debug!("");
for i in 0..gram.nvars {
let var = Var(i as i16);
debug!("{} derives:", gram.name(gram.var_to_symbol(var)));
for j in first_derives.iter_ones_in_row(i) {
debug!(" {}", gram.rule_to_str(Rule(j as i16)));
}
}
}
|
set_derives
|
identifier_name
|
lr0.rs
|
//! This module builds the LR(0) state machine for a given grammar.
//!
//! The state machine represents the state of the parser of the grammar, as tokens are produced by
//! the lexer. (The details of the lexer are out of scope; for our purposes, all that is relevant is
//! a sequence of tokens.)
//!
//! For a given sequence of tokens (not yet terminated by EOF; in other words, a prefix of a
//! possibly-valid complete input), there may be any number of rules (productions) which may match
//! that sequence of tokens. The lr0 module builds a state machine which has one state for each
//! unique set of rules that may match the current sequence of tokens. (This is somewhat analogous
//! to the NFA to DFA transform for regular expressions.) More precisely, each state consists of a
//! unique set of _items_, where each item is a position within a rule.
//!
//! All of this is well-described in the literature, especially the Dragon Book, i.e.
//! _Compilers: Principles, Techniques, and Tools, Edition 2_.
use crate::grammar::Grammar;
use crate::tvec::TVec;
use crate::util::{word_size, Bitmat, Bitv32};
use crate::warshall::reflexive_transitive_closure;
use crate::Symbol;
use crate::{Item, Rule, State, Var};
use log::{debug, log_enabled, trace};
use ramp_table::RampTable;
use std::fmt::Write;
pub(crate) const INITIAL_STATE_SYMBOL: Symbol = Symbol(0);
pub(crate) struct LR0Output {
/// The number of states produced by LR(0) analysis.
pub nstates: usize,
/// For each state, this gives the symbol that created this state.
// index: State
// value: Symbol
pub accessing_symbol: TVec<State, Symbol>,
/// Contains (State -> [State]) mappings for shifts. For each state, this gives the
/// set of states that this state can transition to.
pub shifts: RampTable<State>,
/// Contains State -> [Rule]. For each state, this gives the rules that can be
/// reduced in this state.
pub reductions: RampTable<Rule>,
/// Contains Var -> [Rule]
/// Each key is a variable (nonterminal). The values for each key are the rules
/// that derive (produce) this nonterminal.
pub derives: RampTable<Rule>,
/// Contains State -> [Item]
/// The items that make up a given state.
/// This is used only for debugging, not for actual analysis.
pub state_items: RampTable<Item>,
}
impl LR0Output {
pub fn nstates(&self) -> usize {
self.nstates
}
}
pub(crate) fn compute_lr0(gram: &Grammar) -> LR0Output {
let derives = set_derives(gram);
// was: allocate_item_sets()
// This defines: kernel_base, kernel_items, kernel_end, shift_symbol
// The kernel_* fields are allocated to well-defined sizes, but their contents are
// not well-defined yet.
let mut kernel_items_count: usize = 0;
let mut symbol_count: Vec<usize> = vec![0; gram.nsyms];
for &symbol in gram.ritem.iter() {
if symbol.is_symbol() {
let symbol = symbol.as_symbol();
kernel_items_count += 1;
symbol_count[symbol.index()] += 1;
}
}
let mut kernel_base: Vec<usize> = vec![0; gram.nsyms];
let mut count: usize = 0;
for i in 0..gram.nsyms {
kernel_base[i] = count;
count += symbol_count[i];
}
let mut kernel_items: Vec<Item> = vec![Item(0); kernel_items_count];
// values in this array are indexes into kernel_items
let mut kernel_end: Vec<usize> = vec![0; gram.nsyms];
// The item sets for each state.
let mut states: RampTable<Item> = RampTable::new();
let mut accessing_symbol: TVec<State, Symbol> = TVec::new();
// This function creates the initial state, using the DERIVES relation for
// the start symbol. From this initial state, we will discover / create all
// other states, by examining a state, the next variables that could be
// encountered in those states, and finding the transitive closure over same.
// Initializes the state table.
states.push_entry_extend(
derives[gram.symbol_to_var(gram.start()).index()]
.iter()
.map(|&item| gram.rrhs(item)),
);
accessing_symbol.push(INITIAL_STATE_SYMBOL);
// Contains the set of states that are relevant for each item. Each entry in this
// table corresponds to an item, so state_set.len() = nitems. The contents of each
// entry is a list of state indices (into LR0Output.states).
// Item -> [State]
let mut state_set: Vec<Vec<State>> = vec![vec![]; gram.nitems()];
let first_derives = set_first_derives(gram, &derives);
|
let mut rule_set: Bitv32 = Bitv32::from_elem(gram.nrules, false);
let mut shift_symbol: Vec<Symbol> = Vec::new();
// this_state represents our position within our work list. The states array
// serves both as our final output and as the work list itself; this_state is the
// next state within that array from which we need to generate new states. New
// states are added to states by the find-or-create logic in the loop below.
let mut this_state: usize = 0;
// State which becomes the output
let mut reductions = RampTable::<Rule>::new();
let mut shifts = RampTable::<State>::new();
while this_state < states.len() {
assert!(item_set.len() == 0);
trace!("computing closure for state s{}:", this_state);
// The output of closure() is stored in item_set.
// rule_set is used only as temporary storage.
closure(
gram,
&states[this_state],
&first_derives,
&mut rule_set,
&mut item_set,
);
// The output of save_reductions() is stored in reductions.
save_reductions(gram, &item_set, &mut reductions);
new_item_sets(
&kernel_base,
&mut kernel_items,
&mut kernel_end,
gram,
&item_set,
&mut shift_symbol,
);
// Find or create states for shifts in the current state. This can potentially add new
// states to 'states'. Then record the resulting shifts in 'shifts'.
shift_symbol.sort();
for symbol in shift_symbol.iter().copied() {
// Search for an existing state that has the same items.
let symbol_items =
&kernel_items[kernel_base[symbol.index()]..kernel_end[symbol.index()]];
let this_state_set: &mut Vec<State> = &mut state_set[symbol_items[0].index()];
let shift_state = if let Some(&existing_state) = this_state_set
.iter()
.find(|state| *symbol_items == states[state.index()])
{
existing_state
} else {
// No match. Create a new state for this unique set of items.
let new_state: State = states.len().into();
states.push_entry_copy(symbol_items);
accessing_symbol.push(symbol);
// Add the new state to the state set for this item.
this_state_set.push(new_state);
new_state
};
shifts.push_value(shift_state);
}
shifts.finish_key();
item_set.clear();
shift_symbol.clear();
this_state += 1;
}
let output = LR0Output {
nstates: states.len(),
accessing_symbol,
reductions,
shifts,
derives,
state_items: states,
};
dump_lr0_output(gram, &output);
output
}
fn dump_lr0_output(gram: &Grammar, output: &LR0Output) {
if !log_enabled!(log::Level::Debug) {
return;
}
debug!("States: (nstates: {})", output.nstates);
for istate in 0..output.nstates {
let state = State(istate as i16);
debug!(
"s{}: (accessing_symbol {})",
state,
gram.name(output.accessing_symbol[state])
);
let items = &output.state_items[istate];
let mut line = String::new();
for i in 0..items.len() {
let rhs = items[i].index();
line.push_str(&format!("item {:4} : ", rhs));
// back up to start of this rule
let mut rhs_first = rhs;
while rhs_first > 0 && gram.ritem[rhs_first - 1].is_symbol() {
rhs_first -= 1;
}
// loop through rhs
let mut j = rhs_first;
while gram.ritem[j].is_symbol() {
let s = gram.ritem[j].as_symbol();
if j == rhs {
line.push_str(".");
}
line.push_str(&format!(" {}", gram.name(s)));
j += 1;
}
if j == rhs {
line.push_str(".");
}
// Is this item a reduction? In other words, is the "." at the end of the RHS?
if gram.ritem[rhs].is_rule() {
let r = gram.ritem[rhs].as_rule();
write!(
line,
" -> reduction (r{}) {}",
r.index(),
gram.name(gram.rlhs(r)),
)
.unwrap();
}
debug!(" {}", line);
line.clear();
}
for &r in &output.reductions[istate] {
debug!(" reduction: {}", gram.rule_to_str(r));
}
for &s in output.shifts[istate].iter() {
debug!(
" shift: {:-20} --> s{}",
gram.name(output.accessing_symbol[s]),
s.index()
);
}
}
}
// fills shift_symbol with shifts
// kernel_base: Symbol -> index into kernel_items
// kernel_end: Symbol -> index into kernel_items
fn new_item_sets(
kernel_base: &[usize],
kernel_items: &mut [Item],
kernel_end: &mut [usize],
gram: &Grammar,
item_set: &[Item],
shift_symbol: &mut Vec<Symbol>,
) {
assert!(shift_symbol.len() == 0);
// reset kernel_end
kernel_end.copy_from_slice(kernel_base);
for &item in item_set.iter() {
let symbol = gram.ritem(item);
if symbol.is_symbol() {
let symbol = symbol.as_symbol();
let base = kernel_base[symbol.index()];
let end = &mut kernel_end[symbol.index()];
if *end == base {
shift_symbol.push(symbol);
}
kernel_items[*end] = item + 1;
*end += 1;
}
}
}
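// In effect, new_item_sets() buckets the advanced items by the symbol that would
// be shifted. A standalone sketch of that bucketing with a `HashMap`, where each
// input is `(item, next_symbol)` and `None` marks a dot at the end of a rule (an
// assumed toy encoding; the real code above preallocates per-symbol ranges in
// kernel_items instead, to avoid allocation):
#[allow(dead_code)]
fn bucket_by_next_symbol(items: &[(usize, Option<usize>)]) -> std::collections::HashMap<usize, Vec<usize>> {
    let mut buckets: std::collections::HashMap<usize, Vec<usize>> = std::collections::HashMap::new();
    for &(item, next_symbol) in items {
        if let Some(sym) = next_symbol {
            // Advance the dot past `sym`: in this encoding the successor item is item + 1.
            buckets.entry(sym).or_default().push(item + 1);
        }
    }
    buckets
}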
/// Examine the items in the given item set. If any of the items have reached the
/// end of the rhs list for a particular rule, then add that rule to the reduction set.
/// We discover this by testing the sign of the next symbol in the item; if it is
/// negative, then we have reached the end of the symbols on the rhs of a rule. See
/// the code in reader::pack_grammar(), where this information is set up.
fn save_reductions(gram: &Grammar, item_set: &[Item], rules: &mut RampTable<Rule>) {
for &item in item_set {
let sr = gram.ritem(item);
if sr.is_rule() {
rules.push_value(sr.as_rule());
}
}
rules.finish_key();
}
/// Computes the `DERIVES` table. The `DERIVES` table maps `Var -> [Rule]`, where each `Var`
/// is a nonterminal and `[Rule]` contains all of the rules that have `Var` as their left-hand
/// side. In other words, this table allows you to lookup the set of rules that produce
/// (derive) a particular nonterminal.
fn set_derives(gram: &Grammar) -> RampTable<Rule> {
let mut derives = RampTable::<Rule>::with_capacity(gram.nsyms, gram.nrules);
for lhs in gram.iter_var_syms() {
for rule in gram.iter_rules() {
if gram.rlhs(rule) == lhs {
derives.push_value(rule as Rule);
}
}
derives.finish_key();
}
if log_enabled!(log::Level::Debug) {
debug!("DERIVES:");
for lhs in gram.iter_vars() {
let lhs_sym = gram.var_to_symbol(lhs);
debug!(" {} derives rules: ", gram.name(lhs_sym));
for &rule in &derives[lhs.index()] {
debug!(" {}", &gram.rule_to_str(rule));
}
}
}
derives
}
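// On a toy grammar the DERIVES relation is just a grouping of rule ids by their
// left-hand side. A standalone sketch using `HashMap` and string symbol names
// (assumptions for illustration only; the real code keys a RampTable by variable
// index instead):
#[allow(dead_code)]
fn derives_sketch(rule_lhs: &[&str]) -> std::collections::HashMap<String, Vec<usize>> {
    let mut derives: std::collections::HashMap<String, Vec<usize>> = std::collections::HashMap::new();
    for (rule_id, &lhs) in rule_lhs.iter().enumerate() {
        // Rule `rule_id` has `lhs` as its left-hand side, so `lhs` derives it.
        derives.entry(lhs.to_string()).or_default().push(rule_id);
    }
    // e.g. derives_sketch(&["Expr", "Expr", "Term"]) groups rules 0 and 1 under
    // "Expr" and rule 2 under "Term".
    derives
}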
/// Builds a vector of symbols which are nullable. A nullable symbol is one which can be
/// reduced from an empty sequence of tokens.
pub(crate) fn set_nullable(gram: &Grammar) -> TVec<Symbol, bool> {
let mut nullable: TVec<Symbol, bool> = TVec::from_vec(vec![false; gram.nsyms]);
loop {
let mut done = true;
let mut i = 1;
while i < gram.ritem.len() {
let mut empty = true;
let rule = loop {
let sr = gram.ritem[i];
if sr.is_rule() {
break sr.as_rule();
}
let sym = sr.as_symbol();
if !nullable[sym] {
empty = false;
}
i += 1;
};
if empty {
let sym = gram.rlhs(rule);
if !nullable[sym] {
nullable[sym] = true;
done = false;
}
}
i += 1;
}
if done {
break;
}
}
if log_enabled!(log::Level::Debug) {
debug!("Nullable symbols:");
for sym in gram.iter_var_syms() {
if nullable[sym] {
debug!("{}", gram.name(sym));
}
}
}
nullable
}
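// The same fixed-point computation, sketched over an explicit rule list instead
// of the packed `ritem` encoding. Each rule here is `(lhs, rhs_symbols)` with
// plain `usize` symbol ids; that encoding is an assumption for illustration
// only. An empty right-hand side makes its left-hand side nullable immediately,
// and the loop keeps sweeping until no new symbol becomes nullable.
#[allow(dead_code)]
fn nullable_sketch(nsyms: usize, rules: &[(usize, Vec<usize>)]) -> Vec<bool> {
    let mut nullable = vec![false; nsyms];
    loop {
        let mut changed = false;
        for (lhs, rhs) in rules {
            // A rule whose entire right-hand side is nullable makes its lhs nullable.
            if !nullable[*lhs] && rhs.iter().all(|&s| nullable[s]) {
                nullable[*lhs] = true;
                changed = true;
            }
        }
        if !changed {
            return nullable;
        }
    }
}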
/// Computes the "epsilon-free firsts" (EFF) relation.
/// The EFF is a bit matrix [nvars, nvars].
fn set_eff(gram: &Grammar, derives: &RampTable<Rule>) -> Bitmat {
let nvars = gram.nvars;
let mut eff: Bitmat = Bitmat::new(nvars, nvars);
for row in 0..nvars {
for &rule in &derives[row] {
let derived_rule_or_symbol = gram.ritem(gram.rrhs(rule));
if derived_rule_or_symbol.is_rule() {
continue;
}
let symbol = derived_rule_or_symbol.as_symbol();
if gram.is_var(symbol) {
eff.set(row, gram.symbol_to_var(symbol).index());
}
}
}
reflexive_transitive_closure(&mut eff);
print_eff(gram, &eff);
eff
}
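// `reflexive_transitive_closure` (from the warshall module) is assumed here to
// compute the classic Warshall transitive closure and then set the diagonal. A
// standalone sketch of that assumption over `Vec<Vec<bool>>`, for intuition only:
#[allow(dead_code)]
fn reflexive_transitive_closure_sketch(m: &mut Vec<Vec<bool>>) {
    let n = m.len();
    for k in 0..n {
        for i in 0..n {
            if m[i][k] {
                for j in 0..n {
                    if m[k][j] {
                        // i reaches k and k reaches j, so i reaches j.
                        m[i][j] = true;
                    }
                }
            }
        }
    }
    for i in 0..n {
        m[i][i] = true; // reflexive part
    }
}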
fn print_eff(gram: &Grammar, eff: &Bitmat) {
debug!("Epsilon Free Firsts");
for i in 0..eff.rows {
let var = Var(i as i16);
debug!("{}", gram.name(gram.var_to_symbol(var)));
for j in eff.iter_ones_in_row(i) {
debug!(" {}", gram.name(gram.var_to_symbol(Var(j as i16))));
}
}
}
/// Computes the `first_derives` relation, which is a bit matrix of size [nvars, nrules].
/// Each row corresponds to a variable, and each column corresponds to a rule.
///
/// Note: Because this relation is only relevant to variables (non-terminals), the table
/// does not waste space on tokens. That is, row 0 is assigned to the first non-terminal
/// (Grammar.start_symbol). So when indexing using a symbol value, you have to subtract
/// start_symbol (or, equivalently, ntokens) first.
///
/// This implementation processes bits in groups of 32, for the sake of efficiency.
/// It is not clear whether this complexity is still justifiable, but it is preserved.
pub(crate) fn set_first_derives(gram: &Grammar, derives: &RampTable<Rule>) -> Bitmat {
let eff = set_eff(gram, derives);
assert!(eff.rows == gram.nvars);
assert!(eff.cols == gram.nvars);
let mut first_derives = Bitmat::new(gram.nvars, gram.nrules);
for (i, j) in eff.iter_ones() {
for &rule in &derives[j] {
first_derives.set(i, rule.index());
}
}
print_first_derives(gram, &first_derives);
first_derives
}
/// Computes the closure of a set of item sets, and writes the result into 'item_set'.
/// nucleus contains a set of items, that is, positions within reductions that are possible
/// in the current state. The closure() function looks at the next symbol in each item, and
/// if the next symbol is a variable, the first_derives relation is consulted in order to see
/// which other rules need to be added to the closure.
///
/// The caller provides a mutable rule_set array, which is guaranteed to hold enough space for
/// a bit vector of size nrules. The caller does not otherwise use rule_set; the caller provides
/// rule_set only to avoid frequently allocating and destroying an array.
///
/// Similarly, the item_set is passed as a mutable vector. However, the caller guarantees that
/// item_set will be empty on call to closure(), and closure() writes its output into item_set.
///
/// * rule_set: bit vector, size=nrules; temporary data, written and read by this fn
///
/// TODO: Consider changing item_set from Vec<Item> to a bitmap, whose length is nitems.
/// Then the 'states' table becomes a Bitmat.
pub(crate) fn closure(
gram: &Grammar,
nucleus: &[Item],
first_derives: &Bitmat,
rule_set: &mut Bitv32,
item_set: &mut Vec<Item>,
) {
assert!(item_set.len() == 0);
let rulesetsize = word_size(rule_set.nbits);
// clear rule_set
rule_set.set_all(false);
// For each item in the nucleus, examine the next symbol in the item.
// If the next symbol is a non-terminal, then find the corresponding
// row in the first_derives table, and merge that row into the rule_set
// bit vector. The result is that rule_set will contain a bit vector
// that identifies the rules that need to be added to the closure of the
// current state. Keep in mind that we process bit vectors in u32 chunks.
for &item in nucleus.iter() {
let symbol_or_rule = gram.ritem(item);
if symbol_or_rule.is_symbol() {
let symbol = symbol_or_rule.as_symbol();
if gram.is_var(symbol) {
let var = gram.symbol_to_var(symbol);
let dsp = var.index() * first_derives.rowsize;
for i in 0..rulesetsize {
rule_set.data[i] |= first_derives.data[dsp + i];
}
}
}
}
// Scan the rule_set that we just constructed. The rule_set tells us which
// items need to be merged into the item set for the current state. Thus,
// new_items = nucleus merged with rule_set.iter_ones().
//
// This code relies on this invariant:
// for all r: gram.rrhs[r + 1] > gram.rrhs[r]
let mut i: usize = 0; // index into nucleus
for rule in rule_set.iter_ones() {
let item = gram.rrhs[rule];
while i < nucleus.len() && nucleus[i] < item {
item_set.push(nucleus[i]);
i += 1;
}
item_set.push(item);
while i < nucleus.len() && nucleus[i] == item {
i += 1;
}
}
while i < nucleus.len() {
item_set.push(nucleus[i]);
i += 1;
}
}
// first_derives: cols = nrules
fn print_first_derives(gram: &Grammar, first_derives: &Bitmat) {
debug!("");
debug!("First Derives");
debug!("");
for i in 0..gram.nvars {
let var = Var(i as i16);
debug!("{} derives:", gram.name(gram.var_to_symbol(var)));
for j in first_derives.iter_ones_in_row(i) {
debug!(" {}", gram.rule_to_str(Rule(j as i16)));
}
}
}
|
// These vectors are used for building tables during each state.
// It is inefficient to allocate and free these vectors within
// the scope of processing each state.
let mut item_set: Vec<Item> = Vec::with_capacity(gram.nitems());
|
random_line_split
|
lr0.rs
|
//! This module builds the LR(0) state machine for a given grammar.
//!
//! The state machine represents the state of the parser of the grammar, as tokens are produced by
//! the lexer. (The details of the lexer are out of scope; for our purposes, all that is relevant is
//! a sequence of tokens.)
//!
//! For a given sequence of tokens (not yet terminated by EOF; in other words, a prefix of a
//! possibly-valid complete input), there may be any number of rules (productions) which may match
//! that sequence of tokens. The lr0 module builds a state machine which has one state for each
//! unique set of rules that may match the current sequence of tokens. (This is somewhat analogous
//! to the NFA to DFA transform for regular expressions.) More precisely, each state consists of a
//! unique set of _items_, where each item is a position within a rule.
//!
//! All of this is well-described in the literature, especially the Dragon Book, i.e.
//! _Compilers: Principles, Techniques, and Tools, Edition 2_.
use crate::grammar::Grammar;
use crate::tvec::TVec;
use crate::util::{word_size, Bitmat, Bitv32};
use crate::warshall::reflexive_transitive_closure;
use crate::Symbol;
use crate::{Item, Rule, State, Var};
use log::{debug, log_enabled, trace};
use ramp_table::RampTable;
use std::fmt::Write;
pub(crate) const INITIAL_STATE_SYMBOL: Symbol = Symbol(0);
pub(crate) struct LR0Output {
/// The number of states produced by LR(0) analysis.
pub nstates: usize,
/// For each state, this gives the symbol that created this state.
// index: State
// value: Symbol
pub accessing_symbol: TVec<State, Symbol>,
/// Contains (State -> [State]) mappings for shifts. For each state, this gives the
/// set of states that this state can transition to.
pub shifts: RampTable<State>,
/// Contains State -> [Rule]. For each state, this gives the rules that can be
/// reduced in this state.
pub reductions: RampTable<Rule>,
/// Contains Var -> [Rule]
/// Each key is a variable (nonterminal). The values for each key are the rules
/// that derive (produce) this nonterminal.
pub derives: RampTable<Rule>,
/// Contains State -> [Item]
/// The items that make up a given state.
/// This is used only for debugging, not for actual analysis.
pub state_items: RampTable<Item>,
}
impl LR0Output {
pub fn nstates(&self) -> usize {
self.nstates
}
}
pub(crate) fn compute_lr0(gram: &Grammar) -> LR0Output {
let derives = set_derives(gram);
// was: allocate_item_sets()
// This defines: kernel_base, kernel_items, kernel_end, shift_symbol
// The kernel_* fields are allocated to well-defined sizes, but their contents are
// not well-defined yet.
let mut kernel_items_count: usize = 0;
let mut symbol_count: Vec<usize> = vec![0; gram.nsyms];
for &symbol in gram.ritem.iter() {
if symbol.is_symbol() {
let symbol = symbol.as_symbol();
kernel_items_count += 1;
symbol_count[symbol.index()] += 1;
}
}
let mut kernel_base: Vec<usize> = vec![0; gram.nsyms];
let mut count: usize = 0;
for i in 0..gram.nsyms {
kernel_base[i] = count;
count += symbol_count[i];
}
let mut kernel_items: Vec<Item> = vec![Item(0); kernel_items_count];
// values in this array are indexes into kernel_items
let mut kernel_end: Vec<usize> = vec![0; gram.nsyms];
// The item sets for each state.
let mut states: RampTable<Item> = RampTable::new();
let mut accessing_symbol: TVec<State, Symbol> = TVec::new();
// This function creates the initial state, using the DERIVES relation for
// the start symbol. From this initial state, we will discover / create all
// other states, by examining a state, the next variables that could be
// encountered in those states, and finding the transitive closure over same.
// Initializes the state table.
states.push_entry_extend(
derives[gram.symbol_to_var(gram.start()).index()]
.iter()
.map(|&item| gram.rrhs(item)),
);
accessing_symbol.push(INITIAL_STATE_SYMBOL);
// Contains the set of states that are relevant for each item. Each entry in this
// table corresponds to an item, so state_set.len() = nitems. The contents of each
// entry is a list of state indices (into LR0Output.states).
// Item -> [State]
let mut state_set: Vec<Vec<State>> = vec![vec![]; gram.nitems()];
let first_derives = set_first_derives(gram, &derives);
// These vectors are used for building tables during each state.
// It is inefficient to allocate and free these vectors within
// the scope of processing each state.
let mut item_set: Vec<Item> = Vec::with_capacity(gram.nitems());
let mut rule_set: Bitv32 = Bitv32::from_elem(gram.nrules, false);
let mut shift_symbol: Vec<Symbol> = Vec::new();
// this_state represents our position within our work list. The states array
// serves both as our final output and as the work list itself; this_state is the
// next state within that array from which we need to generate new states. New
// states are added to states by the find-or-create logic in the loop below.
let mut this_state: usize = 0;
// State which becomes the output
let mut reductions = RampTable::<Rule>::new();
let mut shifts = RampTable::<State>::new();
while this_state < states.len() {
assert!(item_set.len() == 0);
trace!("computing closure for state s{}:", this_state);
// The output of closure() is stored in item_set.
// rule_set is used only as temporary storage.
closure(
gram,
&states[this_state],
&first_derives,
&mut rule_set,
&mut item_set,
);
// The output of save_reductions() is stored in reductions.
save_reductions(gram, &item_set, &mut reductions);
new_item_sets(
&kernel_base,
&mut kernel_items,
&mut kernel_end,
gram,
&item_set,
&mut shift_symbol,
);
// Find or create states for shifts in the current state. This can potentially add new
// states to 'states'. Then record the resulting shifts in 'shifts'.
shift_symbol.sort();
for symbol in shift_symbol.iter().copied() {
// Search for an existing state that has the same items.
let symbol_items =
&kernel_items[kernel_base[symbol.index()]..kernel_end[symbol.index()]];
let this_state_set: &mut Vec<State> = &mut state_set[symbol_items[0].index()];
let shift_state = if let Some(&existing_state) = this_state_set
.iter()
.find(|state| *symbol_items == states[state.index()])
{
existing_state
} else {
// No match. Create a new state for this unique set of items.
let new_state: State = states.len().into();
states.push_entry_copy(symbol_items);
accessing_symbol.push(symbol);
// Add the new state to the state set for this item.
this_state_set.push(new_state);
new_state
};
shifts.push_value(shift_state);
}
shifts.finish_key();
item_set.clear();
shift_symbol.clear();
this_state += 1;
}
let output = LR0Output {
nstates: states.len(),
accessing_symbol,
reductions,
shifts,
derives,
state_items: states,
};
dump_lr0_output(gram, &output);
output
}
fn dump_lr0_output(gram: &Grammar, output: &LR0Output) {
if !log_enabled!(log::Level::Debug) {
return;
}
debug!("States: (nstates: {})", output.nstates);
for istate in 0..output.nstates {
let state = State(istate as i16);
debug!(
"s{}: (accessing_symbol {})",
state,
gram.name(output.accessing_symbol[state])
);
let items = &output.state_items[istate];
let mut line = String::new();
for i in 0..items.len() {
let rhs = items[i].index();
line.push_str(&format!("item {:4} : ", rhs));
// back up to start of this rule
let mut rhs_first = rhs;
while rhs_first > 0 && gram.ritem[rhs_first - 1].is_symbol() {
rhs_first -= 1;
}
// loop through rhs
let mut j = rhs_first;
while gram.ritem[j].is_symbol() {
let s = gram.ritem[j].as_symbol();
if j == rhs {
line.push_str(".");
}
line.push_str(&format!(" {}", gram.name(s)));
j += 1;
}
if j == rhs {
line.push_str(".");
}
// Is this item a reduction? In other words, is the "." at the end of the RHS?
if gram.ritem[rhs].is_rule() {
let r = gram.ritem[rhs].as_rule();
write!(
line,
" -> reduction (r{}) {}",
r.index(),
gram.name(gram.rlhs(r)),
)
.unwrap();
}
debug!(" {}", line);
line.clear();
}
for &r in &output.reductions[istate] {
debug!(" reduction: {}", gram.rule_to_str(r));
}
for &s in output.shifts[istate].iter() {
debug!(
" shift: {:-20} --> s{}",
gram.name(output.accessing_symbol[s]),
s.index()
);
}
}
}
// fills shift_symbol with shifts
// kernel_base: Symbol -> index into kernel_items
// kernel_end: Symbol -> index into kernel_items
fn new_item_sets(
kernel_base: &[usize],
kernel_items: &mut [Item],
kernel_end: &mut [usize],
gram: &Grammar,
item_set: &[Item],
shift_symbol: &mut Vec<Symbol>,
) {
assert!(shift_symbol.len() == 0);
// reset kernel_end
kernel_end.copy_from_slice(kernel_base);
for &item in item_set.iter() {
let symbol = gram.ritem(item);
if symbol.is_symbol() {
let symbol = symbol.as_symbol();
let base = kernel_base[symbol.index()];
let end = &mut kernel_end[symbol.index()];
if *end == base {
shift_symbol.push(symbol);
}
kernel_items[*end] = item + 1;
*end += 1;
}
}
}
/// Examine the items in the given item set. If any of the items have reached the
/// end of the rhs list for a particular rule, then add that rule to the reduction set.
/// We discover this by testing the sign of the next symbol in the item; if it is
/// negative, then we have reached the end of the symbols on the rhs of a rule. See
/// the code in reader::pack_grammar(), where this information is set up.
fn save_reductions(gram: &Grammar, item_set: &[Item], rules: &mut RampTable<Rule>) {
for &item in item_set {
let sr = gram.ritem(item);
if sr.is_rule() {
rules.push_value(sr.as_rule());
}
}
rules.finish_key();
}
/// Computes the `DERIVES` table. The `DERIVES` table maps `Var -> [Rule]`, where each `Var`
/// is a nonterminal and `[Rule]` contains all of the rules that have `Var` as their left-hand
/// side. In other words, this table allows you to lookup the set of rules that produce
/// (derive) a particular nonterminal.
fn set_derives(gram: &Grammar) -> RampTable<Rule> {
let mut derives = RampTable::<Rule>::with_capacity(gram.nsyms, gram.nrules);
for lhs in gram.iter_var_syms() {
for rule in gram.iter_rules() {
if gram.rlhs(rule) == lhs {
derives.push_value(rule as Rule);
}
}
derives.finish_key();
}
if log_enabled!(log::Level::Debug) {
debug!("DERIVES:");
for lhs in gram.iter_vars() {
let lhs_sym = gram.var_to_symbol(lhs);
debug!(" {} derives rules: ", gram.name(lhs_sym));
for &rule in &derives[lhs.index()] {
debug!(" {}", &gram.rule_to_str(rule));
}
}
}
derives
}
/// Builds a vector of symbols which are nullable. A nullable symbol is one which can be
/// reduced from an empty sequence of tokens.
pub(crate) fn set_nullable(gram: &Grammar) -> TVec<Symbol, bool> {
let mut nullable: TVec<Symbol, bool> = TVec::from_vec(vec![false; gram.nsyms]);
loop {
let mut done = true;
let mut i = 1;
while i < gram.ritem.len() {
let mut empty = true;
let rule = loop {
let sr = gram.ritem[i];
if sr.is_rule() {
break sr.as_rule();
}
let sym = sr.as_symbol();
if !nullable[sym] {
empty = false;
}
i += 1;
};
if empty {
let sym = gram.rlhs(rule);
if !nullable[sym]
|
}
i += 1;
}
if done {
break;
}
}
if log_enabled!(log::Level::Debug) {
debug!("Nullable symbols:");
for sym in gram.iter_var_syms() {
if nullable[sym] {
debug!("{}", gram.name(sym));
}
}
}
nullable
}
/// Computes the "epsilon-free firsts" (EFF) relation.
/// The EFF is a bit matrix [nvars, nvars].
fn set_eff(gram: &Grammar, derives: &RampTable<Rule>) -> Bitmat {
let nvars = gram.nvars;
let mut eff: Bitmat = Bitmat::new(nvars, nvars);
for row in 0..nvars {
for &rule in &derives[row] {
let derived_rule_or_symbol = gram.ritem(gram.rrhs(rule));
if derived_rule_or_symbol.is_rule() {
continue;
}
let symbol = derived_rule_or_symbol.as_symbol();
if gram.is_var(symbol) {
eff.set(row, gram.symbol_to_var(symbol).index());
}
}
}
reflexive_transitive_closure(&mut eff);
print_eff(gram, &eff);
eff
}
fn print_eff(gram: &Grammar, eff: &Bitmat) {
debug!("Epsilon Free Firsts");
for i in 0..eff.rows {
let var = Var(i as i16);
debug!("{}", gram.name(gram.var_to_symbol(var)));
for j in eff.iter_ones_in_row(i) {
debug!(" {}", gram.name(gram.var_to_symbol(Var(j as i16))));
}
}
}
/// Computes the `first_derives` relation, which is a bit matrix of size [nvars, nrules].
/// Each row corresponds to a variable, and each column corresponds to a rule.
///
/// Note: Because this relation is only relevant to variables (non-terminals), the table
/// does not waste space on tokens. That is, row 0 is assigned to the first non-terminal
/// (Grammar.start_symbol). So when indexing using a symbol value, you have to subtract
/// start_symbol (or, equivalently, ntokens) first.
///
/// This implementation processes bits in groups of 32, for the sake of efficiency.
/// It is not clear whether this complexity is still justifiable, but it is preserved.
pub(crate) fn set_first_derives(gram: &Grammar, derives: &RampTable<Rule>) -> Bitmat {
let eff = set_eff(gram, derives);
assert!(eff.rows == gram.nvars);
assert!(eff.cols == gram.nvars);
let mut first_derives = Bitmat::new(gram.nvars, gram.nrules);
for (i, j) in eff.iter_ones() {
for &rule in &derives[j] {
first_derives.set(i, rule.index());
}
}
print_first_derives(gram, &first_derives);
first_derives
}
/// Computes the closure of a set of item sets, and writes the result into 'item_set'.
/// nucleus contains a set of items, that is, positions within reductions that are possible
/// in the current state. The closure() function looks at the next symbol in each item, and
/// if the next symbol is a variable, the first_derives relation is consulted in order to see
/// which other rules need to be added to the closure.
///
/// The caller provides a mutable rule_set array, which is guaranteed to hold enough space for
/// a bit vector of size nrules. The caller does not otherwise use rule_set; the caller provides
/// rule_set only to avoid frequently allocating and destroying an array.
///
/// Similarly, the item_set is passed as a mutable vector. However, the caller guarantees that
/// item_set will be empty on call to closure(), and closure() writes its output into item_set.
///
/// * rule_set: bit vector, size=nrules; temporary data, written and read by this fn
///
/// TODO: Consider changing item_set from Vec<Item> to a bitmap, whose length is nitems.
/// Then the 'states' table becomes a Bitmat.
pub(crate) fn closure(
gram: &Grammar,
nucleus: &[Item],
first_derives: &Bitmat,
rule_set: &mut Bitv32,
item_set: &mut Vec<Item>,
) {
assert!(item_set.len() == 0);
let rulesetsize = word_size(rule_set.nbits);
// clear rule_set
rule_set.set_all(false);
// For each item in the nucleus, examine the next symbol in the item.
// If the next symbol is a non-terminal, then find the corresponding
// row in the first_derives table, and merge that row into the rule_set
// bit vector. The result is that rule_set will contain a bit vector
// that identifies the rules that need to be added to the closure of the
// current state. Keep in mind that we process bit vectors in u32 chunks.
for &item in nucleus.iter() {
let symbol_or_rule = gram.ritem(item);
if symbol_or_rule.is_symbol() {
let symbol = symbol_or_rule.as_symbol();
if gram.is_var(symbol) {
let var = gram.symbol_to_var(symbol);
let dsp = var.index() * first_derives.rowsize;
for i in 0..rulesetsize {
rule_set.data[i] |= first_derives.data[dsp + i];
}
}
}
}
// Scan the rule_set that we just constructed. The rule_set tells us which
// items need to be merged into the item set for the current state. Thus,
// new_items = nucleus merged with rule_set.iter_ones().
//
// This code relies on this invariant:
// for all r: gram.rrhs[r + 1] > gram.rrhs[r]
let mut i: usize = 0; // index into nucleus
for rule in rule_set.iter_ones() {
let item = gram.rrhs[rule];
while i < nucleus.len() && nucleus[i] < item {
item_set.push(nucleus[i]);
i += 1;
}
item_set.push(item);
while i < nucleus.len() && nucleus[i] == item {
i += 1;
}
}
while i < nucleus.len() {
item_set.push(nucleus[i]);
i += 1;
}
}
// first_derives: cols = nrules
fn print_first_derives(gram: &Grammar, first_derives: &Bitmat) {
debug!("");
debug!("First Derives");
debug!("");
for i in 0..gram.nvars {
let var = Var(i as i16);
debug!("{} derives:", gram.name(gram.var_to_symbol(var)));
for j in first_derives.iter_ones_in_row(i) {
debug!(" {}", gram.rule_to_str(Rule(j as i16)));
}
}
}
|
{
nullable[sym] = true;
done = false;
}
|
conditional_block
|
trait-bounds-in-arc.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that a heterogeneous list of existential types can be put inside an Arc
// and shared between threads as long as all types fulfill Send.
// ignore-pretty
#![allow(unknown_features)]
#![feature(box_syntax, std_misc)]
#![feature(unboxed_closures)]
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread;
trait Pet {
fn name(&self, blk: Box<FnMut(&str)>);
fn num_legs(&self) -> usize;
fn of_good_pedigree(&self) -> bool;
}
struct Catte {
num_whiskers: usize,
name: String,
}
struct Dogge {
bark_decibels: usize,
tricks_known: usize,
name: String,
}
struct Goldfyshe {
swim_speed: usize,
name: String,
}
impl Pet for Catte {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(&self.name) }
fn num_legs(&self) -> usize { 4 }
fn of_good_pedigree(&self) -> bool { self.num_whiskers >= 4 }
}
impl Pet for Dogge {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(&self.name) }
fn
|
(&self) -> usize { 4 }
fn of_good_pedigree(&self) -> bool {
self.bark_decibels < 70 || self.tricks_known > 20
}
}
impl Pet for Goldfyshe {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(&self.name) }
fn num_legs(&self) -> usize { 0 }
fn of_good_pedigree(&self) -> bool { self.swim_speed >= 500 }
}
pub fn main() {
let catte = Catte { num_whiskers: 7, name: "alonzo_church".to_string() };
let dogge1 = Dogge {
bark_decibels: 100,
tricks_known: 42,
name: "alan_turing".to_string(),
};
let dogge2 = Dogge {
bark_decibels: 55,
tricks_known: 11,
name: "albert_einstein".to_string(),
};
let fishe = Goldfyshe {
swim_speed: 998,
name: "alec_guinness".to_string(),
};
let arc = Arc::new(vec!(box catte as Box<Pet+Sync+Send>,
box dogge1 as Box<Pet+Sync+Send>,
box fishe as Box<Pet+Sync+Send>,
box dogge2 as Box<Pet+Sync+Send>));
let (tx1, rx1) = channel();
let arc1 = arc.clone();
let t1 = thread::spawn(move|| { check_legs(arc1); tx1.send(()); });
let (tx2, rx2) = channel();
let arc2 = arc.clone();
let t2 = thread::spawn(move|| { check_names(arc2); tx2.send(()); });
let (tx3, rx3) = channel();
let arc3 = arc.clone();
let t3 = thread::spawn(move|| { check_pedigree(arc3); tx3.send(()); });
rx1.recv();
rx2.recv();
rx3.recv();
t1.join();
t2.join();
t3.join();
}
fn check_legs(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
let mut legs = 0;
for pet in &*arc {
legs += pet.num_legs();
}
assert!(legs == 12);
}
fn check_names(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
for pet in &*arc {
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
pet.name(Box::new(|name| {
assert!(name.as_bytes()[0] == 'a' as u8 && name.as_bytes()[1] == 'l' as u8);
}))
}
}
fn check_pedigree(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
for pet in &*arc {
assert!(pet.of_good_pedigree());
}
}
|
num_legs
|
identifier_name
|
trait-bounds-in-arc.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that a heterogeneous list of existential types can be put inside an Arc
// and shared between threads as long as all types fulfill Send.
// ignore-pretty
#![allow(unknown_features)]
#![feature(box_syntax, std_misc)]
#![feature(unboxed_closures)]
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread;
trait Pet {
fn name(&self, blk: Box<FnMut(&str)>);
fn num_legs(&self) -> usize;
fn of_good_pedigree(&self) -> bool;
}
struct Catte {
num_whiskers: usize,
name: String,
}
struct Dogge {
bark_decibels: usize,
tricks_known: usize,
name: String,
}
struct Goldfyshe {
swim_speed: usize,
name: String,
}
impl Pet for Catte {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(&self.name) }
fn num_legs(&self) -> usize { 4 }
fn of_good_pedigree(&self) -> bool { self.num_whiskers >= 4 }
}
impl Pet for Dogge {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(&self.name) }
fn num_legs(&self) -> usize { 4 }
fn of_good_pedigree(&self) -> bool {
self.bark_decibels < 70 || self.tricks_known > 20
}
}
impl Pet for Goldfyshe {
fn name(&self, mut blk: Box<FnMut(&str)>) { blk(&self.name) }
fn num_legs(&self) -> usize { 0 }
fn of_good_pedigree(&self) -> bool { self.swim_speed >= 500 }
}
pub fn main() {
let catte = Catte { num_whiskers: 7, name: "alonzo_church".to_string() };
let dogge1 = Dogge {
bark_decibels: 100,
tricks_known: 42,
name: "alan_turing".to_string(),
};
let dogge2 = Dogge {
bark_decibels: 55,
tricks_known: 11,
name: "albert_einstein".to_string(),
};
let fishe = Goldfyshe {
swim_speed: 998,
name: "alec_guinness".to_string(),
};
let arc = Arc::new(vec!(box catte as Box<Pet+Sync+Send>,
box dogge1 as Box<Pet+Sync+Send>,
box fishe as Box<Pet+Sync+Send>,
box dogge2 as Box<Pet+Sync+Send>));
let (tx1, rx1) = channel();
let arc1 = arc.clone();
let t1 = thread::spawn(move|| { check_legs(arc1); tx1.send(()); });
let (tx2, rx2) = channel();
let arc2 = arc.clone();
let t2 = thread::spawn(move|| { check_names(arc2); tx2.send(()); });
let (tx3, rx3) = channel();
let arc3 = arc.clone();
|
rx1.recv();
rx2.recv();
rx3.recv();
t1.join();
t2.join();
t3.join();
}
fn check_legs(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
let mut legs = 0;
for pet in &*arc {
legs += pet.num_legs();
}
assert!(legs == 12);
}
fn check_names(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
for pet in &*arc {
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
pet.name(Box::new(|name| {
assert!(name.as_bytes()[0] == 'a' as u8 && name.as_bytes()[1] == 'l' as u8);
}))
}
}
fn check_pedigree(arc: Arc<Vec<Box<Pet+Sync+Send>>>) {
for pet in &*arc {
assert!(pet.of_good_pedigree());
}
}
|
let t3 = thread::spawn(move|| { check_pedigree(arc3); tx3.send(()); });
|
random_line_split
|
range-1.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
pub fn main() {
// Mixed types.
let _ = 0u32..10i32;
//~^ ERROR mismatched types
// Bool => does not implement iterator.
for i in false..true {}
//~^ ERROR `bool: std::iter::Step` is not satisfied
// Unsized type.
let arr: &[_] = &[1, 2, 3];
let range = *arr..;
//~^ ERROR the size for values of type
}
|
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test range syntax - type errors.
|
random_line_split
|
range-1.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test range syntax - type errors.
pub fn
|
() {
// Mixed types.
let _ = 0u32..10i32;
//~^ ERROR mismatched types
// Bool => does not implement iterator.
for i in false..true {}
//~^ ERROR `bool: std::iter::Step` is not satisfied
// Unsized type.
let arr: &[_] = &[1, 2, 3];
let range = *arr..;
//~^ ERROR the size for values of type
}
|
main
|
identifier_name
|
range-1.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test range syntax - type errors.
pub fn main()
|
{
// Mixed types.
let _ = 0u32..10i32;
//~^ ERROR mismatched types
// Bool => does not implement iterator.
for i in false..true {}
//~^ ERROR `bool: std::iter::Step` is not satisfied
// Unsized type.
let arr: &[_] = &[1, 2, 3];
let range = *arr..;
//~^ ERROR the size for values of type
}
|
identifier_body
|
|
x86.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use back::target_strs;
use driver::config::cfg_os_to_meta_os;
use metadata::loader::meta_section_name;
use syntax::abi;
pub fn
|
(target_triple: StrBuf, target_os: abi::Os)
-> target_strs::t {
return target_strs::t {
module_asm: "".to_strbuf(),
meta_sect_name:
meta_section_name(cfg_os_to_meta_os(target_os)).to_strbuf(),
data_layout: match target_os {
abi::OsMacos => {
"e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16\
-i32:32:32-i64:32:64\
-f32:32:32-f64:32:64-v64:64:64\
-v128:128:128-a0:0:64-f80:128:128\
-n8:16:32".to_strbuf()
}
abi::OsWin32 => {
"e-p:32:32-f64:64:64-i64:64:64-f80:32:32-n8:16:32".to_strbuf()
}
abi::OsLinux => {
"e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_strbuf()
}
abi::OsAndroid => {
"e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_strbuf()
}
abi::OsFreebsd => {
"e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_strbuf()
}
},
target_triple: target_triple,
cc_args: vec!("-m32".to_strbuf()),
};
}
|
get_target_strs
|
identifier_name
|
x86.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use back::target_strs;
use driver::config::cfg_os_to_meta_os;
use metadata::loader::meta_section_name;
use syntax::abi;
pub fn get_target_strs(target_triple: StrBuf, target_os: abi::Os)
-> target_strs::t {
return target_strs::t {
module_asm: "".to_strbuf(),
meta_sect_name:
meta_section_name(cfg_os_to_meta_os(target_os)).to_strbuf(),
data_layout: match target_os {
abi::OsMacos => {
"e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16\
-i32:32:32-i64:32:64\
-f32:32:32-f64:32:64-v64:64:64\
-v128:128:128-a0:0:64-f80:128:128\
-n8:16:32".to_strbuf()
}
abi::OsWin32 => {
"e-p:32:32-f64:64:64-i64:64:64-f80:32:32-n8:16:32".to_strbuf()
}
abi::OsLinux => {
"e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_strbuf()
}
abi::OsAndroid => {
"e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_strbuf()
|
abi::OsFreebsd => {
"e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_strbuf()
}
},
target_triple: target_triple,
cc_args: vec!("-m32".to_strbuf()),
};
}
|
}
|
random_line_split
|
x86.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use back::target_strs;
use driver::config::cfg_os_to_meta_os;
use metadata::loader::meta_section_name;
use syntax::abi;
pub fn get_target_strs(target_triple: StrBuf, target_os: abi::Os)
-> target_strs::t
|
abi::OsLinux => {
"e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_strbuf()
}
abi::OsAndroid => {
"e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_strbuf()
}
abi::OsFreebsd => {
"e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_strbuf()
}
},
target_triple: target_triple,
cc_args: vec!("-m32".to_strbuf()),
};
}
|
{
return target_strs::t {
module_asm: "".to_strbuf(),
meta_sect_name:
meta_section_name(cfg_os_to_meta_os(target_os)).to_strbuf(),
data_layout: match target_os {
abi::OsMacos => {
"e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16\
-i32:32:32-i64:32:64\
-f32:32:32-f64:32:64-v64:64:64\
-v128:128:128-a0:0:64-f80:128:128\
-n8:16:32".to_strbuf()
}
abi::OsWin32 => {
"e-p:32:32-f64:64:64-i64:64:64-f80:32:32-n8:16:32".to_strbuf()
}
|
identifier_body
|
x86.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use back::target_strs;
use driver::config::cfg_os_to_meta_os;
use metadata::loader::meta_section_name;
use syntax::abi;
pub fn get_target_strs(target_triple: StrBuf, target_os: abi::Os)
-> target_strs::t {
return target_strs::t {
module_asm: "".to_strbuf(),
meta_sect_name:
meta_section_name(cfg_os_to_meta_os(target_os)).to_strbuf(),
data_layout: match target_os {
abi::OsMacos => {
"e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16\
-i32:32:32-i64:32:64\
-f32:32:32-f64:32:64-v64:64:64\
-v128:128:128-a0:0:64-f80:128:128\
-n8:16:32".to_strbuf()
}
abi::OsWin32 => {
"e-p:32:32-f64:64:64-i64:64:64-f80:32:32-n8:16:32".to_strbuf()
}
abi::OsLinux =>
|
abi::OsAndroid => {
"e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_strbuf()
}
abi::OsFreebsd => {
"e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_strbuf()
}
},
target_triple: target_triple,
cc_args: vec!("-m32".to_strbuf()),
};
}
|
{
"e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_strbuf()
}
|
conditional_block
|
macro.rs
|
//! [マクロクラブ Rust支部 | κeenのHappy Hacκing Blog](https://keens.github.io/blog/2018/02/17/makurokurabu_rustshibu/)
macro_rules! double_println {
($ex: expr) => (
println!("{}{}", $ex, $ex);
// Also fine without the trailing semicolon
);
}
macro_rules! simple_macro {
// `()` indicates that the macro takes no arguments
() => (
println!("Hello Macro!");
);
}
macro_rules! name {
// expr: matches an expression
($x: expr) => (
println!("{}", $x);
// Semicolons are required when there are multiple statements
// Apparently it's fine to write one anyway so you don't forget
);
($x: expr, $($y: expr), +) => (
println!("{}", $x);
name!($($y), +)
);
}
macro_rules! fnction {
// ident: used for function and variable names
($fn_name: ident) => (
fn $fn_name() {
// stringify! converts the ident to a string as-is
println!("called {}", stringify!($fn_name));
}
)
}
fnction!(test);
fn main() {
double_println!("double");
simple_macro!();
name!("Susumu", "Hirasawa");
test();
}
|
identifier_name
|
||
macro.rs
|
//! [マクロクラブ Rust支部 | κeenのHappy Hacκing Blog](https://keens.github.io/blog/2018/02/17/makurokurabu_rustshibu/)
macro_rules! double_println {
($ex: expr) => (
println!("{}{}", $ex, $ex);
// Also fine without the trailing semicolon
);
}
macro_rules! simple_macro {
// `()` indicates that the macro takes no arguments
() => (
|
);
}
macro_rules! name {
// expr: matches an expression
($x: expr) => (
println!("{}", $x);
// Semicolons are required when there are multiple statements
// Apparently it's fine to write one anyway so you don't forget
);
($x: expr, $($y: expr), +) => (
println!("{}", $x);
name!($($y), +)
);
}
macro_rules! fnction {
// ident: used for function and variable names
($fn_name: ident) => (
fn $fn_name() {
// stringify! converts the ident to a string as-is
println!("called {}", stringify!($fn_name));
}
)
}
fnction!(test);
fn main() {
double_println!("double");
simple_macro!();
name!("Susumu", "Hirasawa");
test();
}
|
println!("Hello Macro!");
|
random_line_split
|
macro.rs
|
//! [マクロクラブ Rust支部 | κeenのHappy Hacκing Blog](https://keens.github.io/blog/2018/02/17/makurokurabu_rustshibu/)
macro_rules! double_println {
($ex: expr) => (
println!("{}{}", $ex, $ex);
// Also fine without the trailing semicolon
);
}
macro_rules! simple_macro {
// `()` indicates that the macro takes no arguments
() => (
println!("Hello Macro!");
);
}
macro_rules! name {
// expr: matches an expression
($x: expr) => (
println!("{}", $x);
// Semicolons are required when there are multiple statements
// Apparently it's fine to write one anyway so you don't forget
);
($x: expr, $($y: expr), +) => (
println!("{}", $x);
name!($($y), +)
);
}
macro_rules! fnction {
// ident: used for function and variable names
($fn_name: ident) => (
fn $fn_name() {
// stringify! converts the ident to a string as-is
println!("called {}", stringify!($fn_name));
}
)
}
fnction!(test);
fn main() {
double_println!("double");
simple_macro!();
name!("Susumu", "Hirasawa");
test();
}
|
identifier_body
|
||
redundant_else.rs
|
use clippy_utils::diagnostics::span_lint_and_help;
use rustc_ast::ast::{Block, Expr, ExprKind, Stmt, StmtKind};
use rustc_ast::visit::{walk_expr, Visitor};
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_middle::lint::in_external_macro;
use rustc_session::{declare_lint_pass, declare_tool_lint};
declare_clippy_lint! {
/// ### What it does
/// Checks for `else` blocks that can be removed without changing semantics.
///
/// ### Why is this bad?
/// The `else` block adds unnecessary indentation and verbosity.
///
/// ### Known problems
/// Some may prefer to keep the `else` block for clarity.
///
/// ### Example
/// ```rust
/// fn my_func(count: u32) {
/// if count == 0 {
/// print!("Nothing to do");
/// return;
/// } else {
/// print!("Moving on...");
/// }
/// }
/// ```
/// Use instead:
/// ```rust
/// fn my_func(count: u32) {
/// if count == 0 {
/// print!("Nothing to do");
/// return;
/// }
/// print!("Moving on...");
/// }
/// ```
pub REDUNDANT_ELSE,
pedantic,
"`else` branch that can be removed without changing semantics"
}
declare_lint_pass!(RedundantElse => [REDUNDANT_ELSE]);
impl EarlyLintPass for RedundantElse {
fn
|
(&mut self, cx: &EarlyContext<'_>, stmt: &Stmt) {
if in_external_macro(cx.sess, stmt.span) {
return;
}
// Only look at expressions that are a whole statement
let expr: &Expr = match &stmt.kind {
StmtKind::Expr(expr) | StmtKind::Semi(expr) => expr,
_ => return,
};
// if else
let (mut then, mut els): (&Block, &Expr) = match &expr.kind {
ExprKind::If(_, then, Some(els)) => (then, els),
_ => return,
};
loop {
if !BreakVisitor::default().check_block(then) {
// then block does not always break
return;
}
match &els.kind {
// else if else
ExprKind::If(_, next_then, Some(next_els)) => {
then = next_then;
els = next_els;
continue;
},
// else if without else
ExprKind::If(..) => return,
// done
_ => break,
}
}
span_lint_and_help(
cx,
REDUNDANT_ELSE,
els.span,
"redundant else block",
None,
"remove the `else` block and move the contents out",
);
}
}
/// Call `check` functions to check if an expression always breaks control flow
#[derive(Default)]
struct BreakVisitor {
is_break: bool,
}
impl<'ast> Visitor<'ast> for BreakVisitor {
fn visit_block(&mut self, block: &'ast Block) {
self.is_break = match block.stmts.as_slice() {
[.., last] => self.check_stmt(last),
_ => false,
};
}
fn visit_expr(&mut self, expr: &'ast Expr) {
self.is_break = match expr.kind {
ExprKind::Break(..) | ExprKind::Continue(..) | ExprKind::Ret(..) => true,
ExprKind::Match(_, ref arms) => arms.iter().all(|arm| self.check_expr(&arm.body)),
ExprKind::If(_, ref then, Some(ref els)) => self.check_block(then) && self.check_expr(els),
ExprKind::If(_, _, None)
// ignore loops for simplicity
| ExprKind::While(..) | ExprKind::ForLoop(..) | ExprKind::Loop(..) => false,
_ => {
walk_expr(self, expr);
return;
},
};
}
}
impl BreakVisitor {
fn check<T>(&mut self, item: T, visit: fn(&mut Self, T)) -> bool {
visit(self, item);
std::mem::replace(&mut self.is_break, false)
}
fn check_block(&mut self, block: &Block) -> bool {
self.check(block, Self::visit_block)
}
fn check_expr(&mut self, expr: &Expr) -> bool {
self.check(expr, Self::visit_expr)
}
fn check_stmt(&mut self, stmt: &Stmt) -> bool {
self.check(stmt, Self::visit_stmt)
}
}
|
check_stmt
|
identifier_name
|