file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
sync.rs
|
#![crate_name = "sync"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Alexander Fomin <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* Last synced with: sync (GNU coreutils) 8.13 */
extern crate getopts;
extern crate libc;
#[path = "../common/util.rs"] #[macro_use] mod util;
static NAME: &'static str = "sync";
static VERSION: &'static str = "1.0.0";
#[cfg(unix)]
mod platform {
use super::libc;
extern {
fn sync() -> libc::c_void;
}
pub unsafe fn do_sync() -> isize {
sync();
0
}
}
#[cfg(windows)]
mod platform {
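// Added summary comment: Windows has no sync(2), so this module enumerates all
// volumes with FindFirstVolumeA/FindNextVolumeA, opens each fixed (DRIVE_FIXED)
// volume with CreateFileA, and forces its buffers to disk via FlushFileBuffers.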
pub use super::libc;
use std::{mem, string};
use std::ptr::null;
extern "system" {
fn CreateFileA(lpFileName: *const libc::c_char,
dwDesiredAccess: libc::uint32_t,
dwShareMode: libc::uint32_t,
lpSecurityAttributes: *const libc::c_void, // *LPSECURITY_ATTRIBUTES
dwCreationDisposition: libc::uint32_t,
dwFlagsAndAttributes: libc::uint32_t,
hTemplateFile: *const libc::c_void) -> *const libc::c_void;
fn GetDriveTypeA(lpRootPathName: *const libc::c_char) -> libc::c_uint;
fn GetLastError() -> libc::uint32_t;
fn FindFirstVolumeA(lpszVolumeName: *mut libc::c_char,
cchBufferLength: libc::uint32_t) -> *const libc::c_void;
fn FindNextVolumeA(hFindVolume: *const libc::c_void,
lpszVolumeName: *mut libc::c_char,
cchBufferLength: libc::uint32_t) -> libc::c_int;
fn FindVolumeClose(hFindVolume: *const libc::c_void) -> libc::c_int;
fn FlushFileBuffers(hFile: *const libc::c_void) -> libc::c_int;
}
|
unsafe fn flush_volume(name: &str) {
let name_buffer = name.to_c_str().as_ptr();
if 0x00000003 == GetDriveTypeA(name_buffer) { // DRIVE_FIXED
let sliced_name = &name[..name.len() - 1]; // eliminate trailing backslash
let sliced_name_buffer = sliced_name.to_c_str().as_ptr();
match CreateFileA(sliced_name_buffer,
0xC0000000, // GENERIC_WRITE
0x00000003, // FILE_SHARE_WRITE,
null(),
0x00000003, // OPEN_EXISTING
0,
null()) {
-1 => { // INVALID_HANDLE_VALUE
crash!(GetLastError(), "failed to create volume handle");
}
handle => {
if FlushFileBuffers(handle) == 0 {
crash!(GetLastError(), "failed to flush file buffer");
}
}
}
}
}
#[allow(unused_unsafe)]
unsafe fn find_first_volume() -> (String, *const libc::c_void) {
let mut name: [libc::c_char; 260] = mem::uninitialized(); // MAX_PATH
match FindFirstVolumeA(name.as_mut_ptr(),
name.len() as libc::uint32_t) {
-1 => { // INVALID_HANDLE_VALUE
crash!(GetLastError(), "failed to find first volume");
}
handle => {
(string::raw::from_buf(name.as_ptr() as *const u8), handle)
}
}
}
#[allow(unused_unsafe)]
unsafe fn find_all_volumes() -> Vec<String> {
match find_first_volume() {
(first_volume, next_volume_handle) => {
let mut volumes = vec![first_volume];
loop {
let mut name: [libc::c_char; 260] = mem::uninitialized(); // MAX_PATH
match FindNextVolumeA(next_volume_handle,
name.as_mut_ptr(),
name.len() as libc::uint32_t) {
0 => {
match GetLastError() {
0x12 => { // ERROR_NO_MORE_FILES
FindVolumeClose(next_volume_handle); // ignore FindVolumeClose() failures
break;
}
err => {
crash!(err, "failed to find next volume");
}
}
}
_ => {
volumes.push(string::raw::from_buf(name.as_ptr() as *const u8));
}
}
}
volumes
}
}
}
pub unsafe fn do_sync() -> isize {
let volumes = find_all_volumes();
for vol in volumes.iter() {
flush_volume(&vol);
}
0
}
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
_ => { help(&opts); return 1 }
};
if matches.opt_present("h") {
help(&opts);
return 0
}
if matches.opt_present("V") {
version();
return 0
}
sync();
0
}
fn version() {
println!("{} (uutils) {}", NAME, VERSION);
println!("The MIT License");
println!("");
println!("Author -- Alexander Fomin.");
}
fn help(opts: &getopts::Options) {
let msg = format!("{0} {1}
Usage:
{0} [OPTION]
Force changed blocks to disk, update the super block.", NAME, VERSION);
print!("{}", opts.usage(&msg));
}
fn sync() -> isize {
unsafe {
platform::do_sync()
}
}
|
#[allow(unused_unsafe)]
|
random_line_split
|
sync.rs
|
#![crate_name = "sync"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Alexander Fomin <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* Last synced with: sync (GNU coreutils) 8.13 */
extern crate getopts;
extern crate libc;
#[path = "../common/util.rs"] #[macro_use] mod util;
static NAME: &'static str = "sync";
static VERSION: &'static str = "1.0.0";
#[cfg(unix)]
mod platform {
use super::libc;
extern {
fn sync() -> libc::c_void;
}
pub unsafe fn do_sync() -> isize {
sync();
0
}
}
#[cfg(windows)]
mod platform {
pub use super::libc;
use std::{mem, string};
use std::ptr::null;
extern "system" {
fn CreateFileA(lpFileName: *const libc::c_char,
dwDesiredAccess: libc::uint32_t,
dwShareMode: libc::uint32_t,
lpSecurityAttributes: *const libc::c_void, // *LPSECURITY_ATTRIBUTES
dwCreationDisposition: libc::uint32_t,
dwFlagsAndAttributes: libc::uint32_t,
hTemplateFile: *const libc::c_void) -> *const libc::c_void;
fn GetDriveTypeA(lpRootPathName: *const libc::c_char) -> libc::c_uint;
fn GetLastError() -> libc::uint32_t;
fn FindFirstVolumeA(lpszVolumeName: *mut libc::c_char,
cchBufferLength: libc::uint32_t) -> *const libc::c_void;
fn FindNextVolumeA(hFindVolume: *const libc::c_void,
lpszVolumeName: *mut libc::c_char,
cchBufferLength: libc::uint32_t) -> libc::c_int;
fn FindVolumeClose(hFindVolume: *const libc::c_void) -> libc::c_int;
fn FlushFileBuffers(hFile: *const libc::c_void) -> libc::c_int;
}
#[allow(unused_unsafe)]
unsafe fn flush_volume(name: &str) {
let name_buffer = name.to_c_str().as_ptr();
if 0x00000003 == GetDriveTypeA(name_buffer) { // DRIVE_FIXED
let sliced_name = &name[..name.len() - 1]; // eliminate trailing backslash
let sliced_name_buffer = sliced_name.to_c_str().as_ptr();
match CreateFileA(sliced_name_buffer,
0xC0000000, // GENERIC_WRITE
0x00000003, // FILE_SHARE_WRITE,
null(),
0x00000003, // OPEN_EXISTING
0,
null()) {
-1 => { // INVALID_HANDLE_VALUE
crash!(GetLastError(), "failed to create volume handle");
}
handle => {
if FlushFileBuffers(handle) == 0 {
crash!(GetLastError(), "failed to flush file buffer");
}
}
}
}
}
#[allow(unused_unsafe)]
unsafe fn find_first_volume() -> (String, *const libc::c_void)
|
#[allow(unused_unsafe)]
unsafe fn find_all_volumes() -> Vec<String> {
match find_first_volume() {
(first_volume, next_volume_handle) => {
let mut volumes = vec![first_volume];
loop {
let mut name: [libc::c_char; 260] = mem::uninitialized(); // MAX_PATH
match FindNextVolumeA(next_volume_handle,
name.as_mut_ptr(),
name.len() as libc::uint32_t) {
0 => {
match GetLastError() {
0x12 => { // ERROR_NO_MORE_FILES
FindVolumeClose(next_volume_handle); // ignore FindVolumeClose() failures
break;
}
err => {
crash!(err, "failed to find next volume");
}
}
}
_ => {
volumes.push(string::raw::from_buf(name.as_ptr() as *const u8));
}
}
}
volumes
}
}
}
pub unsafe fn do_sync() -> isize {
let volumes = find_all_volumes();
for vol in volumes.iter() {
flush_volume(&vol);
}
0
}
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
_ => { help(&opts); return 1 }
};
if matches.opt_present("h") {
help(&opts);
return 0
}
if matches.opt_present("V") {
version();
return 0
}
sync();
0
}
fn version() {
println!("{} (uutils) {}", NAME, VERSION);
println!("The MIT License");
println!("");
println!("Author -- Alexander Fomin.");
}
fn help(opts: &getopts::Options) {
let msg = format!("{0} {1}
Usage:
{0} [OPTION]
Force changed blocks to disk, update the super block.", NAME, VERSION);
print!("{}", opts.usage(&msg));
}
fn sync() -> isize {
unsafe {
platform::do_sync()
}
}
|
{
let mut name: [libc::c_char; 260] = mem::uninitialized(); // MAX_PATH
match FindFirstVolumeA(name.as_mut_ptr(),
name.len() as libc::uint32_t) {
-1 => { // INVALID_HANDLE_VALUE
crash!(GetLastError(), "failed to find first volume");
}
handle => {
(string::raw::from_buf(name.as_ptr() as *const u8), handle)
}
}
}
|
identifier_body
|
issue-3447.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::RefCell;
static S: &'static str = "str";
struct list<T> {
element: T,
next: Option<Box<RefCell<list<T>>>>
}
impl<T:'static> list<T> {
pub fn addEnd(&mut self, element: T) {
let newList = list {
element: element,
next: None
};
self.next = Some(box RefCell::new(newList));
}
}
pub fn main() {
|
next: None
};
println!("{}", ls.element);
}
|
let ls = list {
element: S,
|
random_line_split
|
issue-3447.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::RefCell;
static S: &'static str = "str";
struct list<T> {
element: T,
next: Option<Box<RefCell<list<T>>>>
}
impl<T:'static> list<T> {
pub fn addEnd(&mut self, element: T)
|
}
pub fn main() {
let ls = list {
element: S,
next: None
};
println!("{}", ls.element);
}
|
{
let newList = list {
element: element,
next: None
};
self.next = Some(box RefCell::new(newList));
}
|
identifier_body
|
issue-3447.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::RefCell;
static S: &'static str = "str";
struct list<T> {
element: T,
next: Option<Box<RefCell<list<T>>>>
}
impl<T:'static> list<T> {
pub fn
|
(&mut self, element: T) {
let newList = list {
element: element,
next: None
};
self.next = Some(box RefCell::new(newList));
}
}
pub fn main() {
let ls = list {
element: S,
next: None
};
println!("{}", ls.element);
}
|
addEnd
|
identifier_name
|
lib.rs
|
// DO NOT EDIT!
// This file was generated automatically from 'src/mako/api/lib.rs.mako'
// DO NOT EDIT!
//! This documentation was generated from *replicapool* crate version *0.1.8+20150311*, where *20150311* is the exact revision of the *replicapool:v1beta2* schema built by the [mako](http://www.makotemplates.org/) code generator *v0.1.8*.
//!
//! Everything else about the *replicapool* *v1_beta2* API can be found at the
//! [official documentation site](https://developers.google.com/compute/docs/instance-groups/manager/v1beta2).
//! The original source code is [on github](https://github.com/Byron/google-apis-rs/tree/master/gen/replicapool1_beta2).
//! # Features
//!
//! Handle the following *Resources* with ease from the central [hub](struct.Replicapool.html)...
//!
//! * [instance group managers](struct.InstanceGroupManager.html)
//! * [*abandon instances*](struct.InstanceGroupManagerAbandonInstanceCall.html), [*delete*](struct.InstanceGroupManagerDeleteCall.html), [*delete instances*](struct.InstanceGroupManagerDeleteInstanceCall.html), [*get*](struct.InstanceGroupManagerGetCall.html), [*insert*](struct.InstanceGroupManagerInsertCall.html), [*list*](struct.InstanceGroupManagerListCall.html), [*recreate instances*](struct.InstanceGroupManagerRecreateInstanceCall.html), [*resize*](struct.InstanceGroupManagerResizeCall.html), [*set instance template*](struct.InstanceGroupManagerSetInstanceTemplateCall.html) and [*set target pools*](struct.InstanceGroupManagerSetTargetPoolCall.html)
//! * zone operations
//! * [*get*](struct.ZoneOperationGetCall.html) and [*list*](struct.ZoneOperationListCall.html)
//!
//!
//!
//!
//! Not what you are looking for? Find all other Google APIs in their Rust [documentation index](../index.html).
//!
//! # Structure of this Library
//!
//! The API is structured into the following primary items:
//!
//! * **[Hub](struct.Replicapool.html)**
//! * a central object to maintain state and allow accessing all *Activities*
//! * creates [*Method Builders*](trait.MethodsBuilder.html) which in turn
//! allow access to individual [*Call Builders*](trait.CallBuilder.html)
//! * **[Resources](trait.Resource.html)**
//! * primary types that you can apply *Activities* to
//! * a collection of properties and *Parts*
//! * **[Parts](trait.Part.html)**
//! * a collection of properties
//! * never directly used in *Activities*
//! * **[Activities](trait.CallBuilder.html)**
//! * operations to apply to *Resources*
//!
//! All *structures* are marked with applicable traits to further categorize them and ease browsing.
//!
//! Generally speaking, you can invoke *Activities* like this:
//!
//! ```Rust,ignore
//! let r = hub.resource().activity(...).doit()
//! ```
//!
//! Or specifically...
//!
//! ```ignore
//! let r = hub.instance_group_managers().set_target_pools(...).doit()
//! let r = hub.instance_group_managers().list(...).doit()
//! let r = hub.instance_group_managers().insert(...).doit()
//! let r = hub.instance_group_managers().get(...).doit()
//! let r = hub.instance_group_managers().abandon_instances(...).doit()
//! let r = hub.instance_group_managers().recreate_instances(...).doit()
//! let r = hub.instance_group_managers().delete(...).doit()
//! let r = hub.instance_group_managers().set_instance_template(...).doit()
//! let r = hub.instance_group_managers().resize(...).doit()
//! let r = hub.instance_group_managers().delete_instances(...).doit()
//! ```
//!
//! The `resource()` and `activity(...)` calls create [builders][builder-pattern]. The second one dealing with `Activities`
//! supports various methods to configure the impending operation (not shown here). It is made such that all required arguments have to be
//! specified right away (i.e. `(...)`), whereas all optional ones can be [built up][builder-pattern] as desired.
//! The `doit()` method performs the actual communication with the server and returns the respective result.
//!
//! # Usage
//!
//! ## Setting up your Project
//!
//! To use this library, you would put the following lines into your `Cargo.toml` file:
//!
//! ```toml
//! [dependencies]
//! google-replicapool1_beta2 = "*"
//! ```
//!
//! ## A complete example
//!
//! ```test_harness,no_run
//! extern crate hyper;
//! extern crate yup_oauth2 as oauth2;
//! extern crate google_replicapool1_beta2 as replicapool1_beta2;
//! use replicapool1_beta2::{Result, Error};
//! # #[test] fn egal() {
//! use std::default::Default;
//! use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
//! use replicapool1_beta2::Replicapool;
//!
//! // Get an ApplicationSecret instance by some means. It contains the `client_id` and
//! // `client_secret`, among other things.
//! let secret: ApplicationSecret = Default::default();
//! // Instantiate the authenticator. It will choose a suitable authentication flow for you,
//! // unless you replace `None` with the desired Flow.
//! // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
//! // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
//! // retrieve them from storage.
//! let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
//! hyper::Client::new(),
//! <MemoryStorage as Default>::default(), None);
//! let mut hub = Replicapool::new(hyper::Client::new(), auth);
//! // You can configure optional parameters by calling the respective setters at will, and
//! // execute the final call using `doit()`.
//! // Values shown here are possibly random and not representative!
//! let result = hub.instance_group_managers().list("project", "zone")
//! .page_token("sanctus")
//! .max_results(79)
//! .filter("amet")
//! .doit();
//!
//! match result {
//! Err(e) => match e {
//! // The Error enum provides details about what exactly happened.
//! // You can also just use its `Debug`, `Display` or `Error` traits
//! Error::HttpError(_)
//! |Error::MissingAPIKey
//! |Error::MissingToken(_)
//! |Error::Cancelled
//! |Error::UploadSizeLimitExceeded(_, _)
//! |Error::Failure(_)
//! |Error::BadRequest(_)
//! |Error::FieldClash(_)
//! |Error::JsonDecodeError(_, _) => println!("{}", e),
//! },
//! Ok(res) => println!("Success: {:?}", res),
//! }
//! # }
//! ```
//! ## Handling Errors
//!
//! All errors produced by the system are provided either as [Result](enum.Result.html) enumeration as return value of
//! the doit() methods, or handed as possibly intermediate results to either the
//! [Hub Delegate](trait.Delegate.html), or the [Authenticator Delegate](../yup-oauth2/trait.AuthenticatorDelegate.html).
//!
//! When delegates handle errors or intermediate values, they may have a chance to instruct the system to retry. This
//! makes the system potentially resilient to all kinds of errors.
//!
//! ## Uploads and Downloads
//! If a method supports downloads, the response body, which is part of the [Result](enum.Result.html), should be
//! read by you to obtain the media.
//! If such a method also supports a [Response Result](trait.ResponseResult.html), it will return that by default.
//! You can see it as meta-data for the actual media. To trigger a media download, you will have to set up the builder by making
//! this call: `.param("alt", "media")`.
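//!
//! A minimal, hypothetical sketch of such a call (the chosen method and its
//! arguments are assumptions for illustration, not taken from this crate's API):
//!
//! ```ignore
//! let media_call = hub.instance_group_managers().get("project", "zone", "managerName")
//!                     .param("alt", "media");
//! let result = media_call.doit();
//! ```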
//!
//! Methods supporting uploads can do so using up to 2 different protocols:
//! *simple* and *resumable*. The distinctiveness of each is represented by customized
//! `doit(...)` methods, which are then named `upload(...)` and `upload_resumable(...)` respectively.
//!
//! ## Customization and Callbacks
//!
//! You may alter the way a `doit()` method is called by providing a [delegate](trait.Delegate.html) to the
//! [Method Builder](trait.CallBuilder.html) before making the final `doit()` call.
//! Respective methods will be called to provide progress information, as well as determine whether the system should
//! retry on failure.
|
//! The [delegate trait](trait.Delegate.html) is default-implemented, allowing you to customize it with minimal effort.
//!
//! ## Optional Parts in Server-Requests
//!
//! All structures provided by this library are made to be [encodable](trait.RequestValue.html) and
//! [decodable](trait.ResponseResult.html) via *json*. Optionals are used to indicate that partial requests and responses
//! are valid.
//! Most optionals are considered [Parts](trait.Part.html) which are identifiable by name, which will be sent to
//! the server to indicate either the set parts of the request or the desired parts in the response.
//!
//! ## Builder Arguments
//!
//! Using [method builders](trait.CallBuilder.html), you are able to prepare an action call by repeatedly calling its methods.
//! These will always take a single argument, for which the following statements are true.
//!
//! * [PODs][wiki-pod] are handed by copy
//! * strings are passed as `&str`
//! * [request values](trait.RequestValue.html) are moved
//!
//! Arguments will always be copied or cloned into the builder, to make them independent of their original life times.
//!
//! [wiki-pod]: http://en.wikipedia.org/wiki/Plain_old_data_structure
//! [builder-pattern]: http://en.wikipedia.org/wiki/Builder_pattern
//! [google-go-api]: https://github.com/google/google-api-go-client
//!
//!
// Unused attributes happen thanks to defined, but unused structures
// We don't warn about this, as depending on the API, some data structures or facilities are never used.
// Instead of pre-determining this, we just disable the lint. It's manually tuned to not have any
// unused imports in fully featured APIs. Same with unused_mut....
#![allow(unused_imports, unused_mut, dead_code)]
include!(concat!(env!("OUT_DIR"), "/lib.rs"));
|
//!
|
random_line_split
|
git_gc.rs
|
//! Tests for git garbage collection.
use std::env;
use std::ffi::OsStr;
use std::path::PathBuf;
use std::process::Command;
use cargo_test_support::git;
use cargo_test_support::paths;
use cargo_test_support::project;
use cargo_test_support::registry::Package;
use url::Url;
fn find_index() -> PathBuf {
let dir = paths::home().join(".cargo/registry/index");
dir.read_dir().unwrap().next().unwrap().unwrap().path()
}
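// Added summary comment: run_test builds a project depending on `bar`, pushes
// N commits into a scratch repository that the registry index then fetches, so
// the index's pack directory accumulates packfiles; it then runs `cargo update`
// with __CARGO_PACKFILE_LIMIT=10 and asserts the packfile count went down,
// i.e. that git garbage collection was triggered.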
fn run_test(path_env: Option<&OsStr>) {
const N: usize = 50;
let foo = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
bar = "*"
"#,
)
.file("src/lib.rs", "")
.build();
Package::new("bar", "0.1.0").publish();
foo.cargo("build").run();
let index = find_index();
let path = paths::home().join("tmp");
let url = Url::from_file_path(&path).unwrap().to_string();
let repo = git2::Repository::init(&path).unwrap();
let index = git2::Repository::open(&index).unwrap();
let mut cfg = repo.config().unwrap();
cfg.set_str("user.email", "[email protected]").unwrap();
cfg.set_str("user.name", "Foo Bar").unwrap();
let mut cfg = index.config().unwrap();
cfg.set_str("user.email", "[email protected]").unwrap();
cfg.set_str("user.name", "Foo Bar").unwrap();
for _ in 0..N {
git::commit(&repo);
index
.remote_anonymous(&url)
.unwrap()
.fetch(&["refs/heads/master:refs/remotes/foo/master"], None, None)
.unwrap();
}
drop((repo, index));
Package::new("bar", "0.1.1").publish();
let before = find_index()
.join(".git/objects/pack")
.read_dir()
.unwrap()
.count();
assert!(before > N);
let mut cmd = foo.cargo("update");
cmd.env("__CARGO_PACKFILE_LIMIT", "10");
if let Some(path) = path_env {
cmd.env("PATH", path);
}
cmd.env("CARGO_LOG", "trace");
cmd.run();
let after = find_index()
.join(".git/objects/pack")
.read_dir()
.unwrap()
.count();
assert!(
after < before,
"packfiles before: {}\n\
packfiles after: {}",
before,
after
);
}
#[cargo_test]
fn use_git_gc() {
if Command::new("git").arg("--version").output().is_err() {
return;
}
run_test(None);
}
#[cargo_test]
fn avoid_using_git() {
let path = env::var_os("PATH").unwrap_or_default();
let mut paths = env::split_paths(&path).collect::<Vec<_>>();
let idx = paths
.iter()
.position(|p| p.join("git").exists() || p.join("git.exe").exists());
match idx {
Some(i) =>
|
None => return,
}
run_test(Some(&env::join_paths(&paths).unwrap()));
}
|
{
paths.remove(i);
}
|
conditional_block
|
git_gc.rs
|
//! Tests for git garbage collection.
use std::env;
use std::ffi::OsStr;
use std::path::PathBuf;
use std::process::Command;
use cargo_test_support::git;
use cargo_test_support::paths;
use cargo_test_support::project;
use cargo_test_support::registry::Package;
use url::Url;
fn find_index() -> PathBuf {
let dir = paths::home().join(".cargo/registry/index");
dir.read_dir().unwrap().next().unwrap().unwrap().path()
}
|
fn run_test(path_env: Option<&OsStr>) {
const N: usize = 50;
let foo = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
bar = "*"
"#,
)
.file("src/lib.rs", "")
.build();
Package::new("bar", "0.1.0").publish();
foo.cargo("build").run();
let index = find_index();
let path = paths::home().join("tmp");
let url = Url::from_file_path(&path).unwrap().to_string();
let repo = git2::Repository::init(&path).unwrap();
let index = git2::Repository::open(&index).unwrap();
let mut cfg = repo.config().unwrap();
cfg.set_str("user.email", "[email protected]").unwrap();
cfg.set_str("user.name", "Foo Bar").unwrap();
let mut cfg = index.config().unwrap();
cfg.set_str("user.email", "[email protected]").unwrap();
cfg.set_str("user.name", "Foo Bar").unwrap();
for _ in 0..N {
git::commit(&repo);
index
.remote_anonymous(&url)
.unwrap()
.fetch(&["refs/heads/master:refs/remotes/foo/master"], None, None)
.unwrap();
}
drop((repo, index));
Package::new("bar", "0.1.1").publish();
let before = find_index()
.join(".git/objects/pack")
.read_dir()
.unwrap()
.count();
assert!(before > N);
let mut cmd = foo.cargo("update");
cmd.env("__CARGO_PACKFILE_LIMIT", "10");
if let Some(path) = path_env {
cmd.env("PATH", path);
}
cmd.env("CARGO_LOG", "trace");
cmd.run();
let after = find_index()
.join(".git/objects/pack")
.read_dir()
.unwrap()
.count();
assert!(
after < before,
"packfiles before: {}\n\
packfiles after: {}",
before,
after
);
}
#[cargo_test]
fn use_git_gc() {
if Command::new("git").arg("--version").output().is_err() {
return;
}
run_test(None);
}
#[cargo_test]
fn avoid_using_git() {
let path = env::var_os("PATH").unwrap_or_default();
let mut paths = env::split_paths(&path).collect::<Vec<_>>();
let idx = paths
.iter()
.position(|p| p.join("git").exists() || p.join("git.exe").exists());
match idx {
Some(i) => {
paths.remove(i);
}
None => return,
}
run_test(Some(&env::join_paths(&paths).unwrap()));
}
|
random_line_split
|
|
git_gc.rs
|
//! Tests for git garbage collection.
use std::env;
use std::ffi::OsStr;
use std::path::PathBuf;
use std::process::Command;
use cargo_test_support::git;
use cargo_test_support::paths;
use cargo_test_support::project;
use cargo_test_support::registry::Package;
use url::Url;
fn find_index() -> PathBuf {
let dir = paths::home().join(".cargo/registry/index");
dir.read_dir().unwrap().next().unwrap().unwrap().path()
}
fn run_test(path_env: Option<&OsStr>) {
const N: usize = 50;
let foo = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
bar = "*"
"#,
)
.file("src/lib.rs", "")
.build();
Package::new("bar", "0.1.0").publish();
foo.cargo("build").run();
let index = find_index();
let path = paths::home().join("tmp");
let url = Url::from_file_path(&path).unwrap().to_string();
let repo = git2::Repository::init(&path).unwrap();
let index = git2::Repository::open(&index).unwrap();
let mut cfg = repo.config().unwrap();
cfg.set_str("user.email", "[email protected]").unwrap();
cfg.set_str("user.name", "Foo Bar").unwrap();
let mut cfg = index.config().unwrap();
cfg.set_str("user.email", "[email protected]").unwrap();
cfg.set_str("user.name", "Foo Bar").unwrap();
for _ in 0..N {
git::commit(&repo);
index
.remote_anonymous(&url)
.unwrap()
.fetch(&["refs/heads/master:refs/remotes/foo/master"], None, None)
.unwrap();
}
drop((repo, index));
Package::new("bar", "0.1.1").publish();
let before = find_index()
.join(".git/objects/pack")
.read_dir()
.unwrap()
.count();
assert!(before > N);
let mut cmd = foo.cargo("update");
cmd.env("__CARGO_PACKFILE_LIMIT", "10");
if let Some(path) = path_env {
cmd.env("PATH", path);
}
cmd.env("CARGO_LOG", "trace");
cmd.run();
let after = find_index()
.join(".git/objects/pack")
.read_dir()
.unwrap()
.count();
assert!(
after < before,
"packfiles before: {}\n\
packfiles after: {}",
before,
after
);
}
#[cargo_test]
fn use_git_gc() {
if Command::new("git").arg("--version").output().is_err() {
return;
}
run_test(None);
}
#[cargo_test]
fn
|
() {
let path = env::var_os("PATH").unwrap_or_default();
let mut paths = env::split_paths(&path).collect::<Vec<_>>();
let idx = paths
.iter()
.position(|p| p.join("git").exists() || p.join("git.exe").exists());
match idx {
Some(i) => {
paths.remove(i);
}
None => return,
}
run_test(Some(&env::join_paths(&paths).unwrap()));
}
|
avoid_using_git
|
identifier_name
|
git_gc.rs
|
//! Tests for git garbage collection.
use std::env;
use std::ffi::OsStr;
use std::path::PathBuf;
use std::process::Command;
use cargo_test_support::git;
use cargo_test_support::paths;
use cargo_test_support::project;
use cargo_test_support::registry::Package;
use url::Url;
fn find_index() -> PathBuf {
let dir = paths::home().join(".cargo/registry/index");
dir.read_dir().unwrap().next().unwrap().unwrap().path()
}
fn run_test(path_env: Option<&OsStr>) {
const N: usize = 50;
let foo = project()
.file(
"Cargo.toml",
r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
bar = "*"
"#,
)
.file("src/lib.rs", "")
.build();
Package::new("bar", "0.1.0").publish();
foo.cargo("build").run();
let index = find_index();
let path = paths::home().join("tmp");
let url = Url::from_file_path(&path).unwrap().to_string();
let repo = git2::Repository::init(&path).unwrap();
let index = git2::Repository::open(&index).unwrap();
let mut cfg = repo.config().unwrap();
cfg.set_str("user.email", "[email protected]").unwrap();
cfg.set_str("user.name", "Foo Bar").unwrap();
let mut cfg = index.config().unwrap();
cfg.set_str("user.email", "[email protected]").unwrap();
cfg.set_str("user.name", "Foo Bar").unwrap();
for _ in 0..N {
git::commit(&repo);
index
.remote_anonymous(&url)
.unwrap()
.fetch(&["refs/heads/master:refs/remotes/foo/master"], None, None)
.unwrap();
}
drop((repo, index));
Package::new("bar", "0.1.1").publish();
let before = find_index()
.join(".git/objects/pack")
.read_dir()
.unwrap()
.count();
assert!(before > N);
let mut cmd = foo.cargo("update");
cmd.env("__CARGO_PACKFILE_LIMIT", "10");
if let Some(path) = path_env {
cmd.env("PATH", path);
}
cmd.env("CARGO_LOG", "trace");
cmd.run();
let after = find_index()
.join(".git/objects/pack")
.read_dir()
.unwrap()
.count();
assert!(
after < before,
"packfiles before: {}\n\
packfiles after: {}",
before,
after
);
}
#[cargo_test]
fn use_git_gc()
|
#[cargo_test]
fn avoid_using_git() {
let path = env::var_os("PATH").unwrap_or_default();
let mut paths = env::split_paths(&path).collect::<Vec<_>>();
let idx = paths
.iter()
.position(|p| p.join("git").exists() || p.join("git.exe").exists());
match idx {
Some(i) => {
paths.remove(i);
}
None => return,
}
run_test(Some(&env::join_paths(&paths).unwrap()));
}
|
{
if Command::new("git").arg("--version").output().is_err() {
return;
}
run_test(None);
}
|
identifier_body
|
union_with_anon_struct.rs
|
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Copy, Clone)]
pub union foo {
pub bar: foo__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)]
pub struct foo__bindgen_ty_1 {
pub a: ::std::os::raw::c_uint,
pub b: ::std::os::raw::c_uint,
}
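// Added note: the generated layout tests below assert size and alignment with
// mem::size_of/align_of and compute field offsets with the null-pointer idiom
// `&(*::std::ptr::null::<T>()).field as *const _ as usize`.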
#[test]
fn bindgen_test_layout_foo__bindgen_ty_1() {
assert_eq!(
::std::mem::size_of::<foo__bindgen_ty_1>(),
8usize,
concat!("Size of: ", stringify!(foo__bindgen_ty_1))
);
assert_eq!(
::std::mem::align_of::<foo__bindgen_ty_1>(),
4usize,
concat!("Alignment of ", stringify!(foo__bindgen_ty_1))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<foo__bindgen_ty_1>())).a as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(foo__bindgen_ty_1),
"::",
stringify!(a)
)
);
assert_eq!(
|
unsafe {
&(*(::std::ptr::null::<foo__bindgen_ty_1>())).b as *const _ as usize
},
4usize,
concat!(
"Offset of field: ",
stringify!(foo__bindgen_ty_1),
"::",
stringify!(b)
)
);
}
#[test]
fn bindgen_test_layout_foo() {
assert_eq!(
::std::mem::size_of::<foo>(),
8usize,
concat!("Size of: ", stringify!(foo))
);
assert_eq!(
::std::mem::align_of::<foo>(),
4usize,
concat!("Alignment of ", stringify!(foo))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<foo>())).bar as *const _ as usize },
0usize,
concat!("Offset of field: ", stringify!(foo), "::", stringify!(bar))
);
}
impl Default for foo {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
|
random_line_split
|
|
union_with_anon_struct.rs
|
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Copy, Clone)]
pub union foo {
pub bar: foo__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)]
pub struct foo__bindgen_ty_1 {
pub a: ::std::os::raw::c_uint,
pub b: ::std::os::raw::c_uint,
}
#[test]
fn bindgen_test_layout_foo__bindgen_ty_1() {
assert_eq!(
::std::mem::size_of::<foo__bindgen_ty_1>(),
8usize,
concat!("Size of: ", stringify!(foo__bindgen_ty_1))
);
assert_eq!(
::std::mem::align_of::<foo__bindgen_ty_1>(),
4usize,
concat!("Alignment of ", stringify!(foo__bindgen_ty_1))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<foo__bindgen_ty_1>())).a as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(foo__bindgen_ty_1),
"::",
stringify!(a)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<foo__bindgen_ty_1>())).b as *const _ as usize
},
4usize,
concat!(
"Offset of field: ",
stringify!(foo__bindgen_ty_1),
"::",
stringify!(b)
)
);
}
#[test]
fn bindgen_test_layout_foo()
|
impl Default for foo {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
|
{
assert_eq!(
::std::mem::size_of::<foo>(),
8usize,
concat!("Size of: ", stringify!(foo))
);
assert_eq!(
::std::mem::align_of::<foo>(),
4usize,
concat!("Alignment of ", stringify!(foo))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<foo>())).bar as *const _ as usize },
0usize,
concat!("Offset of field: ", stringify!(foo), "::", stringify!(bar))
);
}
|
identifier_body
|
union_with_anon_struct.rs
|
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Copy, Clone)]
pub union foo {
pub bar: foo__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)]
pub struct foo__bindgen_ty_1 {
pub a: ::std::os::raw::c_uint,
pub b: ::std::os::raw::c_uint,
}
#[test]
fn
|
() {
assert_eq!(
::std::mem::size_of::<foo__bindgen_ty_1>(),
8usize,
concat!("Size of: ", stringify!(foo__bindgen_ty_1))
);
assert_eq!(
::std::mem::align_of::<foo__bindgen_ty_1>(),
4usize,
concat!("Alignment of ", stringify!(foo__bindgen_ty_1))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<foo__bindgen_ty_1>())).a as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(foo__bindgen_ty_1),
"::",
stringify!(a)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<foo__bindgen_ty_1>())).b as *const _ as usize
},
4usize,
concat!(
"Offset of field: ",
stringify!(foo__bindgen_ty_1),
"::",
stringify!(b)
)
);
}
#[test]
fn bindgen_test_layout_foo() {
assert_eq!(
::std::mem::size_of::<foo>(),
8usize,
concat!("Size of: ", stringify!(foo))
);
assert_eq!(
::std::mem::align_of::<foo>(),
4usize,
concat!("Alignment of ", stringify!(foo))
);
assert_eq!(
unsafe { &(*(::std::ptr::null::<foo>())).bar as *const _ as usize },
0usize,
concat!("Offset of field: ", stringify!(foo), "::", stringify!(bar))
);
}
impl Default for foo {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
|
bindgen_test_layout_foo__bindgen_ty_1
|
identifier_name
|
common.rs
|
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_HARDWARE_CAMERA2_CTS_COMMON_RS
#define ANDROID_HARDWARE_CAMERA2_CTS_COMMON_RS
|
#pragma rs java_package_name(android.hardware.camera2.cts)
#pragma rs_fp_relaxed
typedef uchar3 yuvx_444; // interleaved YUV. (y,u,v) per pixel. use.x/.y/.z to read
typedef uchar3 yuvf_420; // flexible YUV (4:2:0). use rsGetElementAtYuv to read.
#define convert_yuvx_444 convert_uchar3
#define convert_yuvf_420 __error_cant_output_flexible_yuv__
#define rsGetElementAt_yuvx_444 rsGetElementAt_uchar3
#define rsGetElementAt_yuvf_420 __error_cant_output_flexible_yuv__
#define RS_KERNEL __attribute__((kernel))
#ifndef LOG_DEBUG
#define LOG_DEBUG 0
#endif
#if LOG_DEBUG
#define LOGD(string, expr) rsDebug((string), (expr))
#else
#define LOGD(string, expr) if (0) rsDebug((string), (expr))
#endif
#endif // header guard
|
#pragma version(1)
|
random_line_split
|
stop.rs
|
// STD Dependencies -----------------------------------------------------------
use std::fmt;
// Discord Dependencies -------------------------------------------------------
use discord::model::{ChannelId, ServerId};
// Internal Dependencies ------------------------------------------------------
use ::bot::{Bot, BotConfig};
use ::core::EventQueue;
use ::action::{ActionHandler, ActionGroup, MessageActions};
// Action Implementation ------------------------------------------------------
pub struct Action {
server_id: ServerId,
voice_channel_id: ChannelId,
}
impl Action {
pub fn new(
server_id: ServerId,
voice_channel_id: ChannelId
) -> Box<Action> {
Box::new(Action {
server_id: server_id,
voice_channel_id: voice_channel_id
})
}
}
impl ActionHandler for Action {
fn run(&mut self, bot: &mut Bot, _: &BotConfig, queue: &mut EventQueue) -> ActionGroup {
let mut actions: Vec<Box<ActionHandler>> = Vec::new();
if let Some(server) = bot.get_server(&self.server_id)
|
}
actions
}
}
impl fmt::Display for Action {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[Action] [StopRecording] Server #{}",
self.server_id
)
}
}
|
{
info!("{} Stopping audio recording...", self);
if let Some(channel_name) = server.channel_name(&self.voice_channel_id) {
// Notify all users in the current voice channel
for member in server.channel_voice_members(&self.voice_channel_id) {
actions.push(MessageActions::Send::user_private(
member.id,
format!(
"Note: Audio recording has been **stopped** for your current voice channel {}.",
channel_name
)
))
}
server.stop_recording_voice(queue);
}
|
conditional_block
|
stop.rs
|
// STD Dependencies -----------------------------------------------------------
use std::fmt;
// Discord Dependencies -------------------------------------------------------
use discord::model::{ChannelId, ServerId};
// Internal Dependencies ------------------------------------------------------
use ::bot::{Bot, BotConfig};
use ::core::EventQueue;
use ::action::{ActionHandler, ActionGroup, MessageActions};
// Action Implementation ------------------------------------------------------
pub struct
|
{
server_id: ServerId,
voice_channel_id: ChannelId,
}
impl Action {
pub fn new(
server_id: ServerId,
voice_channel_id: ChannelId
) -> Box<Action> {
Box::new(Action {
server_id: server_id,
voice_channel_id: voice_channel_id
})
}
}
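// Added summary comment: the handler below looks up the server, privately
// notifies every member currently in the target voice channel that recording
// has stopped, and then calls stop_recording_voice on the server.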
impl ActionHandler for Action {
fn run(&mut self, bot: &mut Bot, _: &BotConfig, queue: &mut EventQueue) -> ActionGroup {
let mut actions: Vec<Box<ActionHandler>> = Vec::new();
if let Some(server) = bot.get_server(&self.server_id) {
info!("{} Stopping audio recording...", self);
if let Some(channel_name) = server.channel_name(&self.voice_channel_id) {
// Notify all users in the current voice channel
for member in server.channel_voice_members(&self.voice_channel_id) {
actions.push(MessageActions::Send::user_private(
member.id,
format!(
"Note: Audio recording has been **stopped** for your current voice channel {}.",
channel_name
)
))
}
server.stop_recording_voice(queue);
}
}
actions
}
}
impl fmt::Display for Action {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[Action] [StopRecording] Server #{}",
self.server_id
)
}
}
|
Action
|
identifier_name
|
stop.rs
|
// STD Dependencies -----------------------------------------------------------
use std::fmt;
// Discord Dependencies -------------------------------------------------------
use discord::model::{ChannelId, ServerId};
// Internal Dependencies ------------------------------------------------------
use ::bot::{Bot, BotConfig};
use ::core::EventQueue;
use ::action::{ActionHandler, ActionGroup, MessageActions};
// Action Implementation ------------------------------------------------------
pub struct Action {
server_id: ServerId,
voice_channel_id: ChannelId,
}
impl Action {
pub fn new(
|
server_id: ServerId,
voice_channel_id: ChannelId
) -> Box<Action> {
Box::new(Action {
server_id: server_id,
voice_channel_id: voice_channel_id
})
}
}
impl ActionHandler for Action {
fn run(&mut self, bot: &mut Bot, _: &BotConfig, queue: &mut EventQueue) -> ActionGroup {
let mut actions: Vec<Box<ActionHandler>> = Vec::new();
if let Some(server) = bot.get_server(&self.server_id) {
info!("{} Stopping audio recording...", self);
if let Some(channel_name) = server.channel_name(&self.voice_channel_id) {
// Notify all users in the current voice channel
for member in server.channel_voice_members(&self.voice_channel_id) {
actions.push(MessageActions::Send::user_private(
member.id,
format!(
"Note: Audio recording has been **stopped** for your current voice channel {}.",
channel_name
)
))
}
server.stop_recording_voice(queue);
}
}
actions
}
}
impl fmt::Display for Action {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[Action] [StopRecording] Server #{}",
self.server_id
)
}
}
|
random_line_split
|
|
stop.rs
|
// STD Dependencies -----------------------------------------------------------
use std::fmt;
// Discord Dependencies -------------------------------------------------------
use discord::model::{ChannelId, ServerId};
// Internal Dependencies ------------------------------------------------------
use ::bot::{Bot, BotConfig};
use ::core::EventQueue;
use ::action::{ActionHandler, ActionGroup, MessageActions};
// Action Implementation ------------------------------------------------------
pub struct Action {
server_id: ServerId,
voice_channel_id: ChannelId,
}
impl Action {
pub fn new(
server_id: ServerId,
voice_channel_id: ChannelId
) -> Box<Action> {
Box::new(Action {
server_id: server_id,
voice_channel_id: voice_channel_id
})
}
}
impl ActionHandler for Action {
fn run(&mut self, bot: &mut Bot, _: &BotConfig, queue: &mut EventQueue) -> ActionGroup
|
server.stop_recording_voice(queue);
}
}
actions
}
}
impl fmt::Display for Action {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"[Action] [StopRecording] Server #{}",
self.server_id
)
}
}
|
{
let mut actions: Vec<Box<ActionHandler>> = Vec::new();
if let Some(server) = bot.get_server(&self.server_id) {
info!("{} Stopping audio recording...", self);
if let Some(channel_name) = server.channel_name(&self.voice_channel_id) {
// Notify all users in the current voice channel
for member in server.channel_voice_members(&self.voice_channel_id) {
actions.push(MessageActions::Send::user_private(
member.id,
format!(
"Note: Audio recording has been **stopped** for your current voice channel {}.",
channel_name
)
))
}
|
identifier_body
|
enums.rs
|
use operation::*;
/// Raw opcodes
///
/// Provides a way of storing both ARM and THUMB opcodes (which have
/// different sizes) in a single type.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Opcode {
/// A raw ARM opcode
///
/// Contains a 32-bit ARM opcode in machine encoding.
Arm(u32),
/// A raw THUMB opcode
///
/// Contains a 16-bit THUMB opcode in machine encoding.
Thumb(u16),
}
macro_rules! simple_from {
($src:ty => $dst:ty as $variant:path) => {
impl From<$src> for $dst {
fn from(orig: $src) -> Self {
$variant(orig)
}
}
};
}
simple_from!(u16 => Opcode as Opcode::Thumb);
simple_from!(u32 => Opcode as Opcode::Arm);
/// Decoded instructions
///
/// Provides a way of storing decoded instructions with information
/// about the processor's operating mode.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Instruction {
/// An ARM instruction
///
/// Contains the condition field from the opcode and the operation.
/// Every ARM instruction contains a condition for controlling
/// conditional execution, so the condition is available separately
/// from the operation.
Arm(Condition, Operation),
/// A THUMB instruction
///
/// Contains the decoded operation. THUMB instructions are
/// unconditionally executed (except for the branch instruction), so
/// no condition is available with the operation.
Thumb(Operation),
}
impl From<Opcode> for Instruction {
fn from(opcode: Opcode) -> Self
|
}
|
{
use arm;
use thumb;
match opcode {
Opcode::Arm(w) => {
let condition = arm::condition(w);
let operation = arm::decode(w);
Instruction::Arm(condition, operation)
}
Opcode::Thumb(s) => {
let operation = thumb::decode(s);
Instruction::Thumb(operation)
}
}
}
|
identifier_body
|
enums.rs
|
use operation::*;
/// Raw opcodes
///
/// Provides a way of storing both ARM and THUMB opcodes (which have
/// different sizes) in a single type.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum
|
{
/// A raw ARM opcode
///
/// Contains a 32-bit ARM opcode in machine encoding.
Arm(u32),
/// A raw THUMB opcode
///
/// Contains a 16-bit THUMB opcode in machine encoding.
Thumb(u16),
}
macro_rules! simple_from {
($src:ty => $dst:ty as $variant:path) => {
impl From<$src> for $dst {
fn from(orig: $src) -> Self {
$variant(orig)
}
}
};
}
simple_from!(u16 => Opcode as Opcode::Thumb);
simple_from!(u32 => Opcode as Opcode::Arm);
/// Decoded instructions
///
/// Provides a way of storing decoded instructions with information
/// about the processor's operating mode.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Instruction {
/// An ARM instruction
///
/// Contains the condition field from the opcode and the operation.
/// Every ARM instruction contains a condition for controlling
/// conditional execution, so the condition is available separately
/// from the operation.
Arm(Condition, Operation),
/// A THUMB instruction
///
/// Contains the decoded operation. THUMB instructions are
/// unconditionally executed (except for the branch instruction), so
/// no condition is available with the operation.
Thumb(Operation),
}
impl From<Opcode> for Instruction {
fn from(opcode: Opcode) -> Self {
use arm;
use thumb;
match opcode {
Opcode::Arm(w) => {
let condition = arm::condition(w);
let operation = arm::decode(w);
Instruction::Arm(condition, operation)
}
Opcode::Thumb(s) => {
let operation = thumb::decode(s);
Instruction::Thumb(operation)
}
}
}
}
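// Added usage sketch (illustrative, not part of the original file); the literal
// encodings are the conventional ARM/THUMB no-op words and are assumptions here:
//
//     let arm: Opcode = 0xE1A00000u32.into();   // Opcode::Arm via simple_from!
//     let thumb: Opcode = 0x46C0u16.into();     // Opcode::Thumb via simple_from!
//     let decoded = Instruction::from(arm);     // dispatches to arm::decode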
|
Opcode
|
identifier_name
|
enums.rs
|
use operation::*;
/// Raw opcodes
///
/// Provides a way of storing both ARM and THUMB opcodes (which have
/// different sizes) in a single type.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Opcode {
/// A raw ARM opcode
///
/// Contains a 32-bit ARM opcode in machine encoding.
Arm(u32),
/// A raw THUMB opcode
///
/// Contains a 16-bit THUMB opcode in machine encoding.
Thumb(u16),
}
macro_rules! simple_from {
($src:ty => $dst:ty as $variant:path) => {
impl From<$src> for $dst {
fn from(orig: $src) -> Self {
$variant(orig)
}
}
};
}
simple_from!(u16 => Opcode as Opcode::Thumb);
simple_from!(u32 => Opcode as Opcode::Arm);
/// Decoded instructions
///
/// Provides a way of storing decoded instructions with information
/// about the processor's operating mode.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Instruction {
/// An ARM instruction
///
/// Contains the condition field from the opcode and the operation.
/// Every ARM instruction contains a condition for controlling
/// conditional execution, so the condition is available separately
/// from the operation.
Arm(Condition, Operation),
/// A THUMB instruction
///
/// Contains the decoded operation. THUMB instructions are
/// unconditionally executed (except for the branch instruction), so
/// no condition is available with the operation.
Thumb(Operation),
}
impl From<Opcode> for Instruction {
fn from(opcode: Opcode) -> Self {
use arm;
use thumb;
|
let condition = arm::condition(w);
let operation = arm::decode(w);
Instruction::Arm(condition, operation)
}
Opcode::Thumb(s) => {
let operation = thumb::decode(s);
Instruction::Thumb(operation)
}
}
}
}
|
match opcode {
Opcode::Arm(w) => {
|
random_line_split
|
auth.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! HTTP Authorization implementations
use std::collections::HashMap;
use hyper::{server, net, header, status};
use endpoint::Handler;
use handlers::{AuthRequiredHandler, ContentHandler};
/// Authorization result
pub enum Authorized {
/// Authorization was successful.
Yes,
/// Unsuccessful authorization. Handler for further work is returned.
No(Box<Handler>),
}
/// Authorization interface
pub trait Authorization : Send + Sync {
/// Checks if authorization is valid.
fn is_authorized(&self, req: &server::Request<net::HttpStream>)-> Authorized;
}
/// HTTP Basic Authorization handler
pub struct HttpBasicAuth {
users: HashMap<String, String>,
}
/// No-authorization implementation (authorization disabled)
pub struct NoAuth;
impl Authorization for NoAuth {
fn is_authorized(&self, _req: &server::Request<net::HttpStream>)-> Authorized {
Authorized::Yes
}
}
impl Authorization for HttpBasicAuth {
fn is_authorized(&self, req: &server::Request<net::HttpStream>) -> Authorized {
let auth = self.check_auth(&req);
match auth {
Access::Denied => {
Authorized::No(Box::new(ContentHandler::error(
status::StatusCode::Unauthorized,
"Unauthorized",
"You need to provide valid credentials to access this page.",
None,
None,
)))
},
Access::AuthRequired => {
Authorized::No(Box::new(AuthRequiredHandler))
},
Access::Granted =>
|
,
}
}
}
#[derive(Debug)]
enum Access {
Granted,
Denied,
AuthRequired,
}
impl HttpBasicAuth {
/// Creates `HttpBasicAuth` instance with only one user.
pub fn single_user(username: &str, password: &str) -> Self {
let mut users = HashMap::new();
users.insert(username.to_owned(), password.to_owned());
HttpBasicAuth {
users: users
}
}
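// Added usage note: a single-user setup would look like
// `HttpBasicAuth::single_user("admin", "secret")`; every incoming request is
// then checked against that map by check_auth/is_authorized.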
fn is_authorized(&self, username: &str, password: &str) -> bool {
self.users.get(&username.to_owned()).map_or(false, |pass| pass == password)
}
fn check_auth(&self, req: &server::Request<net::HttpStream>) -> Access {
match req.headers().get::<header::Authorization<header::Basic>>() {
Some(&header::Authorization(
header::Basic { ref username, password: Some(ref password) }
)) if self.is_authorized(username, password) => Access::Granted,
Some(_) => Access::Denied,
None => Access::AuthRequired,
}
}
}
|
{
Authorized::Yes
}
|
conditional_block
|
auth.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! HTTP Authorization implementations
use std::collections::HashMap;
use hyper::{server, net, header, status};
use endpoint::Handler;
use handlers::{AuthRequiredHandler, ContentHandler};
/// Authorization result
pub enum Authorized {
/// Authorization was successful.
Yes,
/// Unsuccessful authorization. Handler for further work is returned.
No(Box<Handler>),
}
/// Authorization interface
pub trait Authorization : Send + Sync {
/// Checks if authorization is valid.
fn is_authorized(&self, req: &server::Request<net::HttpStream>)-> Authorized;
}
/// HTTP Basic Authorization handler
pub struct HttpBasicAuth {
users: HashMap<String, String>,
}
/// No-authorization implementation (authorization disabled)
pub struct NoAuth;
impl Authorization for NoAuth {
fn is_authorized(&self, _req: &server::Request<net::HttpStream>)-> Authorized {
Authorized::Yes
}
}
impl Authorization for HttpBasicAuth {
fn is_authorized(&self, req: &server::Request<net::HttpStream>) -> Authorized {
let auth = self.check_auth(&req);
match auth {
Access::Denied => {
Authorized::No(Box::new(ContentHandler::error(
status::StatusCode::Unauthorized,
"Unauthorized",
"You need to provide valid credentials to access this page.",
None,
None,
)))
},
Access::AuthRequired => {
Authorized::No(Box::new(AuthRequiredHandler))
},
Access::Granted => {
Authorized::Yes
},
}
}
}
#[derive(Debug)]
enum Access {
Granted,
Denied,
AuthRequired,
}
impl HttpBasicAuth {
/// Creates `HttpBasicAuth` instance with only one user.
pub fn
|
(username: &str, password: &str) -> Self {
let mut users = HashMap::new();
users.insert(username.to_owned(), password.to_owned());
HttpBasicAuth {
users: users
}
}
fn is_authorized(&self, username: &str, password: &str) -> bool {
self.users.get(&username.to_owned()).map_or(false, |pass| pass == password)
}
fn check_auth(&self, req: &server::Request<net::HttpStream>) -> Access {
match req.headers().get::<header::Authorization<header::Basic>>() {
Some(&header::Authorization(
header::Basic { ref username, password: Some(ref password) }
)) if self.is_authorized(username, password) => Access::Granted,
Some(_) => Access::Denied,
None => Access::AuthRequired,
}
}
}
|
single_user
|
identifier_name
|
auth.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! HTTP Authorization implementations
use std::collections::HashMap;
use hyper::{server, net, header, status};
use endpoint::Handler;
use handlers::{AuthRequiredHandler, ContentHandler};
/// Authorization result
pub enum Authorized {
/// Authorization was successful.
|
Yes,
/// Unsuccessful authorization. Handler for further work is returned.
No(Box<Handler>),
}
/// Authorization interface
pub trait Authorization : Send + Sync {
/// Checks if authorization is valid.
fn is_authorized(&self, req: &server::Request<net::HttpStream>)-> Authorized;
}
/// HTTP Basic Authorization handler
pub struct HttpBasicAuth {
users: HashMap<String, String>,
}
/// No-authorization implementation (authorization disabled)
pub struct NoAuth;
impl Authorization for NoAuth {
fn is_authorized(&self, _req: &server::Request<net::HttpStream>)-> Authorized {
Authorized::Yes
}
}
impl Authorization for HttpBasicAuth {
fn is_authorized(&self, req: &server::Request<net::HttpStream>) -> Authorized {
let auth = self.check_auth(&req);
match auth {
Access::Denied => {
Authorized::No(Box::new(ContentHandler::error(
status::StatusCode::Unauthorized,
"Unauthorized",
"You need to provide valid credentials to access this page.",
None,
None,
)))
},
Access::AuthRequired => {
Authorized::No(Box::new(AuthRequiredHandler))
},
Access::Granted => {
Authorized::Yes
},
}
}
}
#[derive(Debug)]
enum Access {
Granted,
Denied,
AuthRequired,
}
impl HttpBasicAuth {
/// Creates `HttpBasicAuth` instance with only one user.
pub fn single_user(username: &str, password: &str) -> Self {
let mut users = HashMap::new();
users.insert(username.to_owned(), password.to_owned());
HttpBasicAuth {
users: users
}
}
fn is_authorized(&self, username: &str, password: &str) -> bool {
self.users.get(&username.to_owned()).map_or(false, |pass| pass == password)
}
fn check_auth(&self, req: &server::Request<net::HttpStream>) -> Access {
match req.headers().get::<header::Authorization<header::Basic>>() {
Some(&header::Authorization(
header::Basic { ref username, password: Some(ref password) }
)) if self.is_authorized(username, password) => Access::Granted,
Some(_) => Access::Denied,
None => Access::AuthRequired,
}
}
}
|
random_line_split
|
|
generate-data-key.rs
|
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use aws_config::meta::region::RegionProviderChain;
use aws_sdk_kms::model::DataKeySpec;
use aws_sdk_kms::{Client, Error, Region, PKG_VERSION};
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
struct Opt {
/// The AWS Region.
#[structopt(short, long)]
region: Option<String>,
/// The encryption key.
#[structopt(short, long)]
key: String,
/// Whether to display additional information.
#[structopt(short, long)]
verbose: bool,
}
// Create a data key.
// snippet-start:[kms.rust.generate-data-key]
async fn make_key(client: &Client, key: &str) -> Result<(), Error>
|
// snippet-end:[kms.rust.generate-data-key]
/// Creates an AWS KMS data key.
/// # Arguments
///
/// * `[-k KEY]` - The name of the key.
/// * `[-r REGION]` - The Region in which the client is created.
/// If not supplied, uses the value of the **AWS_REGION** environment variable.
/// If the environment variable is not set, defaults to **us-west-2**.
/// * `[-v]` - Whether to display additional information.
#[tokio::main]
async fn main() -> Result<(), Error> {
tracing_subscriber::fmt::init();
let Opt {
key,
region,
verbose,
} = Opt::from_args();
let region_provider = RegionProviderChain::first_try(region.map(Region::new))
.or_default_provider()
.or_else(Region::new("us-west-2"));
println!();
if verbose {
println!("KMS client version: {}", PKG_VERSION);
println!(
"Region: {}",
region_provider.region().await.unwrap().as_ref()
);
println!("Key: {}", &key);
println!();
}
let shared_config = aws_config::from_env().region(region_provider).load().await;
let client = Client::new(&shared_config);
make_key(&client, &key).await
}
|
{
let resp = client
.generate_data_key()
.key_id(key)
.key_spec(DataKeySpec::Aes256)
.send()
.await?;
// Did we get an encrypted blob?
let blob = resp.ciphertext_blob.expect("Could not get encrypted text");
let bytes = blob.as_ref();
let s = base64::encode(&bytes);
println!();
println!("Data key:");
println!("{}", s);
Ok(())
}
|
identifier_body
|
generate-data-key.rs
|
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use aws_config::meta::region::RegionProviderChain;
use aws_sdk_kms::model::DataKeySpec;
use aws_sdk_kms::{Client, Error, Region, PKG_VERSION};
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
struct Opt {
/// The AWS Region.
#[structopt(short, long)]
region: Option<String>,
/// The encryption key.
#[structopt(short, long)]
key: String,
/// Whether to display additional information.
#[structopt(short, long)]
verbose: bool,
}
// Create a data key.
// snippet-start:[kms.rust.generate-data-key]
async fn
|
(client: &Client, key: &str) -> Result<(), Error> {
let resp = client
.generate_data_key()
.key_id(key)
.key_spec(DataKeySpec::Aes256)
.send()
.await?;
// Did we get an encrypted blob?
let blob = resp.ciphertext_blob.expect("Could not get encrypted text");
let bytes = blob.as_ref();
let s = base64::encode(&bytes);
println!();
println!("Data key:");
println!("{}", s);
Ok(())
}
// snippet-end:[kms.rust.generate-data-key]
/// Creates an AWS KMS data key.
/// # Arguments
///
/// * `[-k KEY]` - The name of the key.
/// * `[-r REGION]` - The Region in which the client is created.
/// If not supplied, uses the value of the **AWS_REGION** environment variable.
/// If the environment variable is not set, defaults to **us-west-2**.
/// * `[-v]` - Whether to display additional information.
#[tokio::main]
async fn main() -> Result<(), Error> {
tracing_subscriber::fmt::init();
let Opt {
key,
region,
verbose,
} = Opt::from_args();
let region_provider = RegionProviderChain::first_try(region.map(Region::new))
.or_default_provider()
.or_else(Region::new("us-west-2"));
println!();
if verbose {
println!("KMS client version: {}", PKG_VERSION);
println!(
"Region: {}",
region_provider.region().await.unwrap().as_ref()
);
println!("Key: {}", &key);
println!();
}
let shared_config = aws_config::from_env().region(region_provider).load().await;
let client = Client::new(&shared_config);
make_key(&client, &key).await
}
|
make_key
|
identifier_name
|
generate-data-key.rs
|
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use aws_config::meta::region::RegionProviderChain;
use aws_sdk_kms::model::DataKeySpec;
use aws_sdk_kms::{Client, Error, Region, PKG_VERSION};
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
struct Opt {
/// The AWS Region.
#[structopt(short, long)]
region: Option<String>,
/// The encryption key.
#[structopt(short, long)]
key: String,
/// Whether to display additional information.
#[structopt(short, long)]
verbose: bool,
}
// Create a data key.
// snippet-start:[kms.rust.generate-data-key]
async fn make_key(client: &Client, key: &str) -> Result<(), Error> {
let resp = client
.generate_data_key()
.key_id(key)
.key_spec(DataKeySpec::Aes256)
.send()
.await?;
// Did we get an encrypted blob?
let blob = resp.ciphertext_blob.expect("Could not get encrypted text");
let bytes = blob.as_ref();
let s = base64::encode(&bytes);
println!();
println!("Data key:");
println!("{}", s);
Ok(())
}
// snippet-end:[kms.rust.generate-data-key]
/// Creates an AWS KMS data key.
/// # Arguments
///
/// * `[-k KEY]` - The name of the key.
/// * `[-r REGION]` - The Region in which the client is created.
/// If not supplied, uses the value of the **AWS_REGION** environment variable.
/// If the environment variable is not set, defaults to **us-west-2**.
/// * `[-v]` - Whether to display additional information.
#[tokio::main]
async fn main() -> Result<(), Error> {
tracing_subscriber::fmt::init();
let Opt {
key,
region,
verbose,
} = Opt::from_args();
|
println!();
if verbose {
println!("KMS client version: {}", PKG_VERSION);
println!(
"Region: {}",
region_provider.region().await.unwrap().as_ref()
);
println!("Key: {}", &key);
println!();
}
let shared_config = aws_config::from_env().region(region_provider).load().await;
let client = Client::new(&shared_config);
make_key(&client, &key).await
}
|
let region_provider = RegionProviderChain::first_try(region.map(Region::new))
.or_default_provider()
.or_else(Region::new("us-west-2"));
|
random_line_split
|
generate-data-key.rs
|
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use aws_config::meta::region::RegionProviderChain;
use aws_sdk_kms::model::DataKeySpec;
use aws_sdk_kms::{Client, Error, Region, PKG_VERSION};
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
struct Opt {
/// The AWS Region.
#[structopt(short, long)]
region: Option<String>,
/// The encryption key.
#[structopt(short, long)]
key: String,
/// Whether to display additional information.
#[structopt(short, long)]
verbose: bool,
}
// Create a data key.
// snippet-start:[kms.rust.generate-data-key]
async fn make_key(client: &Client, key: &str) -> Result<(), Error> {
let resp = client
.generate_data_key()
.key_id(key)
.key_spec(DataKeySpec::Aes256)
.send()
.await?;
// Did we get an encrypted blob?
let blob = resp.ciphertext_blob.expect("Could not get encrypted text");
let bytes = blob.as_ref();
let s = base64::encode(&bytes);
println!();
println!("Data key:");
println!("{}", s);
Ok(())
}
// snippet-end:[kms.rust.generate-data-key]
/// Creates an AWS KMS data key.
/// # Arguments
///
/// * `[-k KEY]` - The name of the key.
/// * `[-r REGION]` - The Region in which the client is created.
/// If not supplied, uses the value of the **AWS_REGION** environment variable.
/// If the environment variable is not set, defaults to **us-west-2**.
/// * `[-v]` - Whether to display additional information.
#[tokio::main]
async fn main() -> Result<(), Error> {
tracing_subscriber::fmt::init();
let Opt {
key,
region,
verbose,
} = Opt::from_args();
let region_provider = RegionProviderChain::first_try(region.map(Region::new))
.or_default_provider()
.or_else(Region::new("us-west-2"));
println!();
if verbose
|
let shared_config = aws_config::from_env().region(region_provider).load().await;
let client = Client::new(&shared_config);
make_key(&client, &key).await
}
|
{
println!("KMS client version: {}", PKG_VERSION);
println!(
"Region: {}",
region_provider.region().await.unwrap().as_ref()
);
println!("Key: {}", &key);
println!();
}
|
conditional_block
|
rk4.rs
|
use libc::c_double;
/// Given a differential function dx(x, t),
/// initial condition x0,
/// and a list of times t,
/// find x(t) at each point in t
pub fn odeint(dx: (&Fn(c_double, c_double) -> c_double),
x0: c_double, t_vec: &Vec<c_double>) -> Vec<c_double> {
// Need there to be at least two times for this method to work
assert!(t_vec.len() >= 2);
// Collect x values in this vector
let mut result = Vec::<c_double>::new();
result.push(x0);
// Need to get step size by taking the difference between
// two adjacent times
for i in 0..(t_vec.len() - 1) { // Subtracting 1 from the length isn't a typo
// This implementation is from Wikipedia
let ti = t_vec[i];
let tnext = t_vec[i+1];
let h = tnext - ti;
let xi = result.pop().unwrap();
let k1 = dx(xi, ti);
let k2 = dx(xi + h/2.0*k1, ti + h/2.0);
let k3 = dx(xi + h/2.0*k2, ti + h/2.0);
let k4 = dx(xi + h*k3, ti + h);
let xnext = xi + h/6.0*(k1 + 2.0*k2 + 2.0*k3 + k4);
result.push(xi);
result.push(xnext);
}
result
}
/// FFI version of `odeint`
#[no_mangle]
pub extern "C" fn odeint_64(dx: (&Fn(c_double, c_double) -> c_double),
x0: c_double, t_vec: *mut Vec<c_double>) -> *mut Vec<c_double> {
unsafe { unsafe_alloc_vec_f64!(odeint(dx, x0, &*t_vec)) }
}
#[cfg(test)]
mod tests_rk4 {
use super::*;
const THRESHOLD: f64 = 0.0000001;
// Test differential to give to odeint
#[allow(unused_variables)]
fn velocity_one(x: f64, t: f64) -> f64 {
1.0 // Velocity of particle is 1
}
#[allow(unused_variables)]
fn free_fall(x: f64, t: f64) -> f64 {
let g = -9.81;
g*t
}
#[test]
fn rk4_compiles() {
}
#[test]
fn test_velocity_one() {
let ref t = vec![0.0, 1.0];
let x0 = 0.0;
let mut result = odeint(&velocity_one, x0, t);
assert!((result.pop().unwrap() - 1.0).abs() < THRESHOLD);
}
#[test]
fn test_length() {
let ref t = vec![0.0, 1.0, 2.0];
let x0 = 0.0;
let result = odeint(&velocity_one, x0, t);
assert_eq!(result.len(), 3);
}
|
let mut times = Vec::<f64>::new();
let mut i = 0.0;
while i <= 10.0 {
times.push(i);
i += 0.1;
}
let x0 = 0.0;
let mut result = odeint(&free_fall, x0, &times);
let expected_value = -490.5;
//println!("printing a result vector: {:?}", result);
assert!((result.pop().unwrap() - expected_value).abs() < THRESHOLD);
}
}
|
#[test]
fn test_free_fall() {
|
random_line_split
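Editorial aside on the rk4.rs sample above (not part of the dataset): its odeint loop applies the classical fourth-order Runge-Kutta update. A minimal, self-contained sketch of a single RK4 step using only std is shown below; the names rk4_step and dx are illustrative only, not taken from the sample.

// Sketch of one classical RK4 step, mirroring the k1..k4 update used in odeint.
fn rk4_step<F: Fn(f64, f64) -> f64>(dx: &F, x: f64, t: f64, h: f64) -> f64 {
    let k1 = dx(x, t);
    let k2 = dx(x + h / 2.0 * k1, t + h / 2.0);
    let k3 = dx(x + h / 2.0 * k2, t + h / 2.0);
    let k4 = dx(x + h * k3, t + h);
    x + h / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)
}

fn main() {
    // Free fall from rest, dx/dt = g*t: one step of size 1.0 gives x(1) = -4.905.
    let g = -9.81;
    println!("{}", rk4_step(&|_x, t| g * t, 0.0, 0.0, 1.0));
}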
|
rk4.rs
|
use libc::c_double;
/// Given a differential function dx(x, t),
/// initial condition x0,
/// and a list of times t,
/// find x(t) at each point in t
pub fn odeint(dx: (&Fn(c_double, c_double) -> c_double),
x0: c_double, t_vec: &Vec<c_double>) -> Vec<c_double> {
// Need there to be at least two times for this method to work
assert!(t_vec.len() >= 2);
// Collect x values in this vector
let mut result = Vec::<c_double>::new();
result.push(x0);
// Need to get step size by taking the difference between
// two adjacent times
for i in 0..(t_vec.len() - 1) { // Subtracting 1 from the length isn't a typo
// This implementation is from Wikipedia
let ti = t_vec[i];
let tnext = t_vec[i+1];
let h = tnext - ti;
let xi = result.pop().unwrap();
let k1 = dx(xi, ti);
let k2 = dx(xi + h/2.0*k1, ti + h/2.0);
let k3 = dx(xi + h/2.0*k2, ti + h/2.0);
let k4 = dx(xi + h*k3, ti + h);
let xnext = xi + h/6.0*(k1 + 2.0*k2 + 2.0*k3 + k4);
result.push(xi);
result.push(xnext);
}
result
}
/// FFI version of `odeint`
#[no_mangle]
pub extern "C" fn odeint_64(dx: (&Fn(c_double, c_double) -> c_double),
x0: c_double, t_vec: *mut Vec<c_double>) -> *mut Vec<c_double> {
unsafe { unsafe_alloc_vec_f64!(odeint(dx, x0, &*t_vec)) }
}
#[cfg(test)]
mod tests_rk4 {
use super::*;
const THRESHOLD: f64 = 0.0000001;
// Test differential to give to odeint
#[allow(unused_variables)]
fn velocity_one(x: f64, t: f64) -> f64 {
1.0 // Velocity of particle is 1
}
#[allow(unused_variables)]
fn free_fall(x: f64, t: f64) -> f64
|
#[test]
fn rk4_compiles() {
}
#[test]
fn test_velocity_one() {
let ref t = vec![0.0, 1.0];
let x0 = 0.0;
let mut result = odeint(&velocity_one, x0, t);
assert!((result.pop().unwrap() - 1.0).abs() < THRESHOLD);
}
#[test]
fn test_length() {
let ref t = vec![0.0, 1.0, 2.0];
let x0 = 0.0;
let result = odeint(&velocity_one, x0, t);
assert_eq!(result.len(), 3);
}
#[test]
fn test_free_fall() {
let mut times = Vec::<f64>::new();
let mut i = 0.0;
while i <= 10.0 {
times.push(i);
i += 0.1;
}
let x0 = 0.0;
let mut result = odeint(&free_fall, x0, &times);
let expected_value = -490.5;
//println!("printing a result vector: {:?}", result);
assert!((result.pop().unwrap() - expected_value).abs() < THRESHOLD);
}
}
|
{
let g = -9.81;
g*t
}
|
identifier_body
|
rk4.rs
|
use libc::c_double;
/// Given a differential function dx(x, t),
/// initial condition x0,
/// and a list of times t,
/// find x(t) at each point in t
pub fn odeint(dx: (&Fn(c_double, c_double) -> c_double),
x0: c_double, t_vec: &Vec<c_double>) -> Vec<c_double> {
// Need there to be at least two times for this method to work
assert!(t_vec.len() >= 2);
// Collect x values in this vector
let mut result = Vec::<c_double>::new();
result.push(x0);
// Need to get step size by taking the difference between
// two adjacent times
for i in 0..(t_vec.len() - 1) { // Subtracting 1 from the length isn't a typo
// This implementation is from Wikipedia
let ti = t_vec[i];
let tnext = t_vec[i+1];
let h = tnext - ti;
let xi = result.pop().unwrap();
let k1 = dx(xi, ti);
let k2 = dx(xi + h/2.0*k1, ti + h/2.0);
let k3 = dx(xi + h/2.0*k2, ti + h/2.0);
let k4 = dx(xi + h*k3, ti + h);
let xnext = xi + h/6.0*(k1 + 2.0*k2 + 2.0*k3 + k4);
result.push(xi);
result.push(xnext);
}
result
}
/// FFI version of `odeint`
#[no_mangle]
pub extern "C" fn
|
(dx: (&Fn(c_double, c_double) -> c_double),
x0: c_double, t_vec: *mut Vec<c_double>) -> *mut Vec<c_double> {
unsafe { unsafe_alloc_vec_f64!(odeint(dx, x0, &*t_vec)) }
}
#[cfg(test)]
mod tests_rk4 {
use super::*;
const THRESHOLD: f64 = 0.0000001;
// Test differential to give to odeint
#[allow(unused_variables)]
fn velocity_one(x: f64, t: f64) -> f64 {
1.0 // Velocity of particle is 1
}
#[allow(unused_variables)]
fn free_fall(x: f64, t: f64) -> f64 {
let g = -9.81;
g*t
}
#[test]
fn rk4_compiles() {
}
#[test]
fn test_velocity_one() {
let ref t = vec![0.0, 1.0];
let x0 = 0.0;
let mut result = odeint(&velocity_one, x0, t);
assert!((result.pop().unwrap() - 1.0).abs() < THRESHOLD);
}
#[test]
fn test_length() {
let ref t = vec![0.0, 1.0, 2.0];
let x0 = 0.0;
let result = odeint(&velocity_one, x0, t);
assert_eq!(result.len(), 3);
}
#[test]
fn test_free_fall() {
let mut times = Vec::<f64>::new();
let mut i = 0.0;
while i <= 10.0 {
times.push(i);
i += 0.1;
}
let x0 = 0.0;
let mut result = odeint(&free_fall, x0, &times);
let expected_value = -490.5;
//println!("printing a result vector: {:?}", result);
assert!((result.pop().unwrap() - expected_value).abs() < THRESHOLD);
}
}
|
odeint_64
|
identifier_name
|
photos.rs
|
use api::{Collection, Id, OwnerId, Timestamp};
request_ref! {
struct Search for ["photos.search"](v => 5.37) -> Collection<Photo> {
sized {
lat: f32 = () => {},
long: f32 = () => {},
start_time: Timestamp = () => {},
end_time: Timestamp = () => {},
sort: Sort = (Sort::Popularity) => {AsRef},
offset: usize = (0) => {},
count: usize = (30) => {},
radius: u16 = (5000) => {},
}
unsized {
q: str = ("") => {=},
}
}
}
#[cfg(feature = "unstable")]
include!("photos.rs.in");
#[cfg(not(feature = "unstable"))]
include!(concat!(env!("OUT_DIR"), "/photos.rs"));
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[repr(u8)]
pub enum Sort {
DateAdded = 0,
Popularity = 1,
}
impl AsRef<str> for Sort {
fn
|
(&self) -> &str {
use self::Sort::*;
match *self {
DateAdded => "0",
Popularity => "1",
}
}
}
|
as_ref
|
identifier_name
|
photos.rs
|
use api::{Collection, Id, OwnerId, Timestamp};
request_ref! {
struct Search for ["photos.search"](v => 5.37) -> Collection<Photo> {
sized {
lat: f32 = () => {},
long: f32 = () => {},
start_time: Timestamp = () => {},
end_time: Timestamp = () => {},
sort: Sort = (Sort::Popularity) => {AsRef},
offset: usize = (0) => {},
count: usize = (30) => {},
radius: u16 = (5000) => {},
}
unsized {
q: str = ("") => {=},
}
}
}
#[cfg(feature = "unstable")]
include!("photos.rs.in");
#[cfg(not(feature = "unstable"))]
include!(concat!(env!("OUT_DIR"), "/photos.rs"));
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[repr(u8)]
pub enum Sort {
DateAdded = 0,
Popularity = 1,
}
impl AsRef<str> for Sort {
fn as_ref(&self) -> &str {
use self::Sort::*;
match *self {
DateAdded => "0",
|
}
}
}
|
Popularity => "1",
|
random_line_split
|
opaque-template-inst-member.rs
|
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)]
pub struct
|
{
pub _address: u8,
}
/// This should not end up deriving Debug/Hash because its `mBlah` field cannot derive
/// Debug/Hash because the instantiation's definition cannot derive Debug/Hash.
#[repr(C)]
pub struct ContainsOpaqueTemplate {
pub mBlah: [u32; 101usize],
pub mBaz: ::std::os::raw::c_int,
}
#[test]
fn bindgen_test_layout_ContainsOpaqueTemplate() {
assert_eq!(
::std::mem::size_of::<ContainsOpaqueTemplate>(),
408usize,
concat!("Size of: ", stringify!(ContainsOpaqueTemplate))
);
assert_eq!(
::std::mem::align_of::<ContainsOpaqueTemplate>(),
4usize,
concat!("Alignment of ", stringify!(ContainsOpaqueTemplate))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<ContainsOpaqueTemplate>())).mBlah as *const _
as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(ContainsOpaqueTemplate),
"::",
stringify!(mBlah)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<ContainsOpaqueTemplate>())).mBaz as *const _
as usize
},
404usize,
concat!(
"Offset of field: ",
stringify!(ContainsOpaqueTemplate),
"::",
stringify!(mBaz)
)
);
}
impl Default for ContainsOpaqueTemplate {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
impl ::std::cmp::PartialEq for ContainsOpaqueTemplate {
fn eq(&self, other: &ContainsOpaqueTemplate) -> bool {
&self.mBlah[..] == &other.mBlah[..] && self.mBaz == other.mBaz
}
}
/// This should not end up deriving Debug/Hash either, for similar reasons, although
/// we're exercising base member edges now.
#[repr(C)]
pub struct InheritsOpaqueTemplate {
pub _base: [u8; 401usize],
pub wow: *mut ::std::os::raw::c_char,
}
#[test]
fn bindgen_test_layout_InheritsOpaqueTemplate() {
assert_eq!(
::std::mem::size_of::<InheritsOpaqueTemplate>(),
416usize,
concat!("Size of: ", stringify!(InheritsOpaqueTemplate))
);
assert_eq!(
::std::mem::align_of::<InheritsOpaqueTemplate>(),
8usize,
concat!("Alignment of ", stringify!(InheritsOpaqueTemplate))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<InheritsOpaqueTemplate>())).wow as *const _
as usize
},
408usize,
concat!(
"Offset of field: ",
stringify!(InheritsOpaqueTemplate),
"::",
stringify!(wow)
)
);
}
impl Default for InheritsOpaqueTemplate {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
impl ::std::cmp::PartialEq for InheritsOpaqueTemplate {
fn eq(&self, other: &InheritsOpaqueTemplate) -> bool {
&self._base[..] == &other._base[..] && self.wow == other.wow
}
}
|
OpaqueTemplate
|
identifier_name
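Editorial aside on the opaque-template-inst-member.rs sample above (not part of the dataset): the layout asserts follow directly from the field sizes. A small sketch of that arithmetic, assuming a 64-bit target as the sample's own offsets do:

// mBlah is [u32; 101] = 404 bytes at offset 0; mBaz (c_int, align 4) follows at 404,
// giving size 408. InheritsOpaqueTemplate pads its 401-byte base up to the pointer's
// 8-byte alignment, so `wow` lands at offset 408 and the struct is 416 bytes.
fn main() {
    assert_eq!(101 * std::mem::size_of::<u32>(), 404);
    assert_eq!(404 + std::mem::size_of::<i32>(), 408);
    let base = 401usize;
    let align = std::mem::align_of::<*mut u8>(); // 8 on 64-bit targets
    let wow_offset = (base + align - 1) / align * align;
    assert_eq!(wow_offset, 408);
    assert_eq!(wow_offset + std::mem::size_of::<*mut u8>(), 416);
}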
|
opaque-template-inst-member.rs
|
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)]
pub struct OpaqueTemplate {
pub _address: u8,
}
/// This should not end up deriving Debug/Hash because its `mBlah` field cannot derive
/// Debug/Hash because the instantiation's definition cannot derive Debug/Hash.
#[repr(C)]
pub struct ContainsOpaqueTemplate {
pub mBlah: [u32; 101usize],
pub mBaz: ::std::os::raw::c_int,
}
#[test]
fn bindgen_test_layout_ContainsOpaqueTemplate() {
assert_eq!(
::std::mem::size_of::<ContainsOpaqueTemplate>(),
408usize,
concat!("Size of: ", stringify!(ContainsOpaqueTemplate))
);
assert_eq!(
::std::mem::align_of::<ContainsOpaqueTemplate>(),
4usize,
concat!("Alignment of ", stringify!(ContainsOpaqueTemplate))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<ContainsOpaqueTemplate>())).mBlah as *const _
as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(ContainsOpaqueTemplate),
"::",
stringify!(mBlah)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<ContainsOpaqueTemplate>())).mBaz as *const _
as usize
},
404usize,
concat!(
"Offset of field: ",
stringify!(ContainsOpaqueTemplate),
"::",
stringify!(mBaz)
)
);
}
impl Default for ContainsOpaqueTemplate {
fn default() -> Self
|
}
impl ::std::cmp::PartialEq for ContainsOpaqueTemplate {
fn eq(&self, other: &ContainsOpaqueTemplate) -> bool {
&self.mBlah[..] == &other.mBlah[..] && self.mBaz == other.mBaz
}
}
/// This should not end up deriving Debug/Hash either, for similar reasons, although
/// we're exercising base member edges now.
#[repr(C)]
pub struct InheritsOpaqueTemplate {
pub _base: [u8; 401usize],
pub wow: *mut ::std::os::raw::c_char,
}
#[test]
fn bindgen_test_layout_InheritsOpaqueTemplate() {
assert_eq!(
::std::mem::size_of::<InheritsOpaqueTemplate>(),
416usize,
concat!("Size of: ", stringify!(InheritsOpaqueTemplate))
);
assert_eq!(
::std::mem::align_of::<InheritsOpaqueTemplate>(),
8usize,
concat!("Alignment of ", stringify!(InheritsOpaqueTemplate))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<InheritsOpaqueTemplate>())).wow as *const _
as usize
},
408usize,
concat!(
"Offset of field: ",
stringify!(InheritsOpaqueTemplate),
"::",
stringify!(wow)
)
);
}
impl Default for InheritsOpaqueTemplate {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
impl ::std::cmp::PartialEq for InheritsOpaqueTemplate {
fn eq(&self, other: &InheritsOpaqueTemplate) -> bool {
&self._base[..] == &other._base[..] && self.wow == other.wow
}
}
|
{
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
|
identifier_body
|
opaque-template-inst-member.rs
|
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)]
pub struct OpaqueTemplate {
pub _address: u8,
}
/// This should not end up deriving Debug/Hash because its `mBlah` field cannot derive
/// Debug/Hash because the instantiation's definition cannot derive Debug/Hash.
#[repr(C)]
pub struct ContainsOpaqueTemplate {
pub mBlah: [u32; 101usize],
pub mBaz: ::std::os::raw::c_int,
}
#[test]
fn bindgen_test_layout_ContainsOpaqueTemplate() {
assert_eq!(
::std::mem::size_of::<ContainsOpaqueTemplate>(),
408usize,
concat!("Size of: ", stringify!(ContainsOpaqueTemplate))
);
assert_eq!(
::std::mem::align_of::<ContainsOpaqueTemplate>(),
4usize,
concat!("Alignment of ", stringify!(ContainsOpaqueTemplate))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<ContainsOpaqueTemplate>())).mBlah as *const _
as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(ContainsOpaqueTemplate),
"::",
stringify!(mBlah)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<ContainsOpaqueTemplate>())).mBaz as *const _
as usize
},
404usize,
concat!(
"Offset of field: ",
stringify!(ContainsOpaqueTemplate),
"::",
stringify!(mBaz)
)
);
}
impl Default for ContainsOpaqueTemplate {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
|
&self.mBlah[..] == &other.mBlah[..] && self.mBaz == other.mBaz
}
}
/// This should not end up deriving Debug/Hash either, for similar reasons, although
/// we're exercising base member edges now.
#[repr(C)]
pub struct InheritsOpaqueTemplate {
pub _base: [u8; 401usize],
pub wow: *mut ::std::os::raw::c_char,
}
#[test]
fn bindgen_test_layout_InheritsOpaqueTemplate() {
assert_eq!(
::std::mem::size_of::<InheritsOpaqueTemplate>(),
416usize,
concat!("Size of: ", stringify!(InheritsOpaqueTemplate))
);
assert_eq!(
::std::mem::align_of::<InheritsOpaqueTemplate>(),
8usize,
concat!("Alignment of ", stringify!(InheritsOpaqueTemplate))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<InheritsOpaqueTemplate>())).wow as *const _
as usize
},
408usize,
concat!(
"Offset of field: ",
stringify!(InheritsOpaqueTemplate),
"::",
stringify!(wow)
)
);
}
impl Default for InheritsOpaqueTemplate {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
unsafe {
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
impl ::std::cmp::PartialEq for InheritsOpaqueTemplate {
fn eq(&self, other: &InheritsOpaqueTemplate) -> bool {
&self._base[..] == &other._base[..] && self.wow == other.wow
}
}
|
impl ::std::cmp::PartialEq for ContainsOpaqueTemplate {
fn eq(&self, other: &ContainsOpaqueTemplate) -> bool {
|
random_line_split
|
float.rs
|
#[macro_use]
extern crate nom;
use nom::{IResult,digit};
|
use std::str::FromStr;
named!(unsigned_float <f32>, map_res!(
map_res!(
recognize!(
alt!(
delimited!(digit, tag!("."), opt!(complete!(digit))) |
delimited!(opt!(digit), tag!("."), digit)
)
),
str::from_utf8
),
FromStr::from_str
));
named!(float <f32>, map!(
pair!(
opt!(alt!(tag!("+") | tag!("-"))),
unsigned_float
),
|(sign, value): (Option<&[u8]>, f32)| {
sign.and_then(|s| if s[0] == ('-' as u8) { Some(-1f32) } else { None }).unwrap_or(1f32) * value
}
));
#[test]
fn unsigned_float_test() {
assert_eq!(unsigned_float(&b"123.456"[..]), IResult::Done(&b""[..], 123.456));
assert_eq!(unsigned_float(&b"0.123"[..]), IResult::Done(&b""[..], 0.123));
assert_eq!(unsigned_float(&b"123.0"[..]), IResult::Done(&b""[..], 123.0));
assert_eq!(unsigned_float(&b"123."[..]), IResult::Done(&b""[..], 123.0));
assert_eq!(unsigned_float(&b".123"[..]), IResult::Done(&b""[..], 0.123));
}
#[test]
fn float_test() {
assert_eq!(float(&b"123.456"[..]), IResult::Done(&b""[..], 123.456));
assert_eq!(float(&b"+123.456"[..]), IResult::Done(&b""[..], 123.456));
assert_eq!(float(&b"-123.456"[..]), IResult::Done(&b""[..], -123.456));
}
|
use std::str;
|
random_line_split
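Editorial aside on the float.rs sample above (not part of the dataset): the closure inside the `float` parser only has to turn an optional leading sign byte into a multiplier. A plain-std sketch of that logic, with the name apply_sign being illustrative only:

// A leading '-' flips the sign; '+' or no sign leaves the value unchanged.
fn apply_sign(sign: Option<u8>, value: f32) -> f32 {
    sign.and_then(|s| if s == b'-' { Some(-1f32) } else { None })
        .unwrap_or(1f32)
        * value
}

fn main() {
    assert_eq!(apply_sign(Some(b'-'), 123.456), -123.456);
    assert_eq!(apply_sign(Some(b'+'), 123.456), 123.456);
    assert_eq!(apply_sign(None, 123.456), 123.456);
}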
|
float.rs
|
#[macro_use]
extern crate nom;
use nom::{IResult,digit};
use std::str;
use std::str::FromStr;
named!(unsigned_float <f32>, map_res!(
map_res!(
recognize!(
alt!(
delimited!(digit, tag!("."), opt!(complete!(digit))) |
delimited!(opt!(digit), tag!("."), digit)
)
),
str::from_utf8
),
FromStr::from_str
));
named!(float <f32>, map!(
pair!(
opt!(alt!(tag!("+") | tag!("-"))),
unsigned_float
),
|(sign, value): (Option<&[u8]>, f32)| {
sign.and_then(|s| if s[0] == ('-' as u8) { Some(-1f32) } else { None }).unwrap_or(1f32) * value
}
));
#[test]
fn
|
() {
assert_eq!(unsigned_float(&b"123.456"[..]), IResult::Done(&b""[..], 123.456));
assert_eq!(unsigned_float(&b"0.123"[..]), IResult::Done(&b""[..], 0.123));
assert_eq!(unsigned_float(&b"123.0"[..]), IResult::Done(&b""[..], 123.0));
assert_eq!(unsigned_float(&b"123."[..]), IResult::Done(&b""[..], 123.0));
assert_eq!(unsigned_float(&b".123"[..]), IResult::Done(&b""[..], 0.123));
}
#[test]
fn float_test() {
assert_eq!(float(&b"123.456"[..]), IResult::Done(&b""[..], 123.456));
assert_eq!(float(&b"+123.456"[..]), IResult::Done(&b""[..], 123.456));
assert_eq!(float(&b"-123.456"[..]), IResult::Done(&b""[..], -123.456));
}
|
unsigned_float_test
|
identifier_name
|
float.rs
|
#[macro_use]
extern crate nom;
use nom::{IResult,digit};
use std::str;
use std::str::FromStr;
named!(unsigned_float <f32>, map_res!(
map_res!(
recognize!(
alt!(
delimited!(digit, tag!("."), opt!(complete!(digit))) |
delimited!(opt!(digit), tag!("."), digit)
)
),
str::from_utf8
),
FromStr::from_str
));
named!(float <f32>, map!(
pair!(
opt!(alt!(tag!("+") | tag!("-"))),
unsigned_float
),
|(sign, value): (Option<&[u8]>, f32)| {
sign.and_then(|s| if s[0] == ('-' as u8) { Some(-1f32) } else { None }).unwrap_or(1f32) * value
}
));
#[test]
fn unsigned_float_test() {
assert_eq!(unsigned_float(&b"123.456"[..]), IResult::Done(&b""[..], 123.456));
assert_eq!(unsigned_float(&b"0.123"[..]), IResult::Done(&b""[..], 0.123));
assert_eq!(unsigned_float(&b"123.0"[..]), IResult::Done(&b""[..], 123.0));
assert_eq!(unsigned_float(&b"123."[..]), IResult::Done(&b""[..], 123.0));
assert_eq!(unsigned_float(&b".123"[..]), IResult::Done(&b""[..], 0.123));
}
#[test]
fn float_test()
|
{
assert_eq!(float(&b"123.456"[..]), IResult::Done(&b""[..], 123.456));
assert_eq!(float(&b"+123.456"[..]), IResult::Done(&b""[..], 123.456));
assert_eq!(float(&b"-123.456"[..]), IResult::Done(&b""[..], -123.456));
}
|
identifier_body
|
|
gio_futures_await.rs
|
extern crate gio;
extern crate glib;
use gio::prelude::*;
use futures::prelude::*;
use std::str;
// Throughout our chained futures, we convert all errors to strings
// via map_err() and return them directly.
async fn read_file(file: gio::File) -> Result<(), String> {
// Try to open the file.
let strm = file
.read_async_future(glib::PRIORITY_DEFAULT)
.map_err(|err| format!("Failed to open file: {}", err))
.await?;
// If opening the file succeeds, we asynchronously loop and
// read the file in up to 64 byte chunks and re-use the same
// vec for each read.
let mut buf = vec![0; 64];
let mut idx = 0;
loop {
let (b, len) = strm
.read_async_future(buf, glib::PRIORITY_DEFAULT)
.map_err(|(_buf, err)| format!("Failed to read from stream: {}", err))
.await?;
// Once 0 is returned, we know that we're done with reading, otherwise
// loop again and read another chunk.
if len == 0 {
break;
}
buf = b;
println!("line {}: {:?}", idx, str::from_utf8(&buf[0..len]).unwrap());
idx += 1;
}
// Asynchronously close the stream in the end.
let _ = strm
.close_async_future(glib::PRIORITY_DEFAULT)
.map_err(|err| format!("Failed to close stream: {}", err))
.await?;
Ok(())
}
fn main() {
let c = glib::MainContext::default();
let l = glib::MainLoop::new(Some(&c), false);
c.push_thread_default();
let file = gio::File::new_for_path("Cargo.toml");
let l_clone = l.clone();
let future = async move {
match read_file(file).await {
Ok(()) => (),
Err(err) => eprintln!("Got error: {}", err),
}
l_clone.quit();
};
c.spawn_local(future);
|
l.run();
c.pop_thread_default();
}
|
random_line_split
|
|
gio_futures_await.rs
|
extern crate gio;
extern crate glib;
use gio::prelude::*;
use futures::prelude::*;
use std::str;
// Throughout our chained futures, we convert all errors to strings
// via map_err() and return them directly.
async fn read_file(file: gio::File) -> Result<(), String> {
// Try to open the file.
let strm = file
.read_async_future(glib::PRIORITY_DEFAULT)
.map_err(|err| format!("Failed to open file: {}", err))
.await?;
// If opening the file succeeds, we asynchronously loop and
// read the file in up to 64 byte chunks and re-use the same
// vec for each read.
let mut buf = vec![0; 64];
let mut idx = 0;
loop {
let (b, len) = strm
.read_async_future(buf, glib::PRIORITY_DEFAULT)
.map_err(|(_buf, err)| format!("Failed to read from stream: {}", err))
.await?;
// Once 0 is returned, we know that we're done with reading, otherwise
// loop again and read another chunk.
if len == 0 {
break;
}
buf = b;
println!("line {}: {:?}", idx, str::from_utf8(&buf[0..len]).unwrap());
idx += 1;
}
// Asynchronously close the stream in the end.
let _ = strm
.close_async_future(glib::PRIORITY_DEFAULT)
.map_err(|err| format!("Failed to close stream: {}", err))
.await?;
Ok(())
}
fn
|
() {
let c = glib::MainContext::default();
let l = glib::MainLoop::new(Some(&c), false);
c.push_thread_default();
let file = gio::File::new_for_path("Cargo.toml");
let l_clone = l.clone();
let future = async move {
match read_file(file).await {
Ok(()) => (),
Err(err) => eprintln!("Got error: {}", err),
}
l_clone.quit();
};
c.spawn_local(future);
l.run();
c.pop_thread_default();
}
|
main
|
identifier_name
|
gio_futures_await.rs
|
extern crate gio;
extern crate glib;
use gio::prelude::*;
use futures::prelude::*;
use std::str;
// Throughout our chained futures, we convert all errors to strings
// via map_err() and return them directly.
async fn read_file(file: gio::File) -> Result<(), String> {
// Try to open the file.
let strm = file
.read_async_future(glib::PRIORITY_DEFAULT)
.map_err(|err| format!("Failed to open file: {}", err))
.await?;
// If opening the file succeeds, we asynchronously loop and
// read the file in up to 64 byte chunks and re-use the same
// vec for each read.
let mut buf = vec![0; 64];
let mut idx = 0;
loop {
let (b, len) = strm
.read_async_future(buf, glib::PRIORITY_DEFAULT)
.map_err(|(_buf, err)| format!("Failed to read from stream: {}", err))
.await?;
// Once 0 is returned, we know that we're done with reading, otherwise
// loop again and read another chunk.
if len == 0 {
break;
}
buf = b;
println!("line {}: {:?}", idx, str::from_utf8(&buf[0..len]).unwrap());
idx += 1;
}
// Asynchronously close the stream in the end.
let _ = strm
.close_async_future(glib::PRIORITY_DEFAULT)
.map_err(|err| format!("Failed to close stream: {}", err))
.await?;
Ok(())
}
fn main()
|
c.pop_thread_default();
}
|
{
let c = glib::MainContext::default();
let l = glib::MainLoop::new(Some(&c), false);
c.push_thread_default();
let file = gio::File::new_for_path("Cargo.toml");
let l_clone = l.clone();
let future = async move {
match read_file(file).await {
Ok(()) => (),
Err(err) => eprintln!("Got error: {}", err),
}
l_clone.quit();
};
c.spawn_local(future);
l.run();
|
identifier_body
|
pre_parse.rs
|
use futures::{Stream, StreamExt, TryFutureExt};
use juniper::{
executor::{execute_validated_query_async, get_operation, resolve_validated_subscription},
graphql_object, graphql_subscription,
parser::parse_document_source,
validation::{validate_input_values, visit_all_rules, ValidatorContext},
EmptyMutation, FieldError, OperationType, RootNode, Variables,
};
use std::pin::Pin;
pub struct Context;
impl juniper::Context for Context {}
pub type UserStream = Pin<Box<dyn Stream<Item = Result<User, FieldError>> + Send>>;
pub struct Query;
#[graphql_object(context = Context)]
impl Query {
fn users() -> Vec<User> {
vec![User]
}
}
pub struct Subscription;
#[graphql_subscription(context = Context)]
impl Subscription {
async fn
|
() -> UserStream {
Box::pin(futures::stream::iter(vec![Ok(User)]))
}
}
#[derive(Clone)]
pub struct User;
#[graphql_object(context = Context)]
impl User {
fn id() -> i32 {
1
}
}
type Schema = RootNode<'static, Query, EmptyMutation<Context>, Subscription>;
#[tokio::test]
async fn query_document_can_be_pre_parsed() {
let root_node = &Schema::new(Query, EmptyMutation::<Context>::new(), Subscription);
let document_source = r#"query { users { id } }"#;
let document = parse_document_source(document_source, &root_node.schema).unwrap();
{
let mut ctx = ValidatorContext::new(&root_node.schema, &document);
visit_all_rules(&mut ctx, &document);
let errors = ctx.into_errors();
assert!(errors.is_empty());
}
let operation = get_operation(&document, None).unwrap();
assert!(operation.item.operation_type == OperationType::Query);
let errors = validate_input_values(&juniper::Variables::new(), operation, &root_node.schema);
assert!(errors.is_empty());
let (_, errors) = execute_validated_query_async(
&document,
operation,
root_node,
&Variables::new(),
&Context {},
)
.await
.unwrap();
assert!(errors.len() == 0);
}
#[tokio::test]
async fn subscription_document_can_be_pre_parsed() {
let root_node = &Schema::new(Query, EmptyMutation::<Context>::new(), Subscription);
let document_source = r#"subscription { users { id } }"#;
let document = parse_document_source(document_source, &root_node.schema).unwrap();
let operation = get_operation(&document, None).unwrap();
assert!(operation.item.operation_type == OperationType::Subscription);
let mut stream = resolve_validated_subscription(
&document,
&operation,
&root_node,
&Variables::new(),
&Context {},
)
.map_ok(|(stream, errors)| juniper_subscriptions::Connection::from_stream(stream, errors))
.await
.unwrap();
let _ = stream.next().await.unwrap();
}
|
users
|
identifier_name
|
pre_parse.rs
|
use futures::{Stream, StreamExt, TryFutureExt};
use juniper::{
executor::{execute_validated_query_async, get_operation, resolve_validated_subscription},
graphql_object, graphql_subscription,
parser::parse_document_source,
validation::{validate_input_values, visit_all_rules, ValidatorContext},
EmptyMutation, FieldError, OperationType, RootNode, Variables,
};
use std::pin::Pin;
pub struct Context;
impl juniper::Context for Context {}
pub type UserStream = Pin<Box<dyn Stream<Item = Result<User, FieldError>> + Send>>;
pub struct Query;
#[graphql_object(context = Context)]
impl Query {
fn users() -> Vec<User> {
vec![User]
}
}
pub struct Subscription;
#[graphql_subscription(context = Context)]
impl Subscription {
async fn users() -> UserStream {
Box::pin(futures::stream::iter(vec![Ok(User)]))
}
}
#[derive(Clone)]
pub struct User;
#[graphql_object(context = Context)]
impl User {
fn id() -> i32 {
1
}
}
type Schema = RootNode<'static, Query, EmptyMutation<Context>, Subscription>;
#[tokio::test]
async fn query_document_can_be_pre_parsed() {
let root_node = &Schema::new(Query, EmptyMutation::<Context>::new(), Subscription);
let document_source = r#"query { users { id } }"#;
let document = parse_document_source(document_source, &root_node.schema).unwrap();
{
let mut ctx = ValidatorContext::new(&root_node.schema, &document);
visit_all_rules(&mut ctx, &document);
let errors = ctx.into_errors();
assert!(errors.is_empty());
}
let operation = get_operation(&document, None).unwrap();
assert!(operation.item.operation_type == OperationType::Query);
let errors = validate_input_values(&juniper::Variables::new(), operation, &root_node.schema);
assert!(errors.is_empty());
let (_, errors) = execute_validated_query_async(
&document,
operation,
root_node,
&Variables::new(),
&Context {},
)
.await
.unwrap();
assert!(errors.len() == 0);
}
#[tokio::test]
async fn subscription_document_can_be_pre_parsed() {
let root_node = &Schema::new(Query, EmptyMutation::<Context>::new(), Subscription);
let document_source = r#"subscription { users { id } }"#;
let document = parse_document_source(document_source, &root_node.schema).unwrap();
|
&document,
&operation,
&root_node,
&Variables::new(),
&Context {},
)
.map_ok(|(stream, errors)| juniper_subscriptions::Connection::from_stream(stream, errors))
.await
.unwrap();
let _ = stream.next().await.unwrap();
}
|
let operation = get_operation(&document, None).unwrap();
assert!(operation.item.operation_type == OperationType::Subscription);
let mut stream = resolve_validated_subscription(
|
random_line_split
|
thin.rs
|
use anyhow::Result;
use std::path::PathBuf;
use thinp::file_utils;
use thinp::io_engine::*;
use crate::args;
use crate::common::fixture::*;
use crate::common::process::*;
use crate::common::target::*;
use crate::common::test_dir::TestDir;
use crate::common::thin_xml_generator::{write_xml, SingleThinS};
//-----------------------------------------------
pub fn mk_valid_xml(td: &mut TestDir) -> Result<PathBuf> {
let xml = td.mk_path("meta.xml");
let mut gen = SingleThinS::new(0, 1024, 2048, 2048);
write_xml(&xml, &mut gen)?;
Ok(xml)
}
pub fn mk_valid_md(td: &mut TestDir) -> Result<PathBuf> {
let xml = td.mk_path("meta.xml");
let md = td.mk_path("meta.bin");
let mut gen = SingleThinS::new(0, 1024, 20480, 20480);
write_xml(&xml, &mut gen)?;
let _file = file_utils::create_sized_file(&md, 4096 * 4096);
run_ok(thin_restore_cmd(args!["-i", &xml, "-o", &md]))?;
Ok(md)
}
//-----------------------------------------------
// FIXME: replace mk_valid_md with this?
pub fn prep_metadata(td: &mut TestDir) -> Result<PathBuf> {
let md = mk_zeroed_md(td)?;
let args = args!["-o", &md, "--format", "--nr-data-blocks", "102400"];
run_ok(thin_generate_metadata_cmd(args))?;
// Create a 2GB device
let args = args!["-o", &md, "--create-thin", "1"];
run_ok(thin_generate_metadata_cmd(args))?;
let args = args![
"-o",
&md,
"--dev-id",
"1",
"--size",
"2097152",
"--rw=randwrite",
"--seq-nr=16"
];
run_ok(thin_generate_mappings_cmd(args))?;
// Take a few snapshots.
let mut snap_id = 2;
for _i in 0..10 {
// take a snapshot
let snap_id_str = snap_id.to_string();
let args = args!["-o", &md, "--create-snap", &snap_id_str, "--origin", "1"];
run_ok(thin_generate_metadata_cmd(args))?;
// partially overwrite the origin (64MB)
let args = args![
"-o",
&md,
"--dev-id",
"1",
"--size",
"2097152",
"--io-size",
"131072",
"--rw=randwrite",
"--seq-nr=16"
];
run_ok(thin_generate_mappings_cmd(args))?;
snap_id += 1;
}
Ok(md)
}
pub fn
|
(md: &PathBuf) -> Result<()> {
let args = args!["-o", &md, "--set-needs-check"];
run_ok(thin_generate_metadata_cmd(args))?;
Ok(())
}
pub fn generate_metadata_leaks(
md: &PathBuf,
nr_blocks: u64,
expected: u32,
actual: u32,
) -> Result<()> {
let nr_blocks_str = nr_blocks.to_string();
let expected_str = expected.to_string();
let actual_str = actual.to_string();
let args = args![
"-o",
&md,
"--create-metadata-leaks",
"--nr-blocks",
&nr_blocks_str,
"--expected",
&expected_str,
"--actual",
&actual_str
];
run_ok(thin_generate_damage_cmd(args))?;
Ok(())
}
pub fn get_needs_check(md: &PathBuf) -> Result<bool> {
use thinp::thin::superblock::*;
let engine = SyncIoEngine::new(md, 1, false)?;
let sb = read_superblock(&engine, SUPERBLOCK_LOCATION)?;
Ok(sb.flags.needs_check)
}
pub fn reserve_metadata_snap(md: &PathBuf) -> Result<()> {
let args = args!["-o", &md, "--reserve-metadata-snap"];
run_ok(thin_generate_metadata_cmd(args))?;
Ok(())
}
pub fn release_metadata_snap(md: &PathBuf) -> Result<()> {
let args = args!["-o", &md, "--release-metadata-snap"];
run_ok(thin_generate_metadata_cmd(args))?;
Ok(())
}
//-----------------------------------------------
|
set_needs_check
|
identifier_name
|
thin.rs
|
use anyhow::Result;
use std::path::PathBuf;
use thinp::file_utils;
use thinp::io_engine::*;
use crate::args;
use crate::common::fixture::*;
use crate::common::process::*;
use crate::common::target::*;
use crate::common::test_dir::TestDir;
use crate::common::thin_xml_generator::{write_xml, SingleThinS};
//-----------------------------------------------
pub fn mk_valid_xml(td: &mut TestDir) -> Result<PathBuf> {
let xml = td.mk_path("meta.xml");
let mut gen = SingleThinS::new(0, 1024, 2048, 2048);
write_xml(&xml, &mut gen)?;
Ok(xml)
}
pub fn mk_valid_md(td: &mut TestDir) -> Result<PathBuf> {
let xml = td.mk_path("meta.xml");
let md = td.mk_path("meta.bin");
let mut gen = SingleThinS::new(0, 1024, 20480, 20480);
write_xml(&xml, &mut gen)?;
let _file = file_utils::create_sized_file(&md, 4096 * 4096);
run_ok(thin_restore_cmd(args!["-i", &xml, "-o", &md]))?;
Ok(md)
}
//-----------------------------------------------
// FIXME: replace mk_valid_md with this?
pub fn prep_metadata(td: &mut TestDir) -> Result<PathBuf>
|
// Take a few snapshots.
let mut snap_id = 2;
for _i in 0..10 {
// take a snapshot
let snap_id_str = snap_id.to_string();
let args = args!["-o", &md, "--create-snap", &snap_id_str, "--origin", "1"];
run_ok(thin_generate_metadata_cmd(args))?;
// partially overwrite the origin (64MB)
let args = args![
"-o",
&md,
"--dev-id",
"1",
"--size",
"2097152",
"--io-size",
"131072",
"--rw=randwrite",
"--seq-nr=16"
];
run_ok(thin_generate_mappings_cmd(args))?;
snap_id += 1;
}
Ok(md)
}
pub fn set_needs_check(md: &PathBuf) -> Result<()> {
let args = args!["-o", &md, "--set-needs-check"];
run_ok(thin_generate_metadata_cmd(args))?;
Ok(())
}
pub fn generate_metadata_leaks(
md: &PathBuf,
nr_blocks: u64,
expected: u32,
actual: u32,
) -> Result<()> {
let nr_blocks_str = nr_blocks.to_string();
let expected_str = expected.to_string();
let actual_str = actual.to_string();
let args = args![
"-o",
&md,
"--create-metadata-leaks",
"--nr-blocks",
&nr_blocks_str,
"--expected",
&expected_str,
"--actual",
&actual_str
];
run_ok(thin_generate_damage_cmd(args))?;
Ok(())
}
pub fn get_needs_check(md: &PathBuf) -> Result<bool> {
use thinp::thin::superblock::*;
let engine = SyncIoEngine::new(md, 1, false)?;
let sb = read_superblock(&engine, SUPERBLOCK_LOCATION)?;
Ok(sb.flags.needs_check)
}
pub fn reserve_metadata_snap(md: &PathBuf) -> Result<()> {
let args = args!["-o", &md, "--reserve-metadata-snap"];
run_ok(thin_generate_metadata_cmd(args))?;
Ok(())
}
pub fn release_metadata_snap(md: &PathBuf) -> Result<()> {
let args = args!["-o", &md, "--release-metadata-snap"];
run_ok(thin_generate_metadata_cmd(args))?;
Ok(())
}
//-----------------------------------------------
|
{
let md = mk_zeroed_md(td)?;
let args = args!["-o", &md, "--format", "--nr-data-blocks", "102400"];
run_ok(thin_generate_metadata_cmd(args))?;
// Create a 2GB device
let args = args!["-o", &md, "--create-thin", "1"];
run_ok(thin_generate_metadata_cmd(args))?;
let args = args![
"-o",
&md,
"--dev-id",
"1",
"--size",
"2097152",
"--rw=randwrite",
"--seq-nr=16"
];
run_ok(thin_generate_mappings_cmd(args))?;
|
identifier_body
|
blake2b.rs
|
KEYBYTES],
key_length: u8,
last_node: u8,
digest_length: u8,
computed: bool, // whether the final digest has been computed
param: Blake2bParam
}
impl Clone for Blake2b { fn clone(&self) -> Blake2b { *self } }
#[derive(Copy, Clone)]
struct Blake2bParam {
digest_length: u8,
key_length: u8,
fanout: u8,
depth: u8,
leaf_length: u32,
node_offset: u64,
node_depth: u8,
inner_length: u8,
reserved: [u8; 14],
salt: [u8; BLAKE2B_SALTBYTES],
personal: [u8; BLAKE2B_PERSONALBYTES],
}
macro_rules! G( ($r:expr, $i:expr, $a:expr, $b:expr, $c:expr, $d:expr, $m:expr) => ({
$a = $a.wrapping_add($b).wrapping_add($m[SIGMA[$r][2*$i+0]]);
$d = ($d ^ $a).rotate_right(32);
$c = $c.wrapping_add($d);
$b = ($b ^ $c).rotate_right(24);
$a = $a.wrapping_add($b).wrapping_add($m[SIGMA[$r][2*$i+1]]);
$d = ($d ^ $a).rotate_right(16);
$c = $c.wrapping_add($d);
$b = ($b ^ $c).rotate_right(63);
}));
macro_rules! round( ($r:expr, $v:expr, $m:expr) => ( {
G!($r,0,$v[ 0],$v[ 4],$v[ 8],$v[12], $m);
G!($r,1,$v[ 1],$v[ 5],$v[ 9],$v[13], $m);
G!($r,2,$v[ 2],$v[ 6],$v[10],$v[14], $m);
G!($r,3,$v[ 3],$v[ 7],$v[11],$v[15], $m);
G!($r,4,$v[ 0],$v[ 5],$v[10],$v[15], $m);
G!($r,5,$v[ 1],$v[ 6],$v[11],$v[12], $m);
G!($r,6,$v[ 2],$v[ 7],$v[ 8],$v[13], $m);
G!($r,7,$v[ 3],$v[ 4],$v[ 9],$v[14], $m);
}
));
impl Blake2b {
fn set_lastnode(&mut self) {
self.f[1] = 0xFFFFFFFFFFFFFFFF;
}
fn set_lastblock(&mut self)
|
fn increment_counter(&mut self, inc : u64) {
self.t[0] += inc;
self.t[1] += if self.t[0] < inc { 1 } else { 0 };
}
fn init0(param: Blake2bParam, digest_length: u8, key: &[u8]) -> Blake2b {
assert!(key.len() <= BLAKE2B_KEYBYTES);
let mut b = Blake2b {
h: IV,
t: [0,0],
f: [0,0],
buf: [0; 2*BLAKE2B_BLOCKBYTES],
buflen: 0,
last_node: 0,
digest_length: digest_length,
computed: false,
key: [0; BLAKE2B_KEYBYTES],
key_length: key.len() as u8,
param: param
};
copy_memory(key, &mut b.key);
b
}
fn apply_param(&mut self) {
use std::io::Write;
use cryptoutil::WriteExt;
let mut param_bytes : [u8; 64] = [0; 64];
{
let mut writer: &mut [u8] = &mut param_bytes;
writer.write_u8(self.param.digest_length).unwrap();
writer.write_u8(self.param.key_length).unwrap();
writer.write_u8(self.param.fanout).unwrap();
writer.write_u8(self.param.depth).unwrap();
writer.write_u32_le(self.param.leaf_length).unwrap();
writer.write_u64_le(self.param.node_offset).unwrap();
writer.write_u8(self.param.node_depth).unwrap();
writer.write_u8(self.param.inner_length).unwrap();
writer.write_all(&self.param.reserved).unwrap();
writer.write_all(&self.param.salt).unwrap();
writer.write_all(&self.param.personal).unwrap();
}
let mut param_words : [u64; 8] = [0; 8];
read_u64v_le(&mut param_words, &param_bytes);
for (h, param_word) in self.h.iter_mut().zip(param_words.iter()) {
*h = *h ^ *param_word;
}
}
// init xors IV with input parameter block
fn init_param( p: Blake2bParam, key: &[u8] ) -> Blake2b {
let mut b = Blake2b::init0(p, p.digest_length, key);
b.apply_param();
b
}
fn default_param(outlen: u8) -> Blake2bParam {
Blake2bParam {
digest_length: outlen,
key_length: 0,
fanout: 1,
depth: 1,
leaf_length: 0,
node_offset: 0,
node_depth: 0,
inner_length: 0,
reserved: [0; 14],
salt: [0; BLAKE2B_SALTBYTES],
personal: [0; BLAKE2B_PERSONALBYTES],
}
}
pub fn new(outlen: usize) -> Blake2b {
assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES);
Blake2b::init_param(Blake2b::default_param(outlen as u8), &[])
}
fn apply_key(&mut self) {
let mut block : [u8; BLAKE2B_BLOCKBYTES] = [0; BLAKE2B_BLOCKBYTES];
copy_memory(&self.key[..self.key_length as usize], &mut block);
self.update(&block);
secure_memset(&mut block[..], 0);
}
pub fn new_keyed(outlen: usize, key: &[u8] ) -> Blake2b {
assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES);
assert!(key.len() > 0 && key.len() <= BLAKE2B_KEYBYTES);
let param = Blake2bParam {
digest_length: outlen as u8,
key_length: key.len() as u8,
fanout: 1,
depth: 1,
leaf_length: 0,
node_offset: 0,
node_depth: 0,
inner_length: 0,
reserved: [0; 14],
salt: [0; BLAKE2B_SALTBYTES],
personal: [0; BLAKE2B_PERSONALBYTES],
};
let mut b = Blake2b::init_param(param, key);
b.apply_key();
b
}
fn compress(&mut self) {
let mut ms: [u64; 16] = [0; 16];
let mut vs: [u64; 16] = [0; 16];
read_u64v_le(&mut ms, &self.buf[0..BLAKE2B_BLOCKBYTES]);
for (v, h) in vs.iter_mut().zip(self.h.iter()) {
*v = *h;
}
vs[ 8] = IV[0];
vs[ 9] = IV[1];
vs[10] = IV[2];
vs[11] = IV[3];
vs[12] = self.t[0] ^ IV[4];
vs[13] = self.t[1] ^ IV[5];
vs[14] = self.f[0] ^ IV[6];
vs[15] = self.f[1] ^ IV[7];
round!( 0, vs, ms );
round!( 1, vs, ms );
round!( 2, vs, ms );
round!( 3, vs, ms );
round!( 4, vs, ms );
round!( 5, vs, ms );
round!( 6, vs, ms );
round!( 7, vs, ms );
round!( 8, vs, ms );
round!( 9, vs, ms );
round!( 10, vs, ms );
round!( 11, vs, ms );
for (h_elem, (v_low, v_high)) in self.h.iter_mut().zip( vs[0..8].iter().zip(vs[8..16].iter()) ) {
*h_elem = *h_elem ^ *v_low ^ *v_high;
}
}
fn update( &mut self, mut input: &[u8] ) {
while input.len() > 0 {
let left = self.buflen;
let fill = 2 * BLAKE2B_BLOCKBYTES - left;
if input.len() > fill {
copy_memory(&input[0..fill], &mut self.buf[left..]); // Fill buffer
self.buflen += fill;
self.increment_counter( BLAKE2B_BLOCKBYTES as u64);
self.compress();
let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES);
let first_half = halves.next().unwrap();
let second_half = halves.next().unwrap();
copy_memory(second_half, first_half);
self.buflen -= BLAKE2B_BLOCKBYTES;
input = &input[fill..input.len()];
} else { // inlen <= fill
copy_memory(input, &mut self.buf[left..]);
self.buflen += input.len();
break;
}
}
}
fn finalize( &mut self, out: &mut [u8] ) {
assert!(out.len() == self.digest_length as usize);
if !self.computed {
if self.buflen > BLAKE2B_BLOCKBYTES {
self.increment_counter(BLAKE2B_BLOCKBYTES as u64);
self.compress();
self.buflen -= BLAKE2B_BLOCKBYTES;
let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES);
let first_half = halves.next().unwrap();
let second_half = halves.next().unwrap();
copy_memory(second_half, first_half);
}
let incby = self.buflen as u64;
self.increment_counter(incby);
self.set_lastblock();
for b in self.buf[self.buflen..].iter_mut() {
*b = 0;
}
self.compress();
write_u64v_le(&mut self.buf[0..64], &self.h);
self.computed = true;
}
let outlen = out.len();
copy_memory(&self.buf[0..outlen], out);
}
pub fn reset(&mut self) {
for (h_elem, iv_elem) in self.h.iter_mut().zip(IV.iter()) {
*h_elem = *iv_elem;
}
for t_elem in self.t.iter_mut() {
*t_elem = 0;
}
for f_elem in self.f.iter_mut() {
*f_elem = 0;
}
for b in self.buf.iter_mut() {
*b = 0;
}
self.buflen = 0;
self.last_node = 0;
self.computed = false;
self.apply_param();
if self.key_length > 0 {
self.apply_key();
}
}
pub fn blake2b(out: &mut[u8], input: &[u8], key: &[u8]) {
let mut hasher : Blake2b = if key.len() > 0 { Blake2b::new_keyed(out.len(), key) } else { Blake2b::new(out.len()) };
hasher.update(input);
hasher.finalize(out);
}
}
impl Digest for Blake2b {
fn reset(&mut self) { Blake2b::reset(self); }
fn input(&mut self, msg: &[u8]) { self.update(msg); }
fn result(&mut self, out: &mut [u8]) { self.finalize(out); }
fn output_bits(&self) -> usize { 8 * (self.digest_length as usize) }
fn block_size(&self) -> usize { 8 * BLAKE2B_BLOCKBYTES }
}
impl Mac for Blake2b {
/**
* Process input data.
*
* # Arguments
* * data - The input data to process.
*
*/
fn input(&mut self, data: &[u8]) {
self.update(data);
}
/**
* Reset the Mac state to begin processing another input stream.
*/
fn reset(&mut self) {
Blake2b::reset(self);
}
/**
* Obtain the result of a Mac computation as a MacResult.
*/
fn result(&mut self) -> MacResult {
let mut mac: Vec<u8> = repeat(0).take(self.digest_length as usize).collect();
self.raw_result(&mut mac);
MacResult::new_from_owned(mac)
}
/**
* Obtain the result of a Mac computation as [u8]. This method should be used very carefully
* since incorrect use of the Mac code could result in permitting a timing attack which defeats
* the security provided by a Mac function.
*/
fn raw_result(&mut self, output: &mut [u8]) {
self.finalize(output);
}
/**
* Get the size of the Mac code, in bytes.
*/
fn output_bytes(&self) -> usize { self.digest_length as usize }
}
#[cfg(test)]
mod digest_tests {
//use cryptoutil::test::test_digest_1million_random;
use blake2b::Blake2b;
use digest::Digest;
use serialize::hex::FromHex;
struct Test {
input: Vec<u8>,
output: Vec<u8>,
key: Option<Vec<u8>>,
}
fn test_hash(tests: &[Test]) {
for t in tests {
let mut sh = match t.key {
Some(ref key) => Blake2b::new_keyed(64, &key),
None => Blake2b::new(64)
};
// Test that it works when accepting the message all at once
sh.input(&t.input[..]);
let mut out = [0u8; 64];
sh.result(&mut out);
assert!(&out[..] == &t.output[..]);
sh.reset();
// Test that it works when accepting the message in pieces
let len = t.input.len();
let mut left = len;
while left > 0 {
let take = (left + 1) / 2;
sh.input(&t.input[len - left..take + len - left]);
left -= take;
}
let mut out = [0u8; 64];
sh.result(&mut out);
assert!(&out[..] == &t.output[..]);
sh.reset();
}
}
#[test]
fn test_blake2b_digest() {
let tests = vec![
// Examples from wikipedia
Test {
input: vec![],
output: "786a02f742015903c6c6fd852552d272\
912f4740e15847618a86e217f71f5419\
d25e1031afee585313896444934eb04b\
903a685b1448b755d56f701afe9be2ce".from_hex().unwrap(),
key: None
},
Test {
input: "The quick brown fox jumps over the lazy dog".as_bytes().to_vec(),
output: "a8add4bdddfd93e4877d2746e62817b1\
16364a1fa7bc148d95090bc7333b3673\
f82401cf7aa2e4cb1ecd90296e3f14cb\
5413f8ed77be73045b13914cdcd6a918".from_hex().unwrap(),
key: None
},
// from: https://github.com/BLAKE2/BLAKE2/blob/master/testvectors/blake2b-test.txt
Test {
input: vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53,
0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83,
0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b,
0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3,
0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0
|
{
if self.last_node != 0 {
self.set_lastnode();
}
self.f[0] = 0xFFFFFFFFFFFFFFFF;
}
|
identifier_body
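Editorial aside on the blake2b.rs sample above (not part of the dataset): increment_counter keeps a 128-bit byte count in two u64 words, using `t[0] < inc` after the addition to detect overflow of the low word. A minimal sketch of the same carry logic, with the wrapping made explicit so the check is well-defined; the name increment is illustrative only.

// Two-word byte counter: t[0] is the low 64 bits, t[1] absorbs the carry.
fn increment(t: &mut [u64; 2], inc: u64) {
    let (lo, overflowed) = t[0].overflowing_add(inc);
    t[0] = lo;
    if overflowed {
        t[1] += 1; // carry into the high word; this is what `t[0] < inc` detects
    }
}

fn main() {
    let mut t = [u64::MAX, 0];
    increment(&mut t, 1);
    assert_eq!(t, [0, 1]); // low word wrapped, carry propagated
}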
|
blake2b.rs
|
KEYBYTES],
key_length: u8,
last_node: u8,
digest_length: u8,
computed: bool, // whether the final digest has been computed
param: Blake2bParam
}
impl Clone for Blake2b { fn clone(&self) -> Blake2b { *self } }
#[derive(Copy, Clone)]
struct Blake2bParam {
digest_length: u8,
key_length: u8,
fanout: u8,
depth: u8,
leaf_length: u32,
node_offset: u64,
node_depth: u8,
inner_length: u8,
reserved: [u8; 14],
salt: [u8; BLAKE2B_SALTBYTES],
personal: [u8; BLAKE2B_PERSONALBYTES],
}
macro_rules! G( ($r:expr, $i:expr, $a:expr, $b:expr, $c:expr, $d:expr, $m:expr) => ({
$a = $a.wrapping_add($b).wrapping_add($m[SIGMA[$r][2*$i+0]]);
$d = ($d ^ $a).rotate_right(32);
$c = $c.wrapping_add($d);
$b = ($b ^ $c).rotate_right(24);
$a = $a.wrapping_add($b).wrapping_add($m[SIGMA[$r][2*$i+1]]);
$d = ($d ^ $a).rotate_right(16);
$c = $c.wrapping_add($d);
$b = ($b ^ $c).rotate_right(63);
}));
macro_rules! round( ($r:expr, $v:expr, $m:expr) => ( {
G!($r,0,$v[ 0],$v[ 4],$v[ 8],$v[12], $m);
G!($r,1,$v[ 1],$v[ 5],$v[ 9],$v[13], $m);
G!($r,2,$v[ 2],$v[ 6],$v[10],$v[14], $m);
G!($r,3,$v[ 3],$v[ 7],$v[11],$v[15], $m);
G!($r,4,$v[ 0],$v[ 5],$v[10],$v[15], $m);
G!($r,5,$v[ 1],$v[ 6],$v[11],$v[12], $m);
G!($r,6,$v[ 2],$v[ 7],$v[ 8],$v[13], $m);
G!($r,7,$v[ 3],$v[ 4],$v[ 9],$v[14], $m);
}
));
impl Blake2b {
fn set_lastnode(&mut self) {
self.f[1] = 0xFFFFFFFFFFFFFFFF;
}
fn set_lastblock(&mut self) {
if self.last_node!=0 {
self.set_lastnode();
}
self.f[0] = 0xFFFFFFFFFFFFFFFF;
}
fn increment_counter(&mut self, inc : u64) {
self.t[0] += inc;
self.t[1] += if self.t[0] < inc { 1 } else { 0 };
}
fn init0(param: Blake2bParam, digest_length: u8, key: &[u8]) -> Blake2b {
assert!(key.len() <= BLAKE2B_KEYBYTES);
let mut b = Blake2b {
h: IV,
t: [0,0],
f: [0,0],
buf: [0; 2*BLAKE2B_BLOCKBYTES],
buflen: 0,
last_node: 0,
digest_length: digest_length,
computed: false,
key: [0; BLAKE2B_KEYBYTES],
key_length: key.len() as u8,
param: param
};
copy_memory(key, &mut b.key);
b
}
fn apply_param(&mut self) {
use std::io::Write;
use cryptoutil::WriteExt;
let mut param_bytes : [u8; 64] = [0; 64];
{
let mut writer: &mut [u8] = &mut param_bytes;
writer.write_u8(self.param.digest_length).unwrap();
writer.write_u8(self.param.key_length).unwrap();
writer.write_u8(self.param.fanout).unwrap();
writer.write_u8(self.param.depth).unwrap();
writer.write_u32_le(self.param.leaf_length).unwrap();
writer.write_u64_le(self.param.node_offset).unwrap();
writer.write_u8(self.param.node_depth).unwrap();
writer.write_u8(self.param.inner_length).unwrap();
writer.write_all(&self.param.reserved).unwrap();
writer.write_all(&self.param.salt).unwrap();
writer.write_all(&self.param.personal).unwrap();
}
let mut param_words : [u64; 8] = [0; 8];
        read_u64v_le(&mut param_words, &param_bytes);
for (h, param_word) in self.h.iter_mut().zip(param_words.iter()) {
*h = *h ^ *param_word;
}
}
// init xors IV with input parameter block
fn init_param( p: Blake2bParam, key: &[u8] ) -> Blake2b {
let mut b = Blake2b::init0(p, p.digest_length, key);
b.apply_param();
b
}
fn default_param(outlen: u8) -> Blake2bParam {
Blake2bParam {
digest_length: outlen,
key_length: 0,
fanout: 1,
depth: 1,
leaf_length: 0,
node_offset: 0,
node_depth: 0,
inner_length: 0,
reserved: [0; 14],
salt: [0; BLAKE2B_SALTBYTES],
personal: [0; BLAKE2B_PERSONALBYTES],
}
}
pub fn
|
(outlen: usize) -> Blake2b {
assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES);
Blake2b::init_param(Blake2b::default_param(outlen as u8), &[])
}
fn apply_key(&mut self) {
let mut block : [u8; BLAKE2B_BLOCKBYTES] = [0; BLAKE2B_BLOCKBYTES];
copy_memory(&self.key[..self.key_length as usize], &mut block);
self.update(&block);
secure_memset(&mut block[..], 0);
}
pub fn new_keyed(outlen: usize, key: &[u8] ) -> Blake2b {
assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES);
assert!(key.len() > 0 && key.len() <= BLAKE2B_KEYBYTES);
let param = Blake2bParam {
digest_length: outlen as u8,
key_length: key.len() as u8,
fanout: 1,
depth: 1,
leaf_length: 0,
node_offset: 0,
node_depth: 0,
inner_length: 0,
reserved: [0; 14],
salt: [0; BLAKE2B_SALTBYTES],
personal: [0; BLAKE2B_PERSONALBYTES],
};
let mut b = Blake2b::init_param(param, key);
b.apply_key();
b
}
fn compress(&mut self) {
let mut ms: [u64; 16] = [0; 16];
let mut vs: [u64; 16] = [0; 16];
read_u64v_le(&mut ms, &self.buf[0..BLAKE2B_BLOCKBYTES]);
for (v, h) in vs.iter_mut().zip(self.h.iter()) {
*v = *h;
}
vs[ 8] = IV[0];
vs[ 9] = IV[1];
vs[10] = IV[2];
vs[11] = IV[3];
vs[12] = self.t[0] ^ IV[4];
vs[13] = self.t[1] ^ IV[5];
vs[14] = self.f[0] ^ IV[6];
vs[15] = self.f[1] ^ IV[7];
round!( 0, vs, ms );
round!( 1, vs, ms );
round!( 2, vs, ms );
round!( 3, vs, ms );
round!( 4, vs, ms );
round!( 5, vs, ms );
round!( 6, vs, ms );
round!( 7, vs, ms );
round!( 8, vs, ms );
round!( 9, vs, ms );
round!( 10, vs, ms );
round!( 11, vs, ms );
for (h_elem, (v_low, v_high)) in self.h.iter_mut().zip( vs[0..8].iter().zip(vs[8..16].iter()) ) {
*h_elem = *h_elem ^ *v_low ^ *v_high;
}
}
fn update( &mut self, mut input: &[u8] ) {
while input.len() > 0 {
let left = self.buflen;
let fill = 2 * BLAKE2B_BLOCKBYTES - left;
if input.len() > fill {
copy_memory(&input[0..fill], &mut self.buf[left..]); // Fill buffer
self.buflen += fill;
self.increment_counter( BLAKE2B_BLOCKBYTES as u64);
self.compress();
let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES);
let first_half = halves.next().unwrap();
let second_half = halves.next().unwrap();
copy_memory(second_half, first_half);
self.buflen -= BLAKE2B_BLOCKBYTES;
input = &input[fill..input.len()];
} else { // inlen <= fill
copy_memory(input, &mut self.buf[left..]);
self.buflen += input.len();
break;
}
}
}
fn finalize( &mut self, out: &mut [u8] ) {
assert!(out.len() == self.digest_length as usize);
        if !self.computed {
if self.buflen > BLAKE2B_BLOCKBYTES {
self.increment_counter(BLAKE2B_BLOCKBYTES as u64);
self.compress();
self.buflen -= BLAKE2B_BLOCKBYTES;
let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES);
let first_half = halves.next().unwrap();
let second_half = halves.next().unwrap();
copy_memory(second_half, first_half);
}
let incby = self.buflen as u64;
self.increment_counter(incby);
self.set_lastblock();
for b in self.buf[self.buflen..].iter_mut() {
*b = 0;
}
self.compress();
write_u64v_le(&mut self.buf[0..64], &self.h);
self.computed = true;
}
let outlen = out.len();
copy_memory(&self.buf[0..outlen], out);
}
pub fn reset(&mut self) {
for (h_elem, iv_elem) in self.h.iter_mut().zip(IV.iter()) {
*h_elem = *iv_elem;
}
for t_elem in self.t.iter_mut() {
*t_elem = 0;
}
for f_elem in self.f.iter_mut() {
*f_elem = 0;
}
for b in self.buf.iter_mut() {
*b = 0;
}
self.buflen = 0;
self.last_node = 0;
self.computed = false;
self.apply_param();
if self.key_length > 0 {
self.apply_key();
}
}
pub fn blake2b(out: &mut[u8], input: &[u8], key: &[u8]) {
let mut hasher : Blake2b = if key.len() > 0 { Blake2b::new_keyed(out.len(), key) } else { Blake2b::new(out.len()) };
hasher.update(input);
hasher.finalize(out);
}
}
impl Digest for Blake2b {
fn reset(&mut self) { Blake2b::reset(self); }
fn input(&mut self, msg: &[u8]) { self.update(msg); }
fn result(&mut self, out: &mut [u8]) { self.finalize(out); }
fn output_bits(&self) -> usize { 8 * (self.digest_length as usize) }
fn block_size(&self) -> usize { 8 * BLAKE2B_BLOCKBYTES }
}
impl Mac for Blake2b {
/**
* Process input data.
*
* # Arguments
* * data - The input data to process.
*
*/
fn input(&mut self, data: &[u8]) {
self.update(data);
}
/**
* Reset the Mac state to begin processing another input stream.
*/
fn reset(&mut self) {
Blake2b::reset(self);
}
/**
* Obtain the result of a Mac computation as a MacResult.
*/
fn result(&mut self) -> MacResult {
let mut mac: Vec<u8> = repeat(0).take(self.digest_length as usize).collect();
self.raw_result(&mut mac);
MacResult::new_from_owned(mac)
}
/**
* Obtain the result of a Mac computation as [u8]. This method should be used very carefully
* since incorrect use of the Mac code could result in permitting a timing attack which defeats
* the security provided by a Mac function.
*/
fn raw_result(&mut self, output: &mut [u8]) {
self.finalize(output);
}
/**
* Get the size of the Mac code, in bytes.
*/
fn output_bytes(&self) -> usize { self.digest_length as usize }
}
#[cfg(test)]
mod digest_tests {
//use cryptoutil::test::test_digest_1million_random;
use blake2b::Blake2b;
use digest::Digest;
use serialize::hex::FromHex;
struct Test {
input: Vec<u8>,
output: Vec<u8>,
key: Option<Vec<u8>>,
}
fn test_hash(tests: &[Test]) {
for t in tests {
let mut sh = match t.key {
Some(ref key) => Blake2b::new_keyed(64, &key),
None => Blake2b::new(64)
};
// Test that it works when accepting the message all at once
sh.input(&t.input[..]);
let mut out = [0u8; 64];
sh.result(&mut out);
assert!(&out[..] == &t.output[..]);
sh.reset();
// Test that it works when accepting the message in pieces
let len = t.input.len();
let mut left = len;
while left > 0 {
let take = (left + 1) / 2;
sh.input(&t.input[len - left..take + len - left]);
left -= take;
}
let mut out = [0u8; 64];
sh.result(&mut out);
assert!(&out[..] == &t.output[..]);
sh.reset();
}
}
#[test]
fn test_blake2b_digest() {
let tests = vec![
// Examples from wikipedia
Test {
input: vec![],
output: "786a02f742015903c6c6fd852552d272\
912f4740e15847618a86e217f71f5419\
d25e1031afee585313896444934eb04b\
903a685b1448b755d56f701afe9be2ce".from_hex().unwrap(),
key: None
},
Test {
input: "The quick brown fox jumps over the lazy dog".as_bytes().to_vec(),
output: "a8add4bdddfd93e4877d2746e62817b1\
16364a1fa7bc148d95090bc7333b3673\
f82401cf7aa2e4cb1ecd90296e3f14cb\
5413f8ed77be73045b13914cdcd6a918".from_hex().unwrap(),
key: None
},
// from: https://github.com/BLAKE2/BLAKE2/blob/master/testvectors/blake2b-test.txt
Test {
input: vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53,
0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83,
0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b,
0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3,
0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0
|
new
|
identifier_name
|
blake2b.rs
|
use std::io::Write;
use cryptoutil::WriteExt;
let mut param_bytes : [u8; 64] = [0; 64];
{
let mut writer: &mut [u8] = &mut param_bytes;
writer.write_u8(self.param.digest_length).unwrap();
writer.write_u8(self.param.key_length).unwrap();
writer.write_u8(self.param.fanout).unwrap();
writer.write_u8(self.param.depth).unwrap();
writer.write_u32_le(self.param.leaf_length).unwrap();
writer.write_u64_le(self.param.node_offset).unwrap();
writer.write_u8(self.param.node_depth).unwrap();
writer.write_u8(self.param.inner_length).unwrap();
writer.write_all(&self.param.reserved).unwrap();
writer.write_all(&self.param.salt).unwrap();
writer.write_all(&self.param.personal).unwrap();
}
let mut param_words : [u64; 8] = [0; 8];
        read_u64v_le(&mut param_words, &param_bytes);
for (h, param_word) in self.h.iter_mut().zip(param_words.iter()) {
*h = *h ^ *param_word;
}
}
// init xors IV with input parameter block
fn init_param( p: Blake2bParam, key: &[u8] ) -> Blake2b {
let mut b = Blake2b::init0(p, p.digest_length, key);
b.apply_param();
b
}
fn default_param(outlen: u8) -> Blake2bParam {
Blake2bParam {
digest_length: outlen,
key_length: 0,
fanout: 1,
depth: 1,
leaf_length: 0,
node_offset: 0,
node_depth: 0,
inner_length: 0,
reserved: [0; 14],
salt: [0; BLAKE2B_SALTBYTES],
personal: [0; BLAKE2B_PERSONALBYTES],
}
}
pub fn new(outlen: usize) -> Blake2b {
assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES);
Blake2b::init_param(Blake2b::default_param(outlen as u8), &[])
}
fn apply_key(&mut self) {
let mut block : [u8; BLAKE2B_BLOCKBYTES] = [0; BLAKE2B_BLOCKBYTES];
copy_memory(&self.key[..self.key_length as usize], &mut block);
self.update(&block);
secure_memset(&mut block[..], 0);
}
pub fn new_keyed(outlen: usize, key: &[u8] ) -> Blake2b {
assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES);
assert!(key.len() > 0 && key.len() <= BLAKE2B_KEYBYTES);
let param = Blake2bParam {
digest_length: outlen as u8,
key_length: key.len() as u8,
fanout: 1,
depth: 1,
leaf_length: 0,
node_offset: 0,
node_depth: 0,
inner_length: 0,
reserved: [0; 14],
salt: [0; BLAKE2B_SALTBYTES],
personal: [0; BLAKE2B_PERSONALBYTES],
};
let mut b = Blake2b::init_param(param, key);
b.apply_key();
b
}
fn compress(&mut self) {
let mut ms: [u64; 16] = [0; 16];
let mut vs: [u64; 16] = [0; 16];
read_u64v_le(&mut ms, &self.buf[0..BLAKE2B_BLOCKBYTES]);
for (v, h) in vs.iter_mut().zip(self.h.iter()) {
*v = *h;
}
vs[ 8] = IV[0];
vs[ 9] = IV[1];
vs[10] = IV[2];
vs[11] = IV[3];
vs[12] = self.t[0] ^ IV[4];
vs[13] = self.t[1] ^ IV[5];
vs[14] = self.f[0] ^ IV[6];
vs[15] = self.f[1] ^ IV[7];
round!( 0, vs, ms );
round!( 1, vs, ms );
round!( 2, vs, ms );
round!( 3, vs, ms );
round!( 4, vs, ms );
round!( 5, vs, ms );
round!( 6, vs, ms );
round!( 7, vs, ms );
round!( 8, vs, ms );
round!( 9, vs, ms );
round!( 10, vs, ms );
round!( 11, vs, ms );
for (h_elem, (v_low, v_high)) in self.h.iter_mut().zip( vs[0..8].iter().zip(vs[8..16].iter()) ) {
*h_elem = *h_elem ^ *v_low ^ *v_high;
}
}
fn update( &mut self, mut input: &[u8] ) {
while input.len() > 0 {
let left = self.buflen;
let fill = 2 * BLAKE2B_BLOCKBYTES - left;
if input.len() > fill {
copy_memory(&input[0..fill], &mut self.buf[left..]); // Fill buffer
self.buflen += fill;
self.increment_counter( BLAKE2B_BLOCKBYTES as u64);
self.compress();
let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES);
let first_half = halves.next().unwrap();
let second_half = halves.next().unwrap();
copy_memory(second_half, first_half);
self.buflen -= BLAKE2B_BLOCKBYTES;
input = &input[fill..input.len()];
} else { // inlen <= fill
copy_memory(input, &mut self.buf[left..]);
self.buflen += input.len();
break;
}
}
}
fn finalize( &mut self, out: &mut [u8] ) {
assert!(out.len() == self.digest_length as usize);
        if !self.computed {
if self.buflen > BLAKE2B_BLOCKBYTES {
self.increment_counter(BLAKE2B_BLOCKBYTES as u64);
self.compress();
self.buflen -= BLAKE2B_BLOCKBYTES;
let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES);
let first_half = halves.next().unwrap();
let second_half = halves.next().unwrap();
copy_memory(second_half, first_half);
}
let incby = self.buflen as u64;
self.increment_counter(incby);
self.set_lastblock();
for b in self.buf[self.buflen..].iter_mut() {
*b = 0;
}
self.compress();
write_u64v_le(&mut self.buf[0..64], &self.h);
self.computed = true;
}
let outlen = out.len();
copy_memory(&self.buf[0..outlen], out);
}
pub fn reset(&mut self) {
for (h_elem, iv_elem) in self.h.iter_mut().zip(IV.iter()) {
*h_elem = *iv_elem;
}
for t_elem in self.t.iter_mut() {
*t_elem = 0;
}
for f_elem in self.f.iter_mut() {
*f_elem = 0;
}
for b in self.buf.iter_mut() {
*b = 0;
}
self.buflen = 0;
self.last_node = 0;
self.computed = false;
self.apply_param();
if self.key_length > 0 {
self.apply_key();
}
}
pub fn blake2b(out: &mut[u8], input: &[u8], key: &[u8]) {
let mut hasher : Blake2b = if key.len() > 0 { Blake2b::new_keyed(out.len(), key) } else { Blake2b::new(out.len()) };
hasher.update(input);
hasher.finalize(out);
}
}
impl Digest for Blake2b {
fn reset(&mut self) { Blake2b::reset(self); }
fn input(&mut self, msg: &[u8]) { self.update(msg); }
fn result(&mut self, out: &mut [u8]) { self.finalize(out); }
fn output_bits(&self) -> usize { 8 * (self.digest_length as usize) }
fn block_size(&self) -> usize { 8 * BLAKE2B_BLOCKBYTES }
}
impl Mac for Blake2b {
/**
* Process input data.
*
* # Arguments
* * data - The input data to process.
*
*/
fn input(&mut self, data: &[u8]) {
self.update(data);
}
/**
* Reset the Mac state to begin processing another input stream.
*/
fn reset(&mut self) {
Blake2b::reset(self);
}
/**
* Obtain the result of a Mac computation as a MacResult.
*/
fn result(&mut self) -> MacResult {
let mut mac: Vec<u8> = repeat(0).take(self.digest_length as usize).collect();
self.raw_result(&mut mac);
MacResult::new_from_owned(mac)
}
/**
* Obtain the result of a Mac computation as [u8]. This method should be used very carefully
* since incorrect use of the Mac code could result in permitting a timing attack which defeats
* the security provided by a Mac function.
*/
fn raw_result(&mut self, output: &mut [u8]) {
self.finalize(output);
}
/**
* Get the size of the Mac code, in bytes.
*/
fn output_bytes(&self) -> usize { self.digest_length as usize }
}
#[cfg(test)]
mod digest_tests {
//use cryptoutil::test::test_digest_1million_random;
use blake2b::Blake2b;
use digest::Digest;
use serialize::hex::FromHex;
struct Test {
input: Vec<u8>,
output: Vec<u8>,
key: Option<Vec<u8>>,
}
fn test_hash(tests: &[Test]) {
for t in tests {
let mut sh = match t.key {
Some(ref key) => Blake2b::new_keyed(64, &key),
None => Blake2b::new(64)
};
// Test that it works when accepting the message all at once
sh.input(&t.input[..]);
let mut out = [0u8; 64];
sh.result(&mut out);
assert!(&out[..] == &t.output[..]);
sh.reset();
// Test that it works when accepting the message in pieces
let len = t.input.len();
let mut left = len;
while left > 0 {
let take = (left + 1) / 2;
sh.input(&t.input[len - left..take + len - left]);
left -= take;
}
let mut out = [0u8; 64];
sh.result(&mut out);
assert!(&out[..] == &t.output[..]);
sh.reset();
}
}
#[test]
fn test_blake2b_digest() {
let tests = vec![
// Examples from wikipedia
Test {
input: vec![],
output: "786a02f742015903c6c6fd852552d272\
912f4740e15847618a86e217f71f5419\
d25e1031afee585313896444934eb04b\
903a685b1448b755d56f701afe9be2ce".from_hex().unwrap(),
key: None
},
Test {
input: "The quick brown fox jumps over the lazy dog".as_bytes().to_vec(),
output: "a8add4bdddfd93e4877d2746e62817b1\
16364a1fa7bc148d95090bc7333b3673\
f82401cf7aa2e4cb1ecd90296e3f14cb\
5413f8ed77be73045b13914cdcd6a918".from_hex().unwrap(),
key: None
},
// from: https://github.com/BLAKE2/BLAKE2/blob/master/testvectors/blake2b-test.txt
Test {
input: vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53,
0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83,
0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b,
0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3,
0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb,
0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3,
0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb,
0xfc, 0xfd, 0xfe],
output: vec![0x14, 0x27, 0x09, 0xd6, 0x2e, 0x28, 0xfc, 0xcc, 0xd0, 0xaf, 0x97,
0xfa, 0xd0, 0xf8, 0x46, 0x5b, 0x97, 0x1e, 0x82, 0x20, 0x1d, 0xc5,
0x10, 0x70, 0xfa, 0xa0, 0x37, 0x2a, 0xa4, 0x3e, 0x92, 0x48, 0x4b,
0xe1, 0xc1, 0xe7, 0x3b, 0xa1, 0x09, 0x06, 0xd5, 0xd1, 0x85, 0x3d,
0xb6, 0xa4, 0x10, 0x6e, 0x0a, 0x7b, 0xf9, 0x80, 0x0d, 0x37, 0x3d,
0x6d, 0xee, 0x2d, 0x46, 0xd6, 0x2e, 0xf2, 0xa4, 0x61],
key: Some(vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a,
0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36,
0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f])
},
];
test_hash(&tests[..]);
}
}
#[cfg(test)]
|
mod mac_tests {
use blake2b::Blake2b;
|
random_line_split
|
|
blake2b.rs
|
KEYBYTES],
key_length: u8,
last_node: u8,
digest_length: u8,
computed: bool, // whether the final digest has been computed
param: Blake2bParam
}
impl Clone for Blake2b { fn clone(&self) -> Blake2b { *self } }
#[derive(Copy, Clone)]
struct Blake2bParam {
digest_length: u8,
key_length: u8,
fanout: u8,
depth: u8,
leaf_length: u32,
node_offset: u64,
node_depth: u8,
inner_length: u8,
reserved: [u8; 14],
salt: [u8; BLAKE2B_SALTBYTES],
personal: [u8; BLAKE2B_PERSONALBYTES],
}
macro_rules! G( ($r:expr, $i:expr, $a:expr, $b:expr, $c:expr, $d:expr, $m:expr) => ({
$a = $a.wrapping_add($b).wrapping_add($m[SIGMA[$r][2*$i+0]]);
$d = ($d ^ $a).rotate_right(32);
$c = $c.wrapping_add($d);
$b = ($b ^ $c).rotate_right(24);
$a = $a.wrapping_add($b).wrapping_add($m[SIGMA[$r][2*$i+1]]);
$d = ($d ^ $a).rotate_right(16);
$c = $c.wrapping_add($d);
$b = ($b ^ $c).rotate_right(63);
}));
macro_rules! round( ($r:expr, $v:expr, $m:expr) => ( {
G!($r,0,$v[ 0],$v[ 4],$v[ 8],$v[12], $m);
G!($r,1,$v[ 1],$v[ 5],$v[ 9],$v[13], $m);
G!($r,2,$v[ 2],$v[ 6],$v[10],$v[14], $m);
G!($r,3,$v[ 3],$v[ 7],$v[11],$v[15], $m);
G!($r,4,$v[ 0],$v[ 5],$v[10],$v[15], $m);
G!($r,5,$v[ 1],$v[ 6],$v[11],$v[12], $m);
G!($r,6,$v[ 2],$v[ 7],$v[ 8],$v[13], $m);
G!($r,7,$v[ 3],$v[ 4],$v[ 9],$v[14], $m);
}
));
impl Blake2b {
fn set_lastnode(&mut self) {
self.f[1] = 0xFFFFFFFFFFFFFFFF;
}
fn set_lastblock(&mut self) {
if self.last_node!=0
|
self.f[0] = 0xFFFFFFFFFFFFFFFF;
}
fn increment_counter(&mut self, inc : u64) {
self.t[0] += inc;
self.t[1] += if self.t[0] < inc { 1 } else { 0 };
}
fn init0(param: Blake2bParam, digest_length: u8, key: &[u8]) -> Blake2b {
assert!(key.len() <= BLAKE2B_KEYBYTES);
let mut b = Blake2b {
h: IV,
t: [0,0],
f: [0,0],
buf: [0; 2*BLAKE2B_BLOCKBYTES],
buflen: 0,
last_node: 0,
digest_length: digest_length,
computed: false,
key: [0; BLAKE2B_KEYBYTES],
key_length: key.len() as u8,
param: param
};
copy_memory(key, &mut b.key);
b
}
fn apply_param(&mut self) {
use std::io::Write;
use cryptoutil::WriteExt;
let mut param_bytes : [u8; 64] = [0; 64];
{
let mut writer: &mut [u8] = &mut param_bytes;
writer.write_u8(self.param.digest_length).unwrap();
writer.write_u8(self.param.key_length).unwrap();
writer.write_u8(self.param.fanout).unwrap();
writer.write_u8(self.param.depth).unwrap();
writer.write_u32_le(self.param.leaf_length).unwrap();
writer.write_u64_le(self.param.node_offset).unwrap();
writer.write_u8(self.param.node_depth).unwrap();
writer.write_u8(self.param.inner_length).unwrap();
writer.write_all(&self.param.reserved).unwrap();
writer.write_all(&self.param.salt).unwrap();
writer.write_all(&self.param.personal).unwrap();
}
let mut param_words : [u64; 8] = [0; 8];
        read_u64v_le(&mut param_words, &param_bytes);
for (h, param_word) in self.h.iter_mut().zip(param_words.iter()) {
*h = *h ^ *param_word;
}
}
// init xors IV with input parameter block
fn init_param( p: Blake2bParam, key: &[u8] ) -> Blake2b {
let mut b = Blake2b::init0(p, p.digest_length, key);
b.apply_param();
b
}
fn default_param(outlen: u8) -> Blake2bParam {
Blake2bParam {
digest_length: outlen,
key_length: 0,
fanout: 1,
depth: 1,
leaf_length: 0,
node_offset: 0,
node_depth: 0,
inner_length: 0,
reserved: [0; 14],
salt: [0; BLAKE2B_SALTBYTES],
personal: [0; BLAKE2B_PERSONALBYTES],
}
}
pub fn new(outlen: usize) -> Blake2b {
assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES);
Blake2b::init_param(Blake2b::default_param(outlen as u8), &[])
}
fn apply_key(&mut self) {
let mut block : [u8; BLAKE2B_BLOCKBYTES] = [0; BLAKE2B_BLOCKBYTES];
copy_memory(&self.key[..self.key_length as usize], &mut block);
self.update(&block);
secure_memset(&mut block[..], 0);
}
pub fn new_keyed(outlen: usize, key: &[u8] ) -> Blake2b {
assert!(outlen > 0 && outlen <= BLAKE2B_OUTBYTES);
assert!(key.len() > 0 && key.len() <= BLAKE2B_KEYBYTES);
let param = Blake2bParam {
digest_length: outlen as u8,
key_length: key.len() as u8,
fanout: 1,
depth: 1,
leaf_length: 0,
node_offset: 0,
node_depth: 0,
inner_length: 0,
reserved: [0; 14],
salt: [0; BLAKE2B_SALTBYTES],
personal: [0; BLAKE2B_PERSONALBYTES],
};
let mut b = Blake2b::init_param(param, key);
b.apply_key();
b
}
fn compress(&mut self) {
let mut ms: [u64; 16] = [0; 16];
let mut vs: [u64; 16] = [0; 16];
read_u64v_le(&mut ms, &self.buf[0..BLAKE2B_BLOCKBYTES]);
for (v, h) in vs.iter_mut().zip(self.h.iter()) {
*v = *h;
}
vs[ 8] = IV[0];
vs[ 9] = IV[1];
vs[10] = IV[2];
vs[11] = IV[3];
vs[12] = self.t[0] ^ IV[4];
vs[13] = self.t[1] ^ IV[5];
vs[14] = self.f[0] ^ IV[6];
vs[15] = self.f[1] ^ IV[7];
round!( 0, vs, ms );
round!( 1, vs, ms );
round!( 2, vs, ms );
round!( 3, vs, ms );
round!( 4, vs, ms );
round!( 5, vs, ms );
round!( 6, vs, ms );
round!( 7, vs, ms );
round!( 8, vs, ms );
round!( 9, vs, ms );
round!( 10, vs, ms );
round!( 11, vs, ms );
for (h_elem, (v_low, v_high)) in self.h.iter_mut().zip( vs[0..8].iter().zip(vs[8..16].iter()) ) {
*h_elem = *h_elem ^ *v_low ^ *v_high;
}
}
fn update( &mut self, mut input: &[u8] ) {
while input.len() > 0 {
let left = self.buflen;
let fill = 2 * BLAKE2B_BLOCKBYTES - left;
if input.len() > fill {
copy_memory(&input[0..fill], &mut self.buf[left..]); // Fill buffer
self.buflen += fill;
self.increment_counter( BLAKE2B_BLOCKBYTES as u64);
self.compress();
let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES);
let first_half = halves.next().unwrap();
let second_half = halves.next().unwrap();
copy_memory(second_half, first_half);
self.buflen -= BLAKE2B_BLOCKBYTES;
input = &input[fill..input.len()];
} else { // inlen <= fill
copy_memory(input, &mut self.buf[left..]);
self.buflen += input.len();
break;
}
}
}
fn finalize( &mut self, out: &mut [u8] ) {
assert!(out.len() == self.digest_length as usize);
        if !self.computed {
if self.buflen > BLAKE2B_BLOCKBYTES {
self.increment_counter(BLAKE2B_BLOCKBYTES as u64);
self.compress();
self.buflen -= BLAKE2B_BLOCKBYTES;
let mut halves = self.buf.chunks_mut(BLAKE2B_BLOCKBYTES);
let first_half = halves.next().unwrap();
let second_half = halves.next().unwrap();
copy_memory(second_half, first_half);
}
let incby = self.buflen as u64;
self.increment_counter(incby);
self.set_lastblock();
for b in self.buf[self.buflen..].iter_mut() {
*b = 0;
}
self.compress();
write_u64v_le(&mut self.buf[0..64], &self.h);
self.computed = true;
}
let outlen = out.len();
copy_memory(&self.buf[0..outlen], out);
}
pub fn reset(&mut self) {
for (h_elem, iv_elem) in self.h.iter_mut().zip(IV.iter()) {
*h_elem = *iv_elem;
}
for t_elem in self.t.iter_mut() {
*t_elem = 0;
}
for f_elem in self.f.iter_mut() {
*f_elem = 0;
}
for b in self.buf.iter_mut() {
*b = 0;
}
self.buflen = 0;
self.last_node = 0;
self.computed = false;
self.apply_param();
if self.key_length > 0 {
self.apply_key();
}
}
pub fn blake2b(out: &mut[u8], input: &[u8], key: &[u8]) {
let mut hasher : Blake2b = if key.len() > 0 { Blake2b::new_keyed(out.len(), key) } else { Blake2b::new(out.len()) };
hasher.update(input);
hasher.finalize(out);
}
}
impl Digest for Blake2b {
fn reset(&mut self) { Blake2b::reset(self); }
fn input(&mut self, msg: &[u8]) { self.update(msg); }
fn result(&mut self, out: &mut [u8]) { self.finalize(out); }
fn output_bits(&self) -> usize { 8 * (self.digest_length as usize) }
fn block_size(&self) -> usize { 8 * BLAKE2B_BLOCKBYTES }
}
impl Mac for Blake2b {
/**
* Process input data.
*
* # Arguments
* * data - The input data to process.
*
*/
fn input(&mut self, data: &[u8]) {
self.update(data);
}
/**
* Reset the Mac state to begin processing another input stream.
*/
fn reset(&mut self) {
Blake2b::reset(self);
}
/**
* Obtain the result of a Mac computation as a MacResult.
*/
fn result(&mut self) -> MacResult {
let mut mac: Vec<u8> = repeat(0).take(self.digest_length as usize).collect();
self.raw_result(&mut mac);
MacResult::new_from_owned(mac)
}
/**
* Obtain the result of a Mac computation as [u8]. This method should be used very carefully
* since incorrect use of the Mac code could result in permitting a timing attack which defeats
* the security provided by a Mac function.
*/
fn raw_result(&mut self, output: &mut [u8]) {
self.finalize(output);
}
/**
* Get the size of the Mac code, in bytes.
*/
fn output_bytes(&self) -> usize { self.digest_length as usize }
}
#[cfg(test)]
mod digest_tests {
//use cryptoutil::test::test_digest_1million_random;
use blake2b::Blake2b;
use digest::Digest;
use serialize::hex::FromHex;
struct Test {
input: Vec<u8>,
output: Vec<u8>,
key: Option<Vec<u8>>,
}
fn test_hash(tests: &[Test]) {
for t in tests {
let mut sh = match t.key {
Some(ref key) => Blake2b::new_keyed(64, &key),
None => Blake2b::new(64)
};
// Test that it works when accepting the message all at once
sh.input(&t.input[..]);
let mut out = [0u8; 64];
sh.result(&mut out);
assert!(&out[..] == &t.output[..]);
sh.reset();
// Test that it works when accepting the message in pieces
let len = t.input.len();
let mut left = len;
while left > 0 {
let take = (left + 1) / 2;
sh.input(&t.input[len - left..take + len - left]);
left -= take;
}
let mut out = [0u8; 64];
sh.result(&mut out);
assert!(&out[..] == &t.output[..]);
sh.reset();
}
}
#[test]
fn test_blake2b_digest() {
let tests = vec![
// Examples from wikipedia
Test {
input: vec![],
output: "786a02f742015903c6c6fd852552d272\
912f4740e15847618a86e217f71f5419\
d25e1031afee585313896444934eb04b\
903a685b1448b755d56f701afe9be2ce".from_hex().unwrap(),
key: None
},
Test {
input: "The quick brown fox jumps over the lazy dog".as_bytes().to_vec(),
output: "a8add4bdddfd93e4877d2746e62817b1\
16364a1fa7bc148d95090bc7333b3673\
f82401cf7aa2e4cb1ecd90296e3f14cb\
5413f8ed77be73045b13914cdcd6a918".from_hex().unwrap(),
key: None
},
// from: https://github.com/BLAKE2/BLAKE2/blob/master/testvectors/blake2b-test.txt
Test {
input: vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53,
0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83,
0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b,
0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3,
0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0
|
{
self.set_lastnode();
}
|
conditional_block
|
gameboy.rs
|
// This file is part of Mooneye GB.
// Copyright (C) 2014-2020 Joonas Javanainen <[email protected]>
//
// Mooneye GB is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Mooneye GB is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Mooneye GB. If not, see <http://www.gnu.org/licenses/>.
pub type HiramData = [u8; HIRAM_SIZE];
pub type ScreenBuffer = [Color; SCREEN_PIXELS];
#[derive(Debug, PartialEq, Clone, Copy)]
#[repr(u8)]
pub enum Color {
Off = 0,
Light = 1,
Dark = 2,
On = 3,
}
impl Color {
#[inline]
|
3 => On,
_ => Off,
}
}
}
impl Into<u8> for Color {
fn into(self) -> u8 {
match self {
Color::Off => 0,
Color::Light => 1,
Color::Dark => 2,
Color::On => 3,
}
}
}
pub const CPU_SPEED_HZ: usize = 4_194_304;
pub const HIRAM_SIZE: usize = 0x80;
pub const HIRAM_EMPTY: HiramData = [0; HIRAM_SIZE];
pub const ROM_BANK_SIZE: usize = 0x4000;
pub const RAM_BANK_SIZE: usize = 0x2000;
pub const SCREEN_WIDTH: usize = 160;
pub const SCREEN_HEIGHT: usize = 144;
pub const SCREEN_PIXELS: usize = SCREEN_WIDTH * SCREEN_HEIGHT;
pub const SCREEN_EMPTY: ScreenBuffer = [Color::Off; SCREEN_PIXELS];
|
pub fn from_u8(value: u8) -> Color {
use self::Color::*;
match value {
1 => Light,
2 => Dark,
|
random_line_split
|
gameboy.rs
|
// This file is part of Mooneye GB.
// Copyright (C) 2014-2020 Joonas Javanainen <[email protected]>
//
// Mooneye GB is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Mooneye GB is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Mooneye GB. If not, see <http://www.gnu.org/licenses/>.
pub type HiramData = [u8; HIRAM_SIZE];
pub type ScreenBuffer = [Color; SCREEN_PIXELS];
#[derive(Debug, PartialEq, Clone, Copy)]
#[repr(u8)]
pub enum Color {
Off = 0,
Light = 1,
Dark = 2,
On = 3,
}
impl Color {
#[inline]
pub fn from_u8(value: u8) -> Color {
use self::Color::*;
match value {
1 => Light,
2 => Dark,
3 => On,
_ => Off,
}
}
}
impl Into<u8> for Color {
fn into(self) -> u8
|
}
pub const CPU_SPEED_HZ: usize = 4_194_304;
pub const HIRAM_SIZE: usize = 0x80;
pub const HIRAM_EMPTY: HiramData = [0; HIRAM_SIZE];
pub const ROM_BANK_SIZE: usize = 0x4000;
pub const RAM_BANK_SIZE: usize = 0x2000;
pub const SCREEN_WIDTH: usize = 160;
pub const SCREEN_HEIGHT: usize = 144;
pub const SCREEN_PIXELS: usize = SCREEN_WIDTH * SCREEN_HEIGHT;
pub const SCREEN_EMPTY: ScreenBuffer = [Color::Off; SCREEN_PIXELS];
|
{
match self {
Color::Off => 0,
Color::Light => 1,
Color::Dark => 2,
Color::On => 3,
}
}
|
identifier_body
|
gameboy.rs
|
// This file is part of Mooneye GB.
// Copyright (C) 2014-2020 Joonas Javanainen <[email protected]>
//
// Mooneye GB is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Mooneye GB is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Mooneye GB. If not, see <http://www.gnu.org/licenses/>.
pub type HiramData = [u8; HIRAM_SIZE];
pub type ScreenBuffer = [Color; SCREEN_PIXELS];
#[derive(Debug, PartialEq, Clone, Copy)]
#[repr(u8)]
pub enum
|
{
Off = 0,
Light = 1,
Dark = 2,
On = 3,
}
impl Color {
#[inline]
pub fn from_u8(value: u8) -> Color {
use self::Color::*;
match value {
1 => Light,
2 => Dark,
3 => On,
_ => Off,
}
}
}
impl Into<u8> for Color {
fn into(self) -> u8 {
match self {
Color::Off => 0,
Color::Light => 1,
Color::Dark => 2,
Color::On => 3,
}
}
}
pub const CPU_SPEED_HZ: usize = 4_194_304;
pub const HIRAM_SIZE: usize = 0x80;
pub const HIRAM_EMPTY: HiramData = [0; HIRAM_SIZE];
pub const ROM_BANK_SIZE: usize = 0x4000;
pub const RAM_BANK_SIZE: usize = 0x2000;
pub const SCREEN_WIDTH: usize = 160;
pub const SCREEN_HEIGHT: usize = 144;
pub const SCREEN_PIXELS: usize = SCREEN_WIDTH * SCREEN_HEIGHT;
pub const SCREEN_EMPTY: ScreenBuffer = [Color::Off; SCREEN_PIXELS];
|
Color
|
identifier_name
|
thunk.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Because this module is temporary...
#![allow(missing_docs)]
#![unstable(feature = "std_misc")]
use alloc::boxed::Box;
use core::marker::Send;
use core::ops::FnOnce;
pub struct Thunk<A=(),R=()> {
invoke: Box<Invoke<A,R>+Send>
}
impl<R> Thunk<(),R> {
pub fn new<F>(func: F) -> Thunk<(),R>
where F : FnOnce() -> R, F : Send
{
Thunk::with_arg(move|()| func())
}
}
impl<A,R> Thunk<A,R> {
pub fn with_arg<F>(func: F) -> Thunk<A,R>
where F : FnOnce(A) -> R, F : Send
{
Thunk {
invoke: box func
}
|
}
pub fn invoke(self, arg: A) -> R {
self.invoke.invoke(arg)
}
}
pub trait Invoke<A=(),R=()> {
fn invoke(self: Box<Self>, arg: A) -> R;
}
impl<A,R,F> Invoke<A,R> for F
where F : FnOnce(A) -> R
{
fn invoke(self: Box<F>, arg: A) -> R {
let f = *self;
f(arg)
}
}
|
random_line_split
|
|
thunk.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Because this module is temporary...
#![allow(missing_docs)]
#![unstable(feature = "std_misc")]
use alloc::boxed::Box;
use core::marker::Send;
use core::ops::FnOnce;
pub struct Thunk<A=(),R=()> {
invoke: Box<Invoke<A,R>+Send>
}
impl<R> Thunk<(),R> {
pub fn new<F>(func: F) -> Thunk<(),R>
where F : FnOnce() -> R, F : Send
{
Thunk::with_arg(move|()| func())
}
}
impl<A,R> Thunk<A,R> {
pub fn with_arg<F>(func: F) -> Thunk<A,R>
where F : FnOnce(A) -> R, F : Send
{
Thunk {
invoke: box func
}
}
pub fn
|
(self, arg: A) -> R {
self.invoke.invoke(arg)
}
}
pub trait Invoke<A=(),R=()> {
fn invoke(self: Box<Self>, arg: A) -> R;
}
impl<A,R,F> Invoke<A,R> for F
where F : FnOnce(A) -> R
{
fn invoke(self: Box<F>, arg: A) -> R {
let f = *self;
f(arg)
}
}
|
invoke
|
identifier_name
|
thunk.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Because this module is temporary...
#![allow(missing_docs)]
#![unstable(feature = "std_misc")]
use alloc::boxed::Box;
use core::marker::Send;
use core::ops::FnOnce;
pub struct Thunk<A=(),R=()> {
invoke: Box<Invoke<A,R>+Send>
}
impl<R> Thunk<(),R> {
pub fn new<F>(func: F) -> Thunk<(),R>
where F : FnOnce() -> R, F : Send
|
}
impl<A,R> Thunk<A,R> {
pub fn with_arg<F>(func: F) -> Thunk<A,R>
where F : FnOnce(A) -> R, F : Send
{
Thunk {
invoke: box func
}
}
pub fn invoke(self, arg: A) -> R {
self.invoke.invoke(arg)
}
}
pub trait Invoke<A=(),R=()> {
fn invoke(self: Box<Self>, arg: A) -> R;
}
impl<A,R,F> Invoke<A,R> for F
where F : FnOnce(A) -> R
{
fn invoke(self: Box<F>, arg: A) -> R {
let f = *self;
f(arg)
}
}
|
{
Thunk::with_arg(move|()| func())
}
|
identifier_body
|
regionmanip.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// #![warn(deprecated_mode)]
pub use self::WfConstraint::*;
use middle::infer::GenericKind;
use middle::subst::{ParamSpace, Subst, Substs};
use middle::ty::{self, Ty};
use middle::ty_fold::{TypeFolder};
use syntax::ast;
use util::ppaux::Repr;
// Helper functions related to manipulating region types.
pub enum WfConstraint<'tcx> {
RegionSubRegionConstraint(Option<Ty<'tcx>>, ty::Region, ty::Region),
RegionSubGenericConstraint(Option<Ty<'tcx>>, ty::Region, GenericKind<'tcx>),
}
struct Wf<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
stack: Vec<(ty::Region, Option<Ty<'tcx>>)>,
out: Vec<WfConstraint<'tcx>>,
}
/// This routine computes the well-formedness constraints that must hold for the type `ty` to
/// appear in a context with lifetime `outer_region`
pub fn region_wf_constraints<'tcx>(
tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
outer_region: ty::Region)
-> Vec<WfConstraint<'tcx>>
{
let mut stack = Vec::new();
stack.push((outer_region, None));
let mut wf = Wf { tcx: tcx,
stack: stack,
out: Vec::new() };
wf.accumulate_from_ty(ty);
wf.out
}
impl<'a, 'tcx> Wf<'a, 'tcx> {
fn accumulate_from_ty(&mut self, ty: Ty<'tcx>) {
debug!("Wf::accumulate_from_ty(ty={})",
ty.repr(self.tcx));
match ty.sty {
ty::ty_bool |
ty::ty_char |
ty::ty_int(..) |
ty::ty_uint(..) |
ty::ty_float(..) |
ty::ty_bare_fn(..) |
ty::ty_err |
ty::ty_str => {
// No borrowed content reachable here.
}
ty::ty_unboxed_closure(_, region, _) => {
// An "unboxed closure type" is basically
// modeled here as equivalent to a struct like
//
// struct TheClosure<'b> {
// ...
// }
//
// where the `'b` is the lifetime bound of the
// contents (i.e., all contents must outlive 'b).
//
// Even though unboxed closures are glorified structs
// of upvars, we do not need to consider them as they
// can't generate any new constraints. The
// substitutions on the closure are equal to the free
// substitutions of the enclosing parameter
// environment. An upvar captured by value has the
// same type as the original local variable which is
// already checked for consistency. If the upvar is
// captured by reference it must also outlive the
// region bound on the closure, but this is explicitly
// handled by logic in regionck.
self.push_region_constraint_from_top(*region);
}
ty::ty_trait(ref t) => {
let required_region_bounds =
ty::object_region_bounds(self.tcx, Some(&t.principal), t.bounds.builtin_bounds);
self.accumulate_from_object_ty(ty, t.bounds.region_bound, required_region_bounds)
}
ty::ty_enum(def_id, substs) |
ty::ty_struct(def_id, substs) => {
let item_scheme = ty::lookup_item_type(self.tcx, def_id);
self.accumulate_from_adt(ty, def_id, &item_scheme.generics, substs)
}
ty::ty_vec(t, _) |
ty::ty_ptr(ty::mt { ty: t,.. }) |
ty::ty_uniq(t) => {
self.accumulate_from_ty(t)
}
ty::ty_rptr(r_b, mt) => {
self.accumulate_from_rptr(ty, *r_b, mt.ty);
}
ty::ty_param(p) => {
self.push_param_constraint_from_top(p);
}
ty::ty_projection(ref data) => {
// `<T as TraitRef<..>>::Name`
self.push_projection_constraint_from_top(data);
// this seems like a minimal requirement:
let trait_def = ty::lookup_trait_def(self.tcx, data.trait_ref.def_id);
self.accumulate_from_adt(ty, data.trait_ref.def_id,
&trait_def.generics, data.trait_ref.substs)
}
ty::ty_tup(ref tuptys) => {
for &tupty in tuptys.iter() {
self.accumulate_from_ty(tupty);
}
}
ty::ty_infer(_) => {
// This should not happen, BUT:
//
// Currently we uncover region relationships on
// entering the fn check. We should do this after
// the fn check, then we can call this case a bug().
}
ty::ty_open(_) => {
self.tcx.sess.bug(
&format!("Unexpected type encountered while doing wf check: {}",
ty.repr(self.tcx))[]);
}
}
}
fn accumulate_from_rptr(&mut self,
ty: Ty<'tcx>,
r_b: ty::Region,
ty_b: Ty<'tcx>) {
// We are walking down a type like this, and current
// position is indicated by caret:
//
// &'a &'b ty_b
// ^
//
// At this point, top of stack will be `'a`. We must
// require that `'a <= 'b`.
self.push_region_constraint_from_top(r_b);
// Now we push `'b` onto the stack, because it must
// constrain any borrowed content we find within `T`.
self.stack.push((r_b, Some(ty)));
self.accumulate_from_ty(ty_b);
self.stack.pop().unwrap();
}
/// Pushes a constraint that `r_b` must outlive the top region on the stack.
fn push_region_constraint_from_top(&mut self,
r_b: ty::Region)
|
/// Pushes a constraint that `r_a <= r_b`, due to `opt_ty`
fn push_sub_region_constraint(&mut self,
opt_ty: Option<Ty<'tcx>>,
r_a: ty::Region,
r_b: ty::Region) {
self.out.push(RegionSubRegionConstraint(opt_ty, r_a, r_b));
}
/// Pushes a constraint that `param_ty` must outlive the top region on the stack.
fn push_param_constraint_from_top(&mut self,
param_ty: ty::ParamTy) {
let &(region, opt_ty) = self.stack.last().unwrap();
self.push_param_constraint(region, opt_ty, param_ty);
}
/// Pushes a constraint that `projection_ty` must outlive the top region on the stack.
fn push_projection_constraint_from_top(&mut self,
projection_ty: &ty::ProjectionTy<'tcx>) {
let &(region, opt_ty) = self.stack.last().unwrap();
self.out.push(RegionSubGenericConstraint(
opt_ty, region, GenericKind::Projection(projection_ty.clone())));
}
/// Pushes a constraint that `region <= param_ty`, due to `opt_ty`
fn push_param_constraint(&mut self,
region: ty::Region,
opt_ty: Option<Ty<'tcx>>,
param_ty: ty::ParamTy) {
self.out.push(RegionSubGenericConstraint(
opt_ty, region, GenericKind::Param(param_ty)));
}
fn accumulate_from_adt(&mut self,
ty: Ty<'tcx>,
def_id: ast::DefId,
generics: &ty::Generics<'tcx>,
substs: &Substs<'tcx>)
{
// The generic declarations from the type, appropriately
// substituted for the actual substitutions.
let generics = generics.subst(self.tcx, substs);
// Variance of each type/region parameter.
let variances = ty::item_variances(self.tcx, def_id);
for &space in ParamSpace::all().iter() {
let region_params = substs.regions().get_slice(space);
let region_variances = variances.regions.get_slice(space);
let region_param_defs = generics.regions.get_slice(space);
assert_eq!(region_params.len(), region_variances.len());
            for (&region_param, (&region_variance, region_param_def)) in
region_params.iter().zip(
region_variances.iter().zip(
region_param_defs.iter()))
{
match region_variance {
ty::Covariant | ty::Bivariant => {
// Ignore covariant or bivariant region
// parameters. To understand why, consider a
// struct `Foo<'a>`. If `Foo` contains any
// references with lifetime `'a`, then `'a` must
// be at least contravariant (and possibly
// invariant). The only way to have a covariant
// result is if `Foo` contains only a field with a
// type like `fn() -> &'a T`; i.e., a bare
// function that can produce a reference of
// lifetime `'a`. In this case, there is no
// *actual data* with lifetime `'a` that is
// reachable. (Presumably this bare function is
// really returning static data.)
}
ty::Contravariant | ty::Invariant => {
// If the parameter is contravariant or
// invariant, there may indeed be reachable
// data with this lifetime. See other case for
// more details.
self.push_region_constraint_from_top(region_param);
}
}
                for &region_bound in region_param_def.bounds.iter() {
// The type declared a constraint like
//
// 'b : 'a
//
// which means that `'a <= 'b` (after
// substitution). So take the region we
// substituted for `'a` (`region_bound`) and make
// it a subregion of the region we substituted
// `'b` (`region_param`).
self.push_sub_region_constraint(
Some(ty), region_bound, region_param);
}
}
let types = substs.types.get_slice(space);
let type_variances = variances.types.get_slice(space);
let type_param_defs = generics.types.get_slice(space);
assert_eq!(types.len(), type_variances.len());
for (&type_param_ty, (&variance, type_param_def)) in
types.iter().zip(
type_variances.iter().zip(
type_param_defs.iter()))
{
debug!("type_param_ty={} variance={}",
type_param_ty.repr(self.tcx),
variance.repr(self.tcx));
match variance {
ty::Contravariant | ty::Bivariant => {
// As above, except that in this it is a
                        // *contravariant* reference that indicates that no
// actual data of type T is reachable.
}
ty::Covariant | ty::Invariant => {
self.accumulate_from_ty(type_param_ty);
}
}
// Inspect bounds on this type parameter for any
// region bounds.
for &r in type_param_def.bounds.region_bounds.iter() {
self.stack.push((r, Some(ty)));
self.accumulate_from_ty(type_param_ty);
self.stack.pop().unwrap();
}
}
}
}
fn accumulate_from_object_ty(&mut self,
ty: Ty<'tcx>,
region_bound: ty::Region,
required_region_bounds: Vec<ty::Region>)
{
// Imagine a type like this:
//
// trait Foo { }
// trait Bar<'c> : 'c { }
//
// &'b (Foo+'c+Bar<'d>)
// ^
//
// In this case, the following relationships must hold:
//
// 'b <= 'c
// 'd <= 'c
//
        // The first condition is due to the normal region pointer
// rules, which say that a reference cannot outlive its
// referent.
//
// The final condition may be a bit surprising. In particular,
// you may expect that it would have been `'c <= 'd`, since
// usually lifetimes of outer things are conservative
// approximations for inner things. However, it works somewhat
// differently with trait objects: here the idea is that if the
// user specifies a region bound (`'c`, in this case) it is the
// "master bound" that *implies* that bounds from other traits are
// all met. (Remember that *all bounds* in a type like
// `Foo+Bar+Zed` must be met, not just one, hence if we write
// `Foo<'x>+Bar<'y>`, we know that the type outlives *both* 'x and
// 'y.)
//
// Note: in fact we only permit builtin traits, not `Bar<'d>`, I
// am looking forward to the future here.
// The content of this object type must outlive
// `bounds.region_bound`:
let r_c = region_bound;
self.push_region_constraint_from_top(r_c);
// And then, in turn, to be well-formed, the
// `region_bound` that user specified must imply the
// region bounds required from all of the trait types:
for &r_d in required_region_bounds.iter() {
// Each of these is an instance of the `'c <= 'b`
// constraint above
self.out.push(RegionSubRegionConstraint(Some(ty), r_d, r_c));
}
}
}
impl<'tcx> Repr<'tcx> for WfConstraint<'tcx> {
fn repr(&self, tcx: &ty::ctxt<'tcx>) -> String {
match *self {
RegionSubRegionConstraint(_, ref r_a, ref r_b) => {
format!("RegionSubRegionConstraint({}, {})",
r_a.repr(tcx),
r_b.repr(tcx))
}
RegionSubGenericConstraint(_, ref r, ref p) => {
format!("RegionSubGenericConstraint({}, {})",
r.repr(tcx),
p.repr(tcx))
}
}
}
}
|
{
// Indicates that we have found borrowed content with a lifetime
// of at least `r_b`. This adds a constraint that `r_b` must
// outlive the region `r_a` on top of the stack.
//
// As an example, imagine walking a type like:
//
// &'a &'b T
// ^
//
// when we hit the inner pointer (indicated by caret), `'a` will
// be on top of stack and `'b` will be the lifetime of the content
// we just found. So we add constraint that `'a <= 'b`.
let &(r_a, opt_ty) = self.stack.last().unwrap();
self.push_sub_region_constraint(opt_ty, r_a, r_b);
}
|
identifier_body
|
regionmanip.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// #![warn(deprecated_mode)]
pub use self::WfConstraint::*;
use middle::infer::GenericKind;
use middle::subst::{ParamSpace, Subst, Substs};
use middle::ty::{self, Ty};
use middle::ty_fold::{TypeFolder};
use syntax::ast;
use util::ppaux::Repr;
// Helper functions related to manipulating region types.
pub enum WfConstraint<'tcx> {
RegionSubRegionConstraint(Option<Ty<'tcx>>, ty::Region, ty::Region),
RegionSubGenericConstraint(Option<Ty<'tcx>>, ty::Region, GenericKind<'tcx>),
}
struct Wf<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
stack: Vec<(ty::Region, Option<Ty<'tcx>>)>,
out: Vec<WfConstraint<'tcx>>,
}
/// This routine computes the well-formedness constraints that must hold for the type `ty` to
/// appear in a context with lifetime `outer_region`
pub fn region_wf_constraints<'tcx>(
tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
outer_region: ty::Region)
-> Vec<WfConstraint<'tcx>>
{
let mut stack = Vec::new();
stack.push((outer_region, None));
let mut wf = Wf { tcx: tcx,
stack: stack,
out: Vec::new() };
wf.accumulate_from_ty(ty);
wf.out
}
impl<'a, 'tcx> Wf<'a, 'tcx> {
fn accumulate_from_ty(&mut self, ty: Ty<'tcx>) {
debug!("Wf::accumulate_from_ty(ty={})",
ty.repr(self.tcx));
match ty.sty {
ty::ty_bool |
ty::ty_char |
ty::ty_int(..) |
ty::ty_uint(..) |
ty::ty_float(..) |
ty::ty_bare_fn(..) |
ty::ty_err |
ty::ty_str => {
// No borrowed content reachable here.
}
ty::ty_unboxed_closure(_, region, _) => {
// An "unboxed closure type" is basically
// modeled here as equivalent to a struct like
//
// struct TheClosure<'b> {
// ...
// }
//
// where the `'b` is the lifetime bound of the
// contents (i.e., all contents must outlive 'b).
//
// Even though unboxed closures are glorified structs
// of upvars, we do not need to consider them as they
// can't generate any new constraints. The
// substitutions on the closure are equal to the free
// substitutions of the enclosing parameter
// environment. An upvar captured by value has the
// same type as the original local variable which is
// already checked for consistency. If the upvar is
// captured by reference it must also outlive the
// region bound on the closure, but this is explicitly
// handled by logic in regionck.
self.push_region_constraint_from_top(*region);
}
ty::ty_trait(ref t) => {
let required_region_bounds =
ty::object_region_bounds(self.tcx, Some(&t.principal), t.bounds.builtin_bounds);
self.accumulate_from_object_ty(ty, t.bounds.region_bound, required_region_bounds)
}
ty::ty_enum(def_id, substs) |
ty::ty_struct(def_id, substs) => {
let item_scheme = ty::lookup_item_type(self.tcx, def_id);
self.accumulate_from_adt(ty, def_id, &item_scheme.generics, substs)
}
ty::ty_vec(t, _) |
ty::ty_ptr(ty::mt { ty: t,.. }) |
ty::ty_uniq(t) => {
self.accumulate_from_ty(t)
}
ty::ty_rptr(r_b, mt) => {
self.accumulate_from_rptr(ty, *r_b, mt.ty);
}
ty::ty_param(p) => {
self.push_param_constraint_from_top(p);
}
ty::ty_projection(ref data) => {
// `<T as TraitRef<..>>::Name`
self.push_projection_constraint_from_top(data);
// this seems like a minimal requirement:
let trait_def = ty::lookup_trait_def(self.tcx, data.trait_ref.def_id);
self.accumulate_from_adt(ty, data.trait_ref.def_id,
&trait_def.generics, data.trait_ref.substs)
}
ty::ty_tup(ref tuptys) => {
for &tupty in tuptys.iter() {
self.accumulate_from_ty(tupty);
}
}
ty::ty_infer(_) => {
// This should not happen, BUT:
//
// Currently we uncover region relationships on
// entering the fn check. We should do this after
// the fn check, then we can call this case a bug().
}
ty::ty_open(_) => {
self.tcx.sess.bug(
&format!("Unexpected type encountered while doing wf check: {}",
ty.repr(self.tcx))[]);
}
}
}
fn accumulate_from_rptr(&mut self,
ty: Ty<'tcx>,
r_b: ty::Region,
ty_b: Ty<'tcx>) {
// We are walking down a type like this, and current
// position is indicated by caret:
//
// &'a &'b ty_b
// ^
//
// At this point, top of stack will be `'a`. We must
// require that `'a <= 'b`.
self.push_region_constraint_from_top(r_b);
// Now we push `'b` onto the stack, because it must
// constrain any borrowed content we find within `T`.
self.stack.push((r_b, Some(ty)));
self.accumulate_from_ty(ty_b);
self.stack.pop().unwrap();
}
/// Pushes a constraint that `r_b` must outlive the top region on the stack.
fn push_region_constraint_from_top(&mut self,
r_b: ty::Region) {
// Indicates that we have found borrowed content with a lifetime
// of at least `r_b`. This adds a constraint that `r_b` must
// outlive the region `r_a` on top of the stack.
//
// As an example, imagine walking a type like:
//
// &'a &'b T
// ^
//
// when we hit the inner pointer (indicated by caret), `'a` will
// be on top of stack and `'b` will be the lifetime of the content
// we just found. So we add constraint that `'a <= 'b`.
let &(r_a, opt_ty) = self.stack.last().unwrap();
self.push_sub_region_constraint(opt_ty, r_a, r_b);
}
/// Pushes a constraint that `r_a <= r_b`, due to `opt_ty`
fn push_sub_region_constraint(&mut self,
opt_ty: Option<Ty<'tcx>>,
r_a: ty::Region,
r_b: ty::Region) {
self.out.push(RegionSubRegionConstraint(opt_ty, r_a, r_b));
}
/// Pushes a constraint that `param_ty` must outlive the top region on the stack.
fn push_param_constraint_from_top(&mut self,
param_ty: ty::ParamTy) {
let &(region, opt_ty) = self.stack.last().unwrap();
self.push_param_constraint(region, opt_ty, param_ty);
}
/// Pushes a constraint that `projection_ty` must outlive the top region on the stack.
fn push_projection_constraint_from_top(&mut self,
projection_ty: &ty::ProjectionTy<'tcx>) {
let &(region, opt_ty) = self.stack.last().unwrap();
self.out.push(RegionSubGenericConstraint(
opt_ty, region, GenericKind::Projection(projection_ty.clone())));
}
/// Pushes a constraint that `region <= param_ty`, due to `opt_ty`
fn push_param_constraint(&mut self,
region: ty::Region,
opt_ty: Option<Ty<'tcx>>,
param_ty: ty::ParamTy) {
self.out.push(RegionSubGenericConstraint(
opt_ty, region, GenericKind::Param(param_ty)));
}
fn
|
(&mut self,
ty: Ty<'tcx>,
def_id: ast::DefId,
generics: &ty::Generics<'tcx>,
substs: &Substs<'tcx>)
{
// The generic declarations from the type, appropriately
// substituted for the actual substitutions.
let generics = generics.subst(self.tcx, substs);
// Variance of each type/region parameter.
let variances = ty::item_variances(self.tcx, def_id);
for &space in ParamSpace::all().iter() {
let region_params = substs.regions().get_slice(space);
let region_variances = variances.regions.get_slice(space);
let region_param_defs = generics.regions.get_slice(space);
assert_eq!(region_params.len(), region_variances.len());
            for (&region_param, (&region_variance, region_param_def)) in
region_params.iter().zip(
region_variances.iter().zip(
region_param_defs.iter()))
{
match region_variance {
ty::Covariant | ty::Bivariant => {
// Ignore covariant or bivariant region
// parameters. To understand why, consider a
// struct `Foo<'a>`. If `Foo` contains any
// references with lifetime `'a`, then `'a` must
// be at least contravariant (and possibly
// invariant). The only way to have a covariant
// result is if `Foo` contains only a field with a
// type like `fn() -> &'a T`; i.e., a bare
// function that can produce a reference of
// lifetime `'a`. In this case, there is no
// *actual data* with lifetime `'a` that is
// reachable. (Presumably this bare function is
// really returning static data.)
}
ty::Contravariant | ty::Invariant => {
// If the parameter is contravariant or
// invariant, there may indeed be reachable
// data with this lifetime. See other case for
// more details.
self.push_region_constraint_from_top(region_param);
}
}
                for &region_bound in region_param_def.bounds.iter() {
// The type declared a constraint like
//
// 'b : 'a
//
// which means that `'a <= 'b` (after
// substitution). So take the region we
// substituted for `'a` (`region_bound`) and make
// it a subregion of the region we substituted
// `'b` (`region_param`).
self.push_sub_region_constraint(
Some(ty), region_bound, region_param);
}
}
let types = substs.types.get_slice(space);
let type_variances = variances.types.get_slice(space);
let type_param_defs = generics.types.get_slice(space);
assert_eq!(types.len(), type_variances.len());
for (&type_param_ty, (&variance, type_param_def)) in
types.iter().zip(
type_variances.iter().zip(
type_param_defs.iter()))
{
debug!("type_param_ty={} variance={}",
type_param_ty.repr(self.tcx),
variance.repr(self.tcx));
match variance {
ty::Contravariant | ty::Bivariant => {
                        // As above, except that in this case it is a
                        // *contravariant* reference that indicates that no
                        // actual data of type T is reachable.
}
ty::Covariant | ty::Invariant => {
self.accumulate_from_ty(type_param_ty);
}
}
// Inspect bounds on this type parameter for any
// region bounds.
for &r in type_param_def.bounds.region_bounds.iter() {
self.stack.push((r, Some(ty)));
self.accumulate_from_ty(type_param_ty);
self.stack.pop().unwrap();
}
}
}
}
fn accumulate_from_object_ty(&mut self,
ty: Ty<'tcx>,
region_bound: ty::Region,
required_region_bounds: Vec<ty::Region>)
{
// Imagine a type like this:
//
// trait Foo { }
// trait Bar<'c> : 'c { }
//
// &'b (Foo+'c+Bar<'d>)
// ^
//
// In this case, the following relationships must hold:
//
// 'b <= 'c
// 'd <= 'c
//
        // The first condition is due to the normal region pointer
// rules, which say that a reference cannot outlive its
// referent.
//
// The final condition may be a bit surprising. In particular,
// you may expect that it would have been `'c <= 'd`, since
// usually lifetimes of outer things are conservative
// approximations for inner things. However, it works somewhat
// differently with trait objects: here the idea is that if the
// user specifies a region bound (`'c`, in this case) it is the
// "master bound" that *implies* that bounds from other traits are
// all met. (Remember that *all bounds* in a type like
// `Foo+Bar+Zed` must be met, not just one, hence if we write
// `Foo<'x>+Bar<'y>`, we know that the type outlives *both* 'x and
// 'y.)
//
        // Note: in fact we only permit builtin traits, not `Bar<'d>`; I
        // am looking forward to the future here.
// The content of this object type must outlive
// `bounds.region_bound`:
let r_c = region_bound;
self.push_region_constraint_from_top(r_c);
// And then, in turn, to be well-formed, the
// `region_bound` that user specified must imply the
// region bounds required from all of the trait types:
for &r_d in required_region_bounds.iter() {
// Each of these is an instance of the `'c <= 'b`
// constraint above
self.out.push(RegionSubRegionConstraint(Some(ty), r_d, r_c));
}
}
}
impl<'tcx> Repr<'tcx> for WfConstraint<'tcx> {
fn repr(&self, tcx: &ty::ctxt<'tcx>) -> String {
match *self {
RegionSubRegionConstraint(_, ref r_a, ref r_b) => {
format!("RegionSubRegionConstraint({}, {})",
r_a.repr(tcx),
r_b.repr(tcx))
}
RegionSubGenericConstraint(_, ref r, ref p) => {
format!("RegionSubGenericConstraint({}, {})",
r.repr(tcx),
p.repr(tcx))
}
}
}
}
|
accumulate_from_adt
|
identifier_name
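The regionmanip.rs record above describes a stack-based walk: entering `&'b T` first emits `'a <= 'b` against the region currently on top of the stack, then pushes `'b` before descending into `T`. A minimal stand-alone sketch of that discipline, using made-up `Region`/`Ty` stand-ins rather than rustc's real types:

// Hypothetical, simplified model of the constraint walk described above.
// `Region` and `Ty` are stand-ins, not the compiler's types.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Region {
    Named(&'static str),
}

#[derive(Debug)]
enum Ty {
    Int,
    Ref(Region, Box<Ty>), // &'r T
}

/// Constraint "r_a <= r_b": borrowed content of lifetime r_b must outlive r_a.
#[derive(Debug)]
struct SubRegion(Region, Region);

fn collect(ty: &Ty, stack: &mut Vec<Region>, out: &mut Vec<SubRegion>) {
    match ty {
        Ty::Int => {}
        Ty::Ref(r_b, inner) => {
            // Top of stack is the region this reference must be valid for.
            let r_a = *stack.last().unwrap();
            out.push(SubRegion(r_a, *r_b));
            // `r_b` now constrains anything reachable through the reference.
            stack.push(*r_b);
            collect(inner, stack, out);
            stack.pop();
        }
    }
}

fn main() {
    // &'a &'b int
    let ty = Ty::Ref(
        Region::Named("'a"),
        Box::new(Ty::Ref(Region::Named("'b"), Box::new(Ty::Int))),
    );
    let mut out = Vec::new();
    collect(&ty, &mut vec![Region::Named("'outer")], &mut out);
    // Expect: 'outer <= 'a, then 'a <= 'b.
    println!("{:?}", out);
}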
|
regionmanip.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// #![warn(deprecated_mode)]
pub use self::WfConstraint::*;
use middle::infer::GenericKind;
use middle::subst::{ParamSpace, Subst, Substs};
use middle::ty::{self, Ty};
use middle::ty_fold::{TypeFolder};
use syntax::ast;
use util::ppaux::Repr;
// Helper functions related to manipulating region types.
pub enum WfConstraint<'tcx> {
RegionSubRegionConstraint(Option<Ty<'tcx>>, ty::Region, ty::Region),
RegionSubGenericConstraint(Option<Ty<'tcx>>, ty::Region, GenericKind<'tcx>),
}
struct Wf<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
stack: Vec<(ty::Region, Option<Ty<'tcx>>)>,
out: Vec<WfConstraint<'tcx>>,
}
/// This routine computes the well-formedness constraints that must hold for the type `ty` to
/// appear in a context with lifetime `outer_region`
pub fn region_wf_constraints<'tcx>(
tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
outer_region: ty::Region)
-> Vec<WfConstraint<'tcx>>
{
let mut stack = Vec::new();
stack.push((outer_region, None));
let mut wf = Wf { tcx: tcx,
stack: stack,
out: Vec::new() };
wf.accumulate_from_ty(ty);
wf.out
}
impl<'a, 'tcx> Wf<'a, 'tcx> {
fn accumulate_from_ty(&mut self, ty: Ty<'tcx>) {
debug!("Wf::accumulate_from_ty(ty={})",
ty.repr(self.tcx));
match ty.sty {
ty::ty_bool |
ty::ty_char |
ty::ty_int(..) |
ty::ty_uint(..) |
ty::ty_float(..) |
ty::ty_bare_fn(..) |
ty::ty_err |
ty::ty_str => {
// No borrowed content reachable here.
}
ty::ty_unboxed_closure(_, region, _) => {
// An "unboxed closure type" is basically
// modeled here as equivalent to a struct like
//
// struct TheClosure<'b> {
// ...
// }
//
// where the `'b` is the lifetime bound of the
// contents (i.e., all contents must outlive 'b).
//
// Even though unboxed closures are glorified structs
// of upvars, we do not need to consider them as they
// can't generate any new constraints. The
// substitutions on the closure are equal to the free
// substitutions of the enclosing parameter
// environment. An upvar captured by value has the
// same type as the original local variable which is
// already checked for consistency. If the upvar is
// captured by reference it must also outlive the
// region bound on the closure, but this is explicitly
// handled by logic in regionck.
self.push_region_constraint_from_top(*region);
}
ty::ty_trait(ref t) => {
let required_region_bounds =
ty::object_region_bounds(self.tcx, Some(&t.principal), t.bounds.builtin_bounds);
self.accumulate_from_object_ty(ty, t.bounds.region_bound, required_region_bounds)
}
ty::ty_enum(def_id, substs) |
ty::ty_struct(def_id, substs) => {
let item_scheme = ty::lookup_item_type(self.tcx, def_id);
self.accumulate_from_adt(ty, def_id, &item_scheme.generics, substs)
}
ty::ty_vec(t, _) |
ty::ty_ptr(ty::mt { ty: t,.. }) |
ty::ty_uniq(t) => {
self.accumulate_from_ty(t)
}
ty::ty_rptr(r_b, mt) => {
self.accumulate_from_rptr(ty, *r_b, mt.ty);
}
ty::ty_param(p) => {
self.push_param_constraint_from_top(p);
}
ty::ty_projection(ref data) => {
// `<T as TraitRef<..>>::Name`
self.push_projection_constraint_from_top(data);
// this seems like a minimal requirement:
let trait_def = ty::lookup_trait_def(self.tcx, data.trait_ref.def_id);
self.accumulate_from_adt(ty, data.trait_ref.def_id,
&trait_def.generics, data.trait_ref.substs)
}
ty::ty_tup(ref tuptys) => {
for &tupty in tuptys.iter() {
self.accumulate_from_ty(tupty);
}
}
ty::ty_infer(_) => {
// This should not happen, BUT:
//
// Currently we uncover region relationships on
// entering the fn check. We should do this after
// the fn check, then we can call this case a bug().
}
ty::ty_open(_) => {
self.tcx.sess.bug(
&format!("Unexpected type encountered while doing wf check: {}",
ty.repr(self.tcx))[]);
}
}
}
fn accumulate_from_rptr(&mut self,
ty: Ty<'tcx>,
r_b: ty::Region,
ty_b: Ty<'tcx>) {
// We are walking down a type like this, and current
// position is indicated by caret:
//
// &'a &'b ty_b
// ^
//
// At this point, top of stack will be `'a`. We must
// require that `'a <= 'b`.
self.push_region_constraint_from_top(r_b);
// Now we push `'b` onto the stack, because it must
// constrain any borrowed content we find within `T`.
self.stack.push((r_b, Some(ty)));
self.accumulate_from_ty(ty_b);
self.stack.pop().unwrap();
}
/// Pushes a constraint that `r_b` must outlive the top region on the stack.
fn push_region_constraint_from_top(&mut self,
r_b: ty::Region) {
// Indicates that we have found borrowed content with a lifetime
// of at least `r_b`. This adds a constraint that `r_b` must
// outlive the region `r_a` on top of the stack.
//
// As an example, imagine walking a type like:
//
// &'a &'b T
// ^
//
// when we hit the inner pointer (indicated by caret), `'a` will
// be on top of stack and `'b` will be the lifetime of the content
// we just found. So we add constraint that `'a <= 'b`.
let &(r_a, opt_ty) = self.stack.last().unwrap();
self.push_sub_region_constraint(opt_ty, r_a, r_b);
}
/// Pushes a constraint that `r_a <= r_b`, due to `opt_ty`
fn push_sub_region_constraint(&mut self,
opt_ty: Option<Ty<'tcx>>,
r_a: ty::Region,
r_b: ty::Region) {
self.out.push(RegionSubRegionConstraint(opt_ty, r_a, r_b));
}
/// Pushes a constraint that `param_ty` must outlive the top region on the stack.
fn push_param_constraint_from_top(&mut self,
param_ty: ty::ParamTy) {
let &(region, opt_ty) = self.stack.last().unwrap();
self.push_param_constraint(region, opt_ty, param_ty);
}
/// Pushes a constraint that `projection_ty` must outlive the top region on the stack.
fn push_projection_constraint_from_top(&mut self,
projection_ty: &ty::ProjectionTy<'tcx>) {
let &(region, opt_ty) = self.stack.last().unwrap();
self.out.push(RegionSubGenericConstraint(
opt_ty, region, GenericKind::Projection(projection_ty.clone())));
}
/// Pushes a constraint that `region <= param_ty`, due to `opt_ty`
fn push_param_constraint(&mut self,
region: ty::Region,
opt_ty: Option<Ty<'tcx>>,
param_ty: ty::ParamTy) {
self.out.push(RegionSubGenericConstraint(
opt_ty, region, GenericKind::Param(param_ty)));
}
fn accumulate_from_adt(&mut self,
ty: Ty<'tcx>,
def_id: ast::DefId,
generics: &ty::Generics<'tcx>,
substs: &Substs<'tcx>)
{
// The generic declarations from the type, appropriately
// substituted for the actual substitutions.
let generics = generics.subst(self.tcx, substs);
// Variance of each type/region parameter.
let variances = ty::item_variances(self.tcx, def_id);
for &space in ParamSpace::all().iter() {
let region_params = substs.regions().get_slice(space);
let region_variances = variances.regions.get_slice(space);
let region_param_defs = generics.regions.get_slice(space);
assert_eq!(region_params.len(), region_variances.len());
            for (&region_param, (&region_variance, region_param_def)) in
region_params.iter().zip(
region_variances.iter().zip(
region_param_defs.iter()))
{
match region_variance {
ty::Covariant | ty::Bivariant =>
|
ty::Contravariant | ty::Invariant => {
// If the parameter is contravariant or
// invariant, there may indeed be reachable
// data with this lifetime. See other case for
// more details.
self.push_region_constraint_from_top(region_param);
}
}
                for &region_bound in region_param_def.bounds.iter() {
// The type declared a constraint like
//
// 'b : 'a
//
// which means that `'a <= 'b` (after
// substitution). So take the region we
// substituted for `'a` (`region_bound`) and make
// it a subregion of the region we substituted
// `'b` (`region_param`).
self.push_sub_region_constraint(
Some(ty), region_bound, region_param);
}
}
let types = substs.types.get_slice(space);
let type_variances = variances.types.get_slice(space);
let type_param_defs = generics.types.get_slice(space);
assert_eq!(types.len(), type_variances.len());
for (&type_param_ty, (&variance, type_param_def)) in
types.iter().zip(
type_variances.iter().zip(
type_param_defs.iter()))
{
debug!("type_param_ty={} variance={}",
type_param_ty.repr(self.tcx),
variance.repr(self.tcx));
match variance {
ty::Contravariant | ty::Bivariant => {
                        // As above, except that in this case it is a
                        // *contravariant* reference that indicates that no
                        // actual data of type T is reachable.
}
ty::Covariant | ty::Invariant => {
self.accumulate_from_ty(type_param_ty);
}
}
// Inspect bounds on this type parameter for any
// region bounds.
for &r in type_param_def.bounds.region_bounds.iter() {
self.stack.push((r, Some(ty)));
self.accumulate_from_ty(type_param_ty);
self.stack.pop().unwrap();
}
}
}
}
fn accumulate_from_object_ty(&mut self,
ty: Ty<'tcx>,
region_bound: ty::Region,
required_region_bounds: Vec<ty::Region>)
{
// Imagine a type like this:
//
// trait Foo { }
// trait Bar<'c> : 'c { }
//
// &'b (Foo+'c+Bar<'d>)
// ^
//
// In this case, the following relationships must hold:
//
// 'b <= 'c
// 'd <= 'c
//
        // The first condition is due to the normal region pointer
// rules, which say that a reference cannot outlive its
// referent.
//
// The final condition may be a bit surprising. In particular,
// you may expect that it would have been `'c <= 'd`, since
// usually lifetimes of outer things are conservative
// approximations for inner things. However, it works somewhat
// differently with trait objects: here the idea is that if the
// user specifies a region bound (`'c`, in this case) it is the
// "master bound" that *implies* that bounds from other traits are
// all met. (Remember that *all bounds* in a type like
// `Foo+Bar+Zed` must be met, not just one, hence if we write
// `Foo<'x>+Bar<'y>`, we know that the type outlives *both* 'x and
// 'y.)
//
        // Note: in fact we only permit builtin traits, not `Bar<'d>`; I
        // am looking forward to the future here.
// The content of this object type must outlive
// `bounds.region_bound`:
let r_c = region_bound;
self.push_region_constraint_from_top(r_c);
// And then, in turn, to be well-formed, the
// `region_bound` that user specified must imply the
// region bounds required from all of the trait types:
for &r_d in required_region_bounds.iter() {
// Each of these is an instance of the `'c <= 'b`
// constraint above
self.out.push(RegionSubRegionConstraint(Some(ty), r_d, r_c));
}
}
}
impl<'tcx> Repr<'tcx> for WfConstraint<'tcx> {
fn repr(&self, tcx: &ty::ctxt<'tcx>) -> String {
match *self {
RegionSubRegionConstraint(_, ref r_a, ref r_b) => {
format!("RegionSubRegionConstraint({}, {})",
r_a.repr(tcx),
r_b.repr(tcx))
}
RegionSubGenericConstraint(_, ref r, ref p) => {
format!("RegionSubGenericConstraint({}, {})",
r.repr(tcx),
p.repr(tcx))
}
}
}
}
|
{
// Ignore covariant or bivariant region
// parameters. To understand why, consider a
// struct `Foo<'a>`. If `Foo` contains any
// references with lifetime `'a`, then `'a` must
// be at least contravariant (and possibly
// invariant). The only way to have a covariant
// result is if `Foo` contains only a field with a
// type like `fn() -> &'a T`; i.e., a bare
// function that can produce a reference of
// lifetime `'a`. In this case, there is no
// *actual data* with lifetime `'a` that is
// reachable. (Presumably this bare function is
// really returning static data.)
}
|
conditional_block
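The covariance comment in the record above argues that a struct whose only use of `'a` is a `fn() -> &'a T` field holds no reachable data of lifetime `'a`. A small stand-alone sketch (modern Rust, my own type names, unrelated to the compiler internals) of why such a field leaves the struct covariant in `'a`:

// Hypothetical illustration: `Producer<'a>` stores no data of lifetime 'a,
// only a function that can produce such data, so it is covariant in 'a.
struct Producer<'a> {
    make: fn() -> &'a str,
}

fn make_static() -> &'static str {
    "static data"
}

// 'a here is pinned to the `_anchor` borrow; if `Producer` were invariant in
// 'a, passing a `Producer<'static>` below would not type-check.
fn call_with_short<'a>(p: Producer<'a>, _anchor: &'a str) -> &'a str {
    (p.make)()
}

fn main() {
    let p: Producer<'static> = Producer { make: make_static };
    let local = String::from("short-lived");
    // Covariance: Producer<'static> coerces to Producer<'a> for the shorter 'a.
    let out = call_with_short(p, &local);
    println!("{}", out);
}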
|
regionmanip.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// #![warn(deprecated_mode)]
pub use self::WfConstraint::*;
use middle::infer::GenericKind;
use middle::subst::{ParamSpace, Subst, Substs};
use middle::ty::{self, Ty};
use middle::ty_fold::{TypeFolder};
use syntax::ast;
use util::ppaux::Repr;
// Helper functions related to manipulating region types.
pub enum WfConstraint<'tcx> {
RegionSubRegionConstraint(Option<Ty<'tcx>>, ty::Region, ty::Region),
RegionSubGenericConstraint(Option<Ty<'tcx>>, ty::Region, GenericKind<'tcx>),
}
struct Wf<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
stack: Vec<(ty::Region, Option<Ty<'tcx>>)>,
out: Vec<WfConstraint<'tcx>>,
}
/// This routine computes the well-formedness constraints that must hold for the type `ty` to
/// appear in a context with lifetime `outer_region`
pub fn region_wf_constraints<'tcx>(
tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
outer_region: ty::Region)
-> Vec<WfConstraint<'tcx>>
{
let mut stack = Vec::new();
stack.push((outer_region, None));
let mut wf = Wf { tcx: tcx,
stack: stack,
out: Vec::new() };
wf.accumulate_from_ty(ty);
wf.out
}
impl<'a, 'tcx> Wf<'a, 'tcx> {
fn accumulate_from_ty(&mut self, ty: Ty<'tcx>) {
debug!("Wf::accumulate_from_ty(ty={})",
ty.repr(self.tcx));
match ty.sty {
ty::ty_bool |
ty::ty_char |
ty::ty_int(..) |
ty::ty_uint(..) |
ty::ty_float(..) |
ty::ty_bare_fn(..) |
ty::ty_err |
ty::ty_str => {
// No borrowed content reachable here.
}
ty::ty_unboxed_closure(_, region, _) => {
// An "unboxed closure type" is basically
// modeled here as equivalent to a struct like
//
// struct TheClosure<'b> {
// ...
// }
//
// where the `'b` is the lifetime bound of the
// contents (i.e., all contents must outlive 'b).
//
// Even though unboxed closures are glorified structs
// of upvars, we do not need to consider them as they
// can't generate any new constraints. The
// substitutions on the closure are equal to the free
// substitutions of the enclosing parameter
// environment. An upvar captured by value has the
// same type as the original local variable which is
// already checked for consistency. If the upvar is
// captured by reference it must also outlive the
// region bound on the closure, but this is explicitly
// handled by logic in regionck.
self.push_region_constraint_from_top(*region);
}
ty::ty_trait(ref t) => {
let required_region_bounds =
ty::object_region_bounds(self.tcx, Some(&t.principal), t.bounds.builtin_bounds);
self.accumulate_from_object_ty(ty, t.bounds.region_bound, required_region_bounds)
}
ty::ty_enum(def_id, substs) |
ty::ty_struct(def_id, substs) => {
let item_scheme = ty::lookup_item_type(self.tcx, def_id);
self.accumulate_from_adt(ty, def_id, &item_scheme.generics, substs)
}
ty::ty_vec(t, _) |
ty::ty_ptr(ty::mt { ty: t,.. }) |
ty::ty_uniq(t) => {
self.accumulate_from_ty(t)
}
ty::ty_rptr(r_b, mt) => {
self.accumulate_from_rptr(ty, *r_b, mt.ty);
}
ty::ty_param(p) => {
self.push_param_constraint_from_top(p);
}
ty::ty_projection(ref data) => {
// `<T as TraitRef<..>>::Name`
self.push_projection_constraint_from_top(data);
// this seems like a minimal requirement:
let trait_def = ty::lookup_trait_def(self.tcx, data.trait_ref.def_id);
self.accumulate_from_adt(ty, data.trait_ref.def_id,
&trait_def.generics, data.trait_ref.substs)
}
ty::ty_tup(ref tuptys) => {
for &tupty in tuptys.iter() {
self.accumulate_from_ty(tupty);
}
}
ty::ty_infer(_) => {
// This should not happen, BUT:
//
// Currently we uncover region relationships on
// entering the fn check. We should do this after
// the fn check, then we can call this case a bug().
}
ty::ty_open(_) => {
self.tcx.sess.bug(
&format!("Unexpected type encountered while doing wf check: {}",
ty.repr(self.tcx))[]);
}
}
}
fn accumulate_from_rptr(&mut self,
ty: Ty<'tcx>,
r_b: ty::Region,
ty_b: Ty<'tcx>) {
// We are walking down a type like this, and current
// position is indicated by caret:
//
// &'a &'b ty_b
// ^
//
// At this point, top of stack will be `'a`. We must
// require that `'a <= 'b`.
self.push_region_constraint_from_top(r_b);
// Now we push `'b` onto the stack, because it must
// constrain any borrowed content we find within `T`.
self.stack.push((r_b, Some(ty)));
self.accumulate_from_ty(ty_b);
self.stack.pop().unwrap();
}
/// Pushes a constraint that `r_b` must outlive the top region on the stack.
fn push_region_constraint_from_top(&mut self,
r_b: ty::Region) {
// Indicates that we have found borrowed content with a lifetime
// of at least `r_b`. This adds a constraint that `r_b` must
// outlive the region `r_a` on top of the stack.
//
// As an example, imagine walking a type like:
//
// &'a &'b T
// ^
//
// when we hit the inner pointer (indicated by caret), `'a` will
// be on top of stack and `'b` will be the lifetime of the content
// we just found. So we add constraint that `'a <= 'b`.
let &(r_a, opt_ty) = self.stack.last().unwrap();
self.push_sub_region_constraint(opt_ty, r_a, r_b);
}
/// Pushes a constraint that `r_a <= r_b`, due to `opt_ty`
fn push_sub_region_constraint(&mut self,
opt_ty: Option<Ty<'tcx>>,
r_a: ty::Region,
r_b: ty::Region) {
self.out.push(RegionSubRegionConstraint(opt_ty, r_a, r_b));
}
/// Pushes a constraint that `param_ty` must outlive the top region on the stack.
fn push_param_constraint_from_top(&mut self,
param_ty: ty::ParamTy) {
let &(region, opt_ty) = self.stack.last().unwrap();
self.push_param_constraint(region, opt_ty, param_ty);
}
/// Pushes a constraint that `projection_ty` must outlive the top region on the stack.
fn push_projection_constraint_from_top(&mut self,
projection_ty: &ty::ProjectionTy<'tcx>) {
let &(region, opt_ty) = self.stack.last().unwrap();
self.out.push(RegionSubGenericConstraint(
opt_ty, region, GenericKind::Projection(projection_ty.clone())));
}
/// Pushes a constraint that `region <= param_ty`, due to `opt_ty`
fn push_param_constraint(&mut self,
region: ty::Region,
opt_ty: Option<Ty<'tcx>>,
param_ty: ty::ParamTy) {
self.out.push(RegionSubGenericConstraint(
opt_ty, region, GenericKind::Param(param_ty)));
}
fn accumulate_from_adt(&mut self,
ty: Ty<'tcx>,
def_id: ast::DefId,
generics: &ty::Generics<'tcx>,
substs: &Substs<'tcx>)
{
// The generic declarations from the type, appropriately
// substituted for the actual substitutions.
let generics = generics.subst(self.tcx, substs);
// Variance of each type/region parameter.
let variances = ty::item_variances(self.tcx, def_id);
for &space in ParamSpace::all().iter() {
let region_params = substs.regions().get_slice(space);
let region_variances = variances.regions.get_slice(space);
let region_param_defs = generics.regions.get_slice(space);
assert_eq!(region_params.len(), region_variances.len());
            for (&region_param, (&region_variance, region_param_def)) in
region_params.iter().zip(
region_variances.iter().zip(
region_param_defs.iter()))
{
match region_variance {
ty::Covariant | ty::Bivariant => {
// Ignore covariant or bivariant region
// parameters. To understand why, consider a
// struct `Foo<'a>`. If `Foo` contains any
// references with lifetime `'a`, then `'a` must
// be at least contravariant (and possibly
// invariant). The only way to have a covariant
// result is if `Foo` contains only a field with a
// type like `fn() -> &'a T`; i.e., a bare
// function that can produce a reference of
// lifetime `'a`. In this case, there is no
// *actual data* with lifetime `'a` that is
// reachable. (Presumably this bare function is
// really returning static data.)
}
ty::Contravariant | ty::Invariant => {
// If the parameter is contravariant or
// invariant, there may indeed be reachable
// data with this lifetime. See other case for
// more details.
self.push_region_constraint_from_top(region_param);
}
}
                for &region_bound in region_param_def.bounds.iter() {
// The type declared a constraint like
//
// 'b : 'a
//
// which means that `'a <= 'b` (after
// substitution). So take the region we
// substituted for `'a` (`region_bound`) and make
// it a subregion of the region we substituted
// `'b` (`region_param`).
self.push_sub_region_constraint(
Some(ty), region_bound, region_param);
}
}
let types = substs.types.get_slice(space);
let type_variances = variances.types.get_slice(space);
let type_param_defs = generics.types.get_slice(space);
assert_eq!(types.len(), type_variances.len());
for (&type_param_ty, (&variance, type_param_def)) in
types.iter().zip(
type_variances.iter().zip(
type_param_defs.iter()))
{
debug!("type_param_ty={} variance={}",
type_param_ty.repr(self.tcx),
variance.repr(self.tcx));
match variance {
ty::Contravariant | ty::Bivariant => {
                        // As above, except that in this case it is a
                        // *contravariant* reference that indicates that no
                        // actual data of type T is reachable.
}
ty::Covariant | ty::Invariant => {
self.accumulate_from_ty(type_param_ty);
}
}
// Inspect bounds on this type parameter for any
// region bounds.
for &r in type_param_def.bounds.region_bounds.iter() {
self.stack.push((r, Some(ty)));
self.accumulate_from_ty(type_param_ty);
self.stack.pop().unwrap();
}
}
}
}
fn accumulate_from_object_ty(&mut self,
ty: Ty<'tcx>,
region_bound: ty::Region,
required_region_bounds: Vec<ty::Region>)
{
// Imagine a type like this:
//
// trait Foo { }
// trait Bar<'c> : 'c { }
//
// &'b (Foo+'c+Bar<'d>)
// ^
//
// In this case, the following relationships must hold:
//
// 'b <= 'c
// 'd <= 'c
//
        // The first condition is due to the normal region pointer
// rules, which say that a reference cannot outlive its
// referent.
//
// The final condition may be a bit surprising. In particular,
// you may expect that it would have been `'c <= 'd`, since
// usually lifetimes of outer things are conservative
// approximations for inner things. However, it works somewhat
// differently with trait objects: here the idea is that if the
// user specifies a region bound (`'c`, in this case) it is the
// "master bound" that *implies* that bounds from other traits are
// all met. (Remember that *all bounds* in a type like
// `Foo+Bar+Zed` must be met, not just one, hence if we write
// `Foo<'x>+Bar<'y>`, we know that the type outlives *both* 'x and
// 'y.)
//
        // Note: in fact we only permit builtin traits, not `Bar<'d>`; I
        // am looking forward to the future here.
// The content of this object type must outlive
|
// `region_bound` that user specified must imply the
// region bounds required from all of the trait types:
for &r_d in required_region_bounds.iter() {
// Each of these is an instance of the `'c <= 'b`
// constraint above
self.out.push(RegionSubRegionConstraint(Some(ty), r_d, r_c));
}
}
}
impl<'tcx> Repr<'tcx> for WfConstraint<'tcx> {
fn repr(&self, tcx: &ty::ctxt<'tcx>) -> String {
match *self {
RegionSubRegionConstraint(_, ref r_a, ref r_b) => {
format!("RegionSubRegionConstraint({}, {})",
r_a.repr(tcx),
r_b.repr(tcx))
}
RegionSubGenericConstraint(_, ref r, ref p) => {
format!("RegionSubGenericConstraint({}, {})",
r.repr(tcx),
p.repr(tcx))
}
}
}
}
|
// `bounds.region_bound`:
let r_c = region_bound;
self.push_region_constraint_from_top(r_c);
// And then, in turn, to be well-formed, the
|
random_line_split
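The object-type comment above boils down to requiring `'b <= 'c` for a reference like `&'b (Foo+'c)`. In today's surface syntax that relationship can be written out directly; a tiny sketch with a trait name of my own, not taken from the record:

// Hypothetical sketch of the `'b <= 'c` requirement for `&'b (dyn Foo + 'c)`.
trait Foo {}

impl Foo for u32 {}

// `'c: 'b` spells out exactly the constraint the walker emits: the reference
// lifetime 'b must not outlive the trait object's region bound 'c.
fn inspect<'b, 'c: 'b>(obj: &'b (dyn Foo + 'c)) -> &'b (dyn Foo + 'c) {
    obj
}

fn main() {
    let x: u32 = 7;
    let obj: &(dyn Foo + 'static) = &x;
    let _again = inspect(obj);
}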
|
back.rs
|
//! Back-end module for the task queue. The back-end is running
//! on a separate thread. All it does is listen to a command
//! channel and start new tasks when the time comes.
use std::sync::atomic::*;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{Sender, Receiver, channel};
use std::collections::HashMap;
use std::thread;
use bran;
use pulse::*;
use deque;
use num_cpus;
use {Wait, Schedule, FnBox};
use super::worker;
struct Inner {
index: usize,
stealers: HashMap<usize, deque::Stealer<ReadyTask>>,
workers: HashMap<usize, Sender<worker::Command>>,
joins: Vec<thread::JoinHandle<()>>
}
/// Task queue back-end.
pub struct Backend {
active: AtomicBool,
global_queue: Mutex<deque::Worker<ReadyTask>>,
workers: Mutex<Inner>,
pool: bran::StackPool
}
/// A ready task
pub struct
|
(bran::Handle);
impl ReadyTask {
pub fn run(self) {
use bran::fiber::State;
let ReadyTask(task) = self;
match task.run() {
State::Pending(signal) => {
worker::requeue(task, signal);
}
State::PendingTimeout(_, _) => {
panic!("Timeouts are not supported")
}
State::Finished | State::Panicked => ()
}
}
}
impl Backend {
/// Create a new back-end.
pub fn new() -> Arc<Backend> {
let buffer = deque::BufferPool::new();
let (worker, stealer) = buffer.deque();
let mut map = HashMap::new();
map.insert(0, stealer);
let back = Arc::new(Backend {
active: AtomicBool::new(false),
global_queue: Mutex::new(worker),
workers: Mutex::new(Inner {
index: 1,
stealers: map,
workers: HashMap::new(),
joins: Vec::new()
}),
pool: bran::StackPool::new()
});
for _ in 0..num_cpus::get() {
worker::Worker::new(back.clone()).start();
}
back
}
/// Start a task on the global work queue
fn start_on_global_queue(&self, rt: ReadyTask) {
let guard = self.global_queue.lock().unwrap();
guard.push(rt);
}
    /// Start a task that will run once all the Handles have
/// been completed.
pub fn start(back: Arc<Backend>, task: Box<FnBox+Send>, mut after: Vec<Signal>) {
// Create the wait signal if needed
let signal = if after.len() == 0 {
Signal::pulsed()
} else if after.len() == 1 {
after.pop().unwrap()
} else {
Barrier::new(&after).signal()
};
signal.callback(move || {
            if !back.active.load(Ordering::SeqCst) {
let fiber = bran::fiber::Fiber::spawn_with(move || {
task.call_box(&mut worker::FiberSchedule)
}, back.pool.clone());
let try_thread = worker::start(ReadyTask(fiber));
match try_thread {
Ok(b) => b,
Err(rt) => {
back.start_on_global_queue(rt);
true
}
};
}
});
}
    /// Start a task that will run once all the Handles have
/// been completed.
pub fn enqueue(back: Arc<Backend>, task: bran::Handle, after: Signal) {
after.callback(move || {
            if !back.active.load(Ordering::SeqCst) {
let try_thread = worker::start(ReadyTask(task));
match try_thread {
Ok(b) => b,
Err(rt) => {
back.start_on_global_queue(rt);
true
}
};
}
});
}
/// Kill the backend, wait until the condition is satisfied.
pub fn exit(&self, wait: Wait) {
// read the current active count, OR in the BLOCK
// flag if needed for the wait
match wait {
Wait::None | Wait::Active => {
self.active.store(true, Ordering::SeqCst);
}
Wait::Pending => ()
};
let mut guard = self.workers.lock().unwrap();
for (_, send) in guard.workers.iter() {
let _ = send.send(worker::Command::Exit);
}
while let Some(join) = guard.joins.pop() {
join.join().unwrap();
}
}
/// Create a new deque
pub fn new_deque(&self) -> (usize,
deque::Worker<ReadyTask>,
Receiver<worker::Command>) {
let buffer = deque::BufferPool::new();
let (worker, stealer) = buffer.deque();
let (send, recv) = channel();
let mut guard = self.workers.lock().unwrap();
let index = guard.index;
guard.index += 1;
for (&key, stealer) in guard.stealers.iter() {
send.send(worker::Command::Add(key, stealer.clone())).unwrap();
}
for (_, workers) in guard.workers.iter() {
workers.send(worker::Command::Add(index, stealer.clone())).unwrap();
}
guard.stealers.insert(index, stealer);
guard.workers.insert(index, send);
(index, worker, recv)
}
    /// Registers a worker thread's join handle so `exit` can join it on shutdown.
pub fn register_worker(&self, handle: thread::JoinHandle<()>) {
let mut guard = self.workers.lock().unwrap();
guard.joins.push(handle);
}
}
impl<'a> Schedule for Arc<Backend> {
fn add_task(&mut self, task: Box<FnBox+Send>, after: Vec<Signal>) {
Backend::start(self.clone(), task, after)
}
}
|
ReadyTask
|
identifier_name
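The back.rs record above couples a global deque, per-worker stealers, and command channels. A much smaller std-only analogue of the same shape — one shared queue, a pool of workers, an atomic shutdown flag — is sketched below; it deliberately omits the work stealing and the fiber machinery:

// Minimal, std-only analogue of the back-end above; not the real API.
use std::collections::VecDeque;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;

type Job = Box<dyn FnOnce() + Send + 'static>;

struct MiniBackend {
    queue: Mutex<VecDeque<Job>>,
    shutting_down: AtomicBool,
}

impl MiniBackend {
    /// Enqueue a job unless the backend is already shutting down.
    fn start(&self, job: Job) {
        if !self.shutting_down.load(Ordering::SeqCst) {
            self.queue.lock().unwrap().push_back(job);
        }
    }
}

fn main() {
    let back = Arc::new(MiniBackend {
        queue: Mutex::new(VecDeque::new()),
        shutting_down: AtomicBool::new(false),
    });

    let workers: Vec<_> = (0..4)
        .map(|_| {
            let back = Arc::clone(&back);
            thread::spawn(move || loop {
                // Take one job while holding the lock, run it after releasing.
                let job = back.queue.lock().unwrap().pop_front();
                match job {
                    Some(job) => job(),
                    None if back.shutting_down.load(Ordering::SeqCst) => break,
                    None => thread::sleep(Duration::from_millis(1)),
                }
            })
        })
        .collect();

    for i in 0..8 {
        back.start(Box::new(move || println!("task {} ran", i)));
    }

    // Drain remaining work, then let the workers exit.
    back.shutting_down.store(true, Ordering::SeqCst);
    for w in workers {
        w.join().unwrap();
    }
}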
|
back.rs
|
//! Back-end module for the task queue. The back-end is running
//! on a separate thread. All it does is listen to a command
//! channel and start new tasks when the time comes.
use std::sync::atomic::*;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{Sender, Receiver, channel};
use std::collections::HashMap;
use std::thread;
use bran;
use pulse::*;
use deque;
use num_cpus;
use {Wait, Schedule, FnBox};
use super::worker;
struct Inner {
index: usize,
stealers: HashMap<usize, deque::Stealer<ReadyTask>>,
workers: HashMap<usize, Sender<worker::Command>>,
joins: Vec<thread::JoinHandle<()>>
}
/// Task queue back-end.
pub struct Backend {
active: AtomicBool,
global_queue: Mutex<deque::Worker<ReadyTask>>,
workers: Mutex<Inner>,
pool: bran::StackPool
}
/// A ready task
pub struct ReadyTask(bran::Handle);
impl ReadyTask {
pub fn run(self) {
use bran::fiber::State;
let ReadyTask(task) = self;
match task.run() {
State::Pending(signal) => {
worker::requeue(task, signal);
}
State::PendingTimeout(_, _) => {
panic!("Timeouts are not supported")
}
State::Finished | State::Panicked => ()
}
}
}
impl Backend {
/// Create a new back-end.
pub fn new() -> Arc<Backend> {
let buffer = deque::BufferPool::new();
let (worker, stealer) = buffer.deque();
let mut map = HashMap::new();
map.insert(0, stealer);
let back = Arc::new(Backend {
active: AtomicBool::new(false),
global_queue: Mutex::new(worker),
workers: Mutex::new(Inner {
index: 1,
stealers: map,
workers: HashMap::new(),
joins: Vec::new()
}),
pool: bran::StackPool::new()
});
for _ in 0..num_cpus::get() {
worker::Worker::new(back.clone()).start();
}
back
}
/// Start a task on the global work queue
fn start_on_global_queue(&self, rt: ReadyTask) {
let guard = self.global_queue.lock().unwrap();
guard.push(rt);
}
    /// Start a task that will run once all the Handles have
/// been completed.
pub fn start(back: Arc<Backend>, task: Box<FnBox+Send>, mut after: Vec<Signal>) {
// Create the wait signal if needed
let signal = if after.len() == 0 {
Signal::pulsed()
} else if after.len() == 1 {
after.pop().unwrap()
} else {
Barrier::new(&after).signal()
};
signal.callback(move || {
            if !back.active.load(Ordering::SeqCst) {
let fiber = bran::fiber::Fiber::spawn_with(move || {
task.call_box(&mut worker::FiberSchedule)
}, back.pool.clone());
let try_thread = worker::start(ReadyTask(fiber));
match try_thread {
Ok(b) => b,
Err(rt) => {
back.start_on_global_queue(rt);
true
}
};
}
});
}
    /// Start a task that will run once all the Handles have
/// been completed.
pub fn enqueue(back: Arc<Backend>, task: bran::Handle, after: Signal) {
after.callback(move || {
            if !back.active.load(Ordering::SeqCst) {
let try_thread = worker::start(ReadyTask(task));
match try_thread {
Ok(b) => b,
Err(rt) => {
back.start_on_global_queue(rt);
true
}
};
}
});
}
/// Kill the backend, wait until the condition is satisfied.
pub fn exit(&self, wait: Wait)
|
/// Create a new deque
pub fn new_deque(&self) -> (usize,
deque::Worker<ReadyTask>,
Receiver<worker::Command>) {
let buffer = deque::BufferPool::new();
let (worker, stealer) = buffer.deque();
let (send, recv) = channel();
let mut guard = self.workers.lock().unwrap();
let index = guard.index;
guard.index += 1;
for (&key, stealer) in guard.stealers.iter() {
send.send(worker::Command::Add(key, stealer.clone())).unwrap();
}
for (_, workers) in guard.workers.iter() {
workers.send(worker::Command::Add(index, stealer.clone())).unwrap();
}
guard.stealers.insert(index, stealer);
guard.workers.insert(index, send);
(index, worker, recv)
}
    /// Registers a worker thread's join handle so `exit` can join it on shutdown.
pub fn register_worker(&self, handle: thread::JoinHandle<()>) {
let mut guard = self.workers.lock().unwrap();
guard.joins.push(handle);
}
}
impl<'a> Schedule for Arc<Backend> {
fn add_task(&mut self, task: Box<FnBox+Send>, after: Vec<Signal>) {
Backend::start(self.clone(), task, after)
}
}
|
{
// read the current active count, OR in the BLOCK
// flag if needed for the wait
match wait {
Wait::None | Wait::Active => {
self.active.store(true, Ordering::SeqCst);
}
Wait::Pending => ()
};
let mut guard = self.workers.lock().unwrap();
for (_, send) in guard.workers.iter() {
let _ = send.send(worker::Command::Exit);
}
while let Some(join) = guard.joins.pop() {
join.join().unwrap();
}
}
|
identifier_body
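`Backend::start` above runs a task only after every prerequisite `Signal` has pulsed, building a `Barrier` when there is more than one. A std-only sketch of that pattern, with mpsc receivers standing in for the pulse signals (the real crate's API is not used here):

// Run `task` only after every prerequisite has completed.
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

fn run_after(
    after: Vec<Receiver<()>>,
    task: impl FnOnce() + Send + 'static,
) -> thread::JoinHandle<()> {
    thread::spawn(move || {
        // Analogue of Barrier::new(&after).signal(): block until every
        // prerequisite has fired (sent a value or dropped its sender).
        for signal in &after {
            let _ = signal.recv();
        }
        task();
    })
}

fn main() {
    let (tx_a, rx_a): (Sender<()>, Receiver<()>) = channel();
    let (tx_b, rx_b) = channel();

    let done = run_after(vec![rx_a, rx_b], || println!("both prerequisites done"));

    tx_a.send(()).unwrap();
    tx_b.send(()).unwrap();
    done.join().unwrap();
}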
|
back.rs
|
//! Back-end module for the task queue. The back-end is running
//! on a separate thread. All it does is listen to a command
//! channel and start new tasks when the time comes.
use std::sync::atomic::*;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{Sender, Receiver, channel};
use std::collections::HashMap;
use std::thread;
use bran;
use pulse::*;
use deque;
use num_cpus;
use {Wait, Schedule, FnBox};
use super::worker;
struct Inner {
index: usize,
stealers: HashMap<usize, deque::Stealer<ReadyTask>>,
workers: HashMap<usize, Sender<worker::Command>>,
joins: Vec<thread::JoinHandle<()>>
}
/// Task queue back-end.
pub struct Backend {
active: AtomicBool,
global_queue: Mutex<deque::Worker<ReadyTask>>,
workers: Mutex<Inner>,
pool: bran::StackPool
}
/// A ready task
pub struct ReadyTask(bran::Handle);
impl ReadyTask {
pub fn run(self) {
use bran::fiber::State;
let ReadyTask(task) = self;
match task.run() {
State::Pending(signal) => {
worker::requeue(task, signal);
}
State::PendingTimeout(_, _) => {
panic!("Timeouts are not supported")
}
State::Finished | State::Panicked => ()
}
}
}
impl Backend {
/// Create a new back-end.
pub fn new() -> Arc<Backend> {
let buffer = deque::BufferPool::new();
let (worker, stealer) = buffer.deque();
let mut map = HashMap::new();
map.insert(0, stealer);
let back = Arc::new(Backend {
active: AtomicBool::new(false),
global_queue: Mutex::new(worker),
workers: Mutex::new(Inner {
index: 1,
stealers: map,
workers: HashMap::new(),
joins: Vec::new()
}),
pool: bran::StackPool::new()
});
for _ in 0..num_cpus::get() {
worker::Worker::new(back.clone()).start();
}
back
}
/// Start a task on the global work queue
fn start_on_global_queue(&self, rt: ReadyTask) {
let guard = self.global_queue.lock().unwrap();
guard.push(rt);
}
    /// Start a task that will run once all the Handles have
/// been completed.
pub fn start(back: Arc<Backend>, task: Box<FnBox+Send>, mut after: Vec<Signal>) {
// Create the wait signal if needed
let signal = if after.len() == 0 {
Signal::pulsed()
} else if after.len() == 1 {
after.pop().unwrap()
} else {
Barrier::new(&after).signal()
};
signal.callback(move || {
            if !back.active.load(Ordering::SeqCst) {
let fiber = bran::fiber::Fiber::spawn_with(move || {
task.call_box(&mut worker::FiberSchedule)
}, back.pool.clone());
let try_thread = worker::start(ReadyTask(fiber));
match try_thread {
Ok(b) => b,
Err(rt) => {
back.start_on_global_queue(rt);
true
}
};
|
/// been completed.
pub fn enqueue(back: Arc<Backend>, task: bran::Handle, after: Signal) {
after.callback(move || {
            if !back.active.load(Ordering::SeqCst) {
let try_thread = worker::start(ReadyTask(task));
match try_thread {
Ok(b) => b,
Err(rt) => {
back.start_on_global_queue(rt);
true
}
};
}
});
}
/// Kill the backend, wait until the condition is satisfied.
pub fn exit(&self, wait: Wait) {
// read the current active count, OR in the BLOCK
// flag if needed for the wait
match wait {
Wait::None | Wait::Active => {
self.active.store(true, Ordering::SeqCst);
}
Wait::Pending => ()
};
let mut guard = self.workers.lock().unwrap();
for (_, send) in guard.workers.iter() {
let _ = send.send(worker::Command::Exit);
}
while let Some(join) = guard.joins.pop() {
join.join().unwrap();
}
}
/// Create a new deque
pub fn new_deque(&self) -> (usize,
deque::Worker<ReadyTask>,
Receiver<worker::Command>) {
let buffer = deque::BufferPool::new();
let (worker, stealer) = buffer.deque();
let (send, recv) = channel();
let mut guard = self.workers.lock().unwrap();
let index = guard.index;
guard.index += 1;
for (&key, stealer) in guard.stealers.iter() {
send.send(worker::Command::Add(key, stealer.clone())).unwrap();
}
for (_, workers) in guard.workers.iter() {
workers.send(worker::Command::Add(index, stealer.clone())).unwrap();
}
guard.stealers.insert(index, stealer);
guard.workers.insert(index, send);
(index, worker, recv)
}
    /// Registers a worker thread's join handle so `exit` can join it on shutdown.
pub fn register_worker(&self, handle: thread::JoinHandle<()>) {
let mut guard = self.workers.lock().unwrap();
guard.joins.push(handle);
}
}
impl<'a> Schedule for Arc<Backend> {
fn add_task(&mut self, task: Box<FnBox+Send>, after: Vec<Signal>) {
Backend::start(self.clone(), task, after)
}
}
|
}
});
}
    /// Start a task that will run once all the Handles have
|
random_line_split
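`Backend::exit` above shuts down by sending `worker::Command::Exit` over every worker's channel and then joining the saved handles. A stripped-down sketch of that shutdown path, with a stand-in `Command` enum of my own rather than the real `worker::Command`:

// Std-only sketch of per-worker command channels plus join-on-exit.
use std::sync::mpsc::{channel, Sender};
use std::thread;

enum Command {
    Work(u32),
    Exit,
}

fn main() {
    let mut senders: Vec<Sender<Command>> = Vec::new();
    let mut joins = Vec::new();

    for id in 0..3 {
        let (tx, rx) = channel();
        senders.push(tx);
        joins.push(thread::spawn(move || {
            // Each worker drains its command channel until told to exit.
            while let Ok(cmd) = rx.recv() {
                match cmd {
                    Command::Work(n) => println!("worker {} got job {}", id, n),
                    Command::Exit => break,
                }
            }
        }));
    }

    for (i, tx) in senders.iter().enumerate() {
        tx.send(Command::Work(i as u32)).unwrap();
    }

    // Shutdown: tell every worker to exit, then wait for all of them.
    for tx in &senders {
        let _ = tx.send(Command::Exit);
    }
    while let Some(join) = joins.pop() {
        join.join().unwrap();
    }
}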
|
lib.rs
|
// Copyright 2013-2014 Jeffery Olson
//
// Licensed under the 3-Clause BSD License, see LICENSE.txt
// at the top-level of this repository.
// This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_id="p2dux#0.1"]
#![crate_type="rlib"]
#![desc = "All UX/Frontend-specific code in the p2d 2D-graphics library"]
#![license = "MIT"]
#![feature(globs)]
extern crate time;
extern crate serialize;
extern crate uuid;
extern crate collections;
extern crate p2d;
extern crate sdl2;
extern crate sdl2_image;
use time::precise_time_ns;
pub mod gfx;
pub mod ui;
pub mod view;
pub struct TimeTracker {
pub last_time: u64,
pub now_time: u64,
pub next_fps_time: u64,
pub fps_ctr: uint,
pub curr_fps: uint
}
impl TimeTracker {
pub fn new() -> TimeTracker {
let curr_time = precise_time_ns() / 1000000u64;
let mut tt = TimeTracker {
last_time: curr_time,
now_time: curr_time,
next_fps_time: curr_time + 1000u64,
fps_ctr: 0,
curr_fps: 0
};
tt.update();
tt
}
pub fn update(&mut self) {
self.last_time = self.now_time;
self.now_time = precise_time_ns() / 1000000u64;
if self.now_time >= self.next_fps_time
|
else {
self.fps_ctr += 1;
}
}
pub fn get_curr_fps(&self) -> uint { self.curr_fps }
pub fn get_ms_since(&self) -> uint { (self.now_time - self.last_time) as uint }
}
|
{
self.curr_fps = self.fps_ctr;
self.fps_ctr = 0;
self.next_fps_time = self.now_time + 1000u64;
}
|
conditional_block
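The `TimeTracker` record above counts frames and rolls the counter over once per 1000 ms window. The same bookkeeping written against `std::time::Instant` instead of `time::precise_time_ns` (a stand-alone sketch, not the p2dux API):

// Frame-rate counter: one rollover per full second, as in TimeTracker::update.
use std::time::{Duration, Instant};

struct FpsCounter {
    next_report: Instant,
    frames: u32,
    current_fps: u32,
}

impl FpsCounter {
    fn new() -> Self {
        FpsCounter {
            next_report: Instant::now() + Duration::from_secs(1),
            frames: 0,
            current_fps: 0,
        }
    }

    /// Call once per frame; rolls the counter over every full second.
    fn tick(&mut self) {
        let now = Instant::now();
        if now >= self.next_report {
            self.current_fps = self.frames;
            self.frames = 0;
            self.next_report = now + Duration::from_secs(1);
        } else {
            self.frames += 1;
        }
    }
}

fn main() {
    let mut fps = FpsCounter::new();
    for _ in 0..1000 {
        fps.tick();
        std::thread::sleep(Duration::from_millis(1));
    }
    println!("~{} frames counted in the last full second", fps.current_fps);
}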
|
lib.rs
|
// Copyright 2013-2014 Jeffery Olson
//
// Licensed under the 3-Clause BSD License, see LICENSE.txt
// at the top-level of this repository.
// This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_id="p2dux#0.1"]
#![crate_type="rlib"]
#![desc = "All UX/Frontend-specific code in the p2d 2D-graphics library"]
#![license = "MIT"]
#![feature(globs)]
extern crate time;
extern crate serialize;
extern crate uuid;
extern crate collections;
extern crate p2d;
extern crate sdl2;
extern crate sdl2_image;
use time::precise_time_ns;
pub mod gfx;
pub mod ui;
pub mod view;
pub struct TimeTracker {
pub last_time: u64,
pub now_time: u64,
pub next_fps_time: u64,
pub fps_ctr: uint,
pub curr_fps: uint
}
impl TimeTracker {
pub fn new() -> TimeTracker {
let curr_time = precise_time_ns() / 1000000u64;
let mut tt = TimeTracker {
last_time: curr_time,
now_time: curr_time,
next_fps_time: curr_time + 1000u64,
fps_ctr: 0,
curr_fps: 0
};
tt.update();
tt
}
pub fn update(&mut self) {
self.last_time = self.now_time;
self.now_time = precise_time_ns() / 1000000u64;
if self.now_time >= self.next_fps_time {
self.curr_fps = self.fps_ctr;
self.fps_ctr = 0;
self.next_fps_time = self.now_time + 1000u64;
} else {
self.fps_ctr += 1;
}
}
pub fn get_curr_fps(&self) -> uint { self.curr_fps }
pub fn get_ms_since(&self) -> uint
|
}
|
{ (self.now_time - self.last_time) as uint }
|
identifier_body
|
lib.rs
|
// Copyright 2013-2014 Jeffery Olson
//
// Licensed under the 3-Clause BSD License, see LICENSE.txt
// at the top-level of this repository.
// This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_id="p2dux#0.1"]
#![crate_type="rlib"]
#![desc = "All UX/Frontend-specific code in the p2d 2D-graphics library"]
#![license = "MIT"]
#![feature(globs)]
extern crate time;
extern crate serialize;
extern crate uuid;
extern crate collections;
extern crate p2d;
extern crate sdl2;
extern crate sdl2_image;
use time::precise_time_ns;
pub mod gfx;
pub mod ui;
pub mod view;
pub struct
|
{
pub last_time: u64,
pub now_time: u64,
pub next_fps_time: u64,
pub fps_ctr: uint,
pub curr_fps: uint
}
impl TimeTracker {
pub fn new() -> TimeTracker {
let curr_time = precise_time_ns() / 1000000u64;
let mut tt = TimeTracker {
last_time: curr_time,
now_time: curr_time,
next_fps_time: curr_time + 1000u64,
fps_ctr: 0,
curr_fps: 0
};
tt.update();
tt
}
pub fn update(&mut self) {
self.last_time = self.now_time;
self.now_time = precise_time_ns() / 1000000u64;
if self.now_time >= self.next_fps_time {
self.curr_fps = self.fps_ctr;
self.fps_ctr = 0;
self.next_fps_time = self.now_time + 1000u64;
} else {
self.fps_ctr += 1;
}
}
pub fn get_curr_fps(&self) -> uint { self.curr_fps }
pub fn get_ms_since(&self) -> uint { (self.now_time - self.last_time) as uint }
}
|
TimeTracker
|
identifier_name
|
lib.rs
|
// Copyright 2013-2014 Jeffery Olson
//
// Licensed under the 3-Clause BSD License, see LICENSE.txt
// at the top-level of this repository.
// This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_id="p2dux#0.1"]
#![crate_type="rlib"]
#![desc = "All UX/Frontend-specific code in the p2d 2D-graphics library"]
#![license = "MIT"]
#![feature(globs)]
extern crate time;
extern crate serialize;
extern crate uuid;
extern crate collections;
extern crate p2d;
extern crate sdl2;
extern crate sdl2_image;
use time::precise_time_ns;
pub mod gfx;
pub mod ui;
pub mod view;
pub struct TimeTracker {
|
pub next_fps_time: u64,
pub fps_ctr: uint,
pub curr_fps: uint
}
impl TimeTracker {
pub fn new() -> TimeTracker {
let curr_time = precise_time_ns() / 1000000u64;
let mut tt = TimeTracker {
last_time: curr_time,
now_time: curr_time,
next_fps_time: curr_time + 1000u64,
fps_ctr: 0,
curr_fps: 0
};
tt.update();
tt
}
pub fn update(&mut self) {
self.last_time = self.now_time;
self.now_time = precise_time_ns() / 1000000u64;
if self.now_time >= self.next_fps_time {
self.curr_fps = self.fps_ctr;
self.fps_ctr = 0;
self.next_fps_time = self.now_time + 1000u64;
} else {
self.fps_ctr += 1;
}
}
pub fn get_curr_fps(&self) -> uint { self.curr_fps }
pub fn get_ms_since(&self) -> uint { (self.now_time - self.last_time) as uint }
}
|
pub last_time: u64,
pub now_time: u64,
|
random_line_split
|
import.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Traces import request.
use util::H256;
use header::BlockNumber;
use trace::FlatBlockTraces;
/// Traces import request.
pub struct ImportRequest {
/// Traces to import.
pub traces: FlatBlockTraces,
/// Hash of traces block.
pub block_hash: H256,
/// Number of traces block.
pub block_number: BlockNumber,
|
/// Blocks enacted by this import.
///
/// They should be ordered from oldest to newest.
pub enacted: Vec<H256>,
/// Number of blocks retracted by this import.
pub retracted: usize,
}
|
random_line_split
|
|
import.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Traces import request.
use util::H256;
use header::BlockNumber;
use trace::FlatBlockTraces;
/// Traces import request.
pub struct
|
{
/// Traces to import.
pub traces: FlatBlockTraces,
/// Hash of traces block.
pub block_hash: H256,
/// Number of traces block.
pub block_number: BlockNumber,
/// Blocks enacted by this import.
///
/// They should be ordered from oldest to newest.
pub enacted: Vec<H256>,
/// Number of blocks retracted by this import.
pub retracted: usize,
}
|
ImportRequest
|
identifier_name
|
once.rs
|
use consumer::*;
use stream::*;
/// A stream that emits an element exactly once.
///
/// This `struct` is created by the [`once()`] function. See its documentation
/// for more.
///
/// [`once()`]: fn.once.html
#[must_use = "stream adaptors are lazy and do nothing unless consumed"]
pub struct Once<T> {
value: T,
}
/// Creates a stream that emits an element exactly once.
///
/// # Examples
///
/// ```
/// use asyncplify::*;
///
/// let vec = once(5).into_vec();
/// assert!(vec == [5], "vec == {:?}", vec);
/// ```
pub fn
|
<T>(value: T) -> Once<T> {
Once { value: value }
}
impl<T> Stream for Once<T> {
type Item = T;
fn consume<C>(self, mut consumer: C)
where C: Consumer<Self::Item>
{
consumer.emit(self.value);
}
}
|
once
|
identifier_name
|
once.rs
|
use consumer::*;
use stream::*;
/// A stream that emits an element exactly once.
///
/// This `struct` is created by the [`once()`] function. See its documentation
/// for more.
///
/// [`once()`]: fn.once.html
#[must_use = "stream adaptors are lazy and do nothing unless consumed"]
pub struct Once<T> {
value: T,
}
/// Creates a stream that emits an element exactly once.
///
/// # Examples
///
/// ```
/// use asyncplify::*;
///
/// let vec = once(5).into_vec();
/// assert!(vec == [5], "vec == {:?}", vec);
/// ```
pub fn once<T>(value: T) -> Once<T>
|
impl<T> Stream for Once<T> {
type Item = T;
fn consume<C>(self, mut consumer: C)
where C: Consumer<Self::Item>
{
consumer.emit(self.value);
}
}
|
{
Once { value: value }
}
|
identifier_body
|
once.rs
|
use consumer::*;
|
///
/// This `struct` is created by the [`once()`] function. See its documentation
/// for more.
///
/// [`once()`]: fn.once.html
#[must_use = "stream adaptors are lazy and do nothing unless consumed"]
pub struct Once<T> {
value: T,
}
/// Creates a stream that emits an element exactly once.
///
/// # Examples
///
/// ```
/// use asyncplify::*;
///
/// let vec = once(5).into_vec();
/// assert!(vec == [5], "vec == {:?}", vec);
/// ```
pub fn once<T>(value: T) -> Once<T> {
Once { value: value }
}
impl<T> Stream for Once<T> {
type Item = T;
fn consume<C>(self, mut consumer: C)
where C: Consumer<Self::Item>
{
consumer.emit(self.value);
}
}
|
use stream::*;
/// A stream that emits an element exactly once.
|
random_line_split
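The `once` records above show a push-based stream: `consume` hands the single stored value to a consumer's `emit`. A self-contained sketch of that shape with minimal stand-in `Consumer`/`Stream` traits (asyncplify's real trait definitions are not shown in the records, so these are assumptions):

// Push-based stream sketch: `Once` emits its value exactly once.
trait Consumer<T> {
    fn emit(&mut self, item: T);
}

trait Stream {
    type Item;
    fn consume<C: Consumer<Self::Item>>(self, consumer: C);
}

struct Once<T> {
    value: T,
}

fn once<T>(value: T) -> Once<T> {
    Once { value }
}

impl<T> Stream for Once<T> {
    type Item = T;
    fn consume<C: Consumer<Self::Item>>(self, mut consumer: C) {
        // The whole stream is a single push of the stored value.
        consumer.emit(self.value);
    }
}

// A consumer that simply collects everything it is given.
struct Collect<T> {
    items: Vec<T>,
}

impl<'a, T> Consumer<T> for &'a mut Collect<T> {
    fn emit(&mut self, item: T) {
        self.items.push(item);
    }
}

fn main() {
    let mut sink = Collect { items: Vec::new() };
    once(5).consume(&mut sink);
    assert_eq!(sink.items, [5]);
    println!("{:?}", sink.items);
}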
|
selection_range.rs
|
use serde::{Deserialize, Serialize};
use crate::{
PartialResultParams, Position, Range, StaticTextDocumentRegistrationOptions,
TextDocumentIdentifier, WorkDoneProgressOptions, WorkDoneProgressParams,
};
#[derive(Debug, Eq, PartialEq, Clone, Default, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct SelectionRangeClientCapabilities {
/// Whether implementation supports dynamic registration for selection range
/// providers. If this is set to `true` the client supports the new
/// `SelectionRangeRegistrationOptions` return value for the corresponding
/// server capability as well.
#[serde(skip_serializing_if = "Option::is_none")]
pub dynamic_registration: Option<bool>,
}
#[derive(Debug, Eq, PartialEq, Clone, Default, Deserialize, Serialize)]
pub struct SelectionRangeOptions {
#[serde(flatten)]
pub work_done_progress_options: WorkDoneProgressOptions,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize, Serialize)]
pub struct SelectionRangeRegistrationOptions {
#[serde(flatten)]
pub selection_range_options: SelectionRangeOptions,
#[serde(flatten)]
pub registration_options: StaticTextDocumentRegistrationOptions,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize, Serialize)]
#[serde(untagged)]
|
}
impl From<SelectionRangeRegistrationOptions> for SelectionRangeProviderCapability {
fn from(from: SelectionRangeRegistrationOptions) -> Self {
Self::RegistrationOptions(from)
}
}
impl From<SelectionRangeOptions> for SelectionRangeProviderCapability {
fn from(from: SelectionRangeOptions) -> Self {
Self::Options(from)
}
}
impl From<bool> for SelectionRangeProviderCapability {
fn from(from: bool) -> Self {
Self::Simple(from)
}
}
/// A parameter literal used in selection range requests.
#[derive(Debug, Eq, PartialEq, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct SelectionRangeParams {
/// The text document.
pub text_document: TextDocumentIdentifier,
/// The positions inside the text document.
pub positions: Vec<Position>,
#[serde(flatten)]
pub work_done_progress_params: WorkDoneProgressParams,
#[serde(flatten)]
pub partial_result_params: PartialResultParams,
}
/// Represents a selection range.
#[derive(Debug, Eq, PartialEq, Clone, Default, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct SelectionRange {
/// Range of the selection.
pub range: Range,
/// The parent selection range containing this range.
#[serde(skip_serializing_if = "Option::is_none")]
pub parent: Option<Box<SelectionRange>>,
}
|
pub enum SelectionRangeProviderCapability {
Simple(bool),
Options(SelectionRangeOptions),
RegistrationOptions(SelectionRangeRegistrationOptions),
|
random_line_split
|
selection_range.rs
|
use serde::{Deserialize, Serialize};
use crate::{
PartialResultParams, Position, Range, StaticTextDocumentRegistrationOptions,
TextDocumentIdentifier, WorkDoneProgressOptions, WorkDoneProgressParams,
};
#[derive(Debug, Eq, PartialEq, Clone, Default, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct SelectionRangeClientCapabilities {
/// Whether implementation supports dynamic registration for selection range
/// providers. If this is set to `true` the client supports the new
/// `SelectionRangeRegistrationOptions` return value for the corresponding
/// server capability as well.
#[serde(skip_serializing_if = "Option::is_none")]
pub dynamic_registration: Option<bool>,
}
#[derive(Debug, Eq, PartialEq, Clone, Default, Deserialize, Serialize)]
pub struct SelectionRangeOptions {
#[serde(flatten)]
pub work_done_progress_options: WorkDoneProgressOptions,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize, Serialize)]
pub struct SelectionRangeRegistrationOptions {
#[serde(flatten)]
pub selection_range_options: SelectionRangeOptions,
#[serde(flatten)]
pub registration_options: StaticTextDocumentRegistrationOptions,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize, Serialize)]
#[serde(untagged)]
pub enum SelectionRangeProviderCapability {
Simple(bool),
Options(SelectionRangeOptions),
RegistrationOptions(SelectionRangeRegistrationOptions),
}
impl From<SelectionRangeRegistrationOptions> for SelectionRangeProviderCapability {
fn from(from: SelectionRangeRegistrationOptions) -> Self {
Self::RegistrationOptions(from)
}
}
impl From<SelectionRangeOptions> for SelectionRangeProviderCapability {
fn from(from: SelectionRangeOptions) -> Self {
Self::Options(from)
}
}
impl From<bool> for SelectionRangeProviderCapability {
fn from(from: bool) -> Self {
Self::Simple(from)
}
}
/// A parameter literal used in selection range requests.
#[derive(Debug, Eq, PartialEq, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct
|
{
/// The text document.
pub text_document: TextDocumentIdentifier,
/// The positions inside the text document.
pub positions: Vec<Position>,
#[serde(flatten)]
pub work_done_progress_params: WorkDoneProgressParams,
#[serde(flatten)]
pub partial_result_params: PartialResultParams,
}
/// Represents a selection range.
#[derive(Debug, Eq, PartialEq, Clone, Default, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct SelectionRange {
/// Range of the selection.
pub range: Range,
/// The parent selection range containing this range.
#[serde(skip_serializing_if = "Option::is_none")]
pub parent: Option<Box<SelectionRange>>,
}
|
SelectionRangeParams
|
identifier_name
|
base.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Codegen the completed AST to the LLVM IR.
//!
//! Some functions here, such as codegen_block and codegen_expr, return a value --
//! the result of the codegen to LLVM -- while others, such as codegen_fn
//! and mono_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about codegen:
//!
//! * There's no way to find out the Ty type of a Value. Doing so
//! would be "trying to get the eggs out of an omelette" (credit:
//! pcwalton). You can, instead, find out its llvm::Type by calling val_ty,
//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int,
//! int) and rec(x=int, y=int, z=int) will have the same llvm::Type.
use super::ModuleLlvm;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
use super::LlvmCodegenBackend;
use llvm;
use metadata;
use rustc::mir::mono::{Linkage, Visibility, Stats};
use rustc::middle::cstore::{EncodedMetadata};
use rustc::ty::TyCtxt;
use rustc::middle::exported_symbols;
use rustc::session::config::{self, DebugInfo};
use builder::Builder;
use common;
use context::CodegenCx;
use monomorphize::partitioning::CodegenUnitExt;
use rustc_codegen_ssa::mono_item::MonoItemExt;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::back::write::submit_codegened_module_to_llvm;
use std::ffi::CString;
use std::time::Instant;
use syntax_pos::symbol::InternedString;
use rustc::hir::CodegenFnAttrs;
use value::Value;
pub fn write_metadata<'a, 'gcx>(
tcx: TyCtxt<'a, 'gcx, 'gcx>,
llvm_module: &ModuleLlvm
) -> EncodedMetadata {
use std::io::Write;
use flate2::Compression;
use flate2::write::DeflateEncoder;
let (metadata_llcx, metadata_llmod) = (&*llvm_module.llcx, llvm_module.llmod());
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum MetadataKind {
None,
Uncompressed,
Compressed
}
let kind = tcx.sess.crate_types.borrow().iter().map(|ty| {
match *ty {
config::CrateType::Executable |
config::CrateType::Staticlib |
config::CrateType::Cdylib => MetadataKind::None,
config::CrateType::Rlib => MetadataKind::Uncompressed,
config::CrateType::Dylib |
config::CrateType::ProcMacro => MetadataKind::Compressed,
}
}).max().unwrap_or(MetadataKind::None);
if kind == MetadataKind::None {
return EncodedMetadata::new();
}
let metadata = tcx.encode_metadata();
if kind == MetadataKind::Uncompressed {
return metadata;
}
assert!(kind == MetadataKind::Compressed);
let mut compressed = tcx.metadata_encoding_version();
DeflateEncoder::new(&mut compressed, Compression::fast())
.write_all(&metadata.raw_data).unwrap();
let llmeta = common::bytes_in_context(metadata_llcx, &compressed);
let llconst = common::struct_in_context(metadata_llcx, &[llmeta], false);
let name = exported_symbols::metadata_symbol_name(tcx);
let buf = CString::new(name).unwrap();
let llglobal = unsafe {
llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr())
};
unsafe {
llvm::LLVMSetInitializer(llglobal, llconst);
let section_name = metadata::metadata_section_name(&tcx.sess.target.target);
let name = SmallCStr::new(section_name);
llvm::LLVMSetSection(llglobal, name.as_ptr());
        // Also generate a .section directive to force no
// flags, at least for ELF outputs, so that the
// metadata doesn't get loaded into memory.
let directive = format!(".section {}", section_name);
let directive = CString::new(directive).unwrap();
llvm::LLVMSetModuleInlineAsm(metadata_llmod, directive.as_ptr())
}
return metadata;
}
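// Standalone sketch (assumes the `flate2` crate; not from the original file)
// of the compression step above: the encoder appends DEFLATE-compressed bytes
// onto a buffer that already holds a version prefix, mirroring how the
// metadata is written after `metadata_encoding_version()`.
fn compress_with_prefix(prefix: &[u8], payload: &[u8]) -> std::io::Result<Vec<u8>> {
    use std::io::Write;
    use flate2::{write::DeflateEncoder, Compression};
    let mut encoder = DeflateEncoder::new(prefix.to_vec(), Compression::fast());
    encoder.write_all(payload)?;
    encoder.finish() // yields the Vec<u8>: prefix followed by the compressed payload
}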
pub struct ValueIter<'ll> {
cur: Option<&'ll Value>,
step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>,
}
impl Iterator for ValueIter<'ll> {
type Item = &'ll Value;
fn next(&mut self) -> Option<&'ll Value> {
let old = self.cur;
if let Some(old) = old {
self.cur = unsafe { (self.step)(old) };
}
old
}
}
pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> {
unsafe {
ValueIter {
cur: llvm::LLVMGetFirstGlobal(llmod),
step: llvm::LLVMGetNextGlobal,
}
}
}
pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>,
cgu_name: InternedString)
-> Stats {
let start_time = Instant::now();
let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
let ((stats, module), _) = tcx.dep_graph.with_task(dep_node,
tcx,
cgu_name,
module_codegen);
let time_to_codegen = start_time.elapsed();
// We assume that the cost to run LLVM on a CGU is proportional to
// the time we needed for codegenning it.
let cost = time_to_codegen.as_secs() * 1_000_000_000 +
time_to_codegen.subsec_nanos() as u64;
submit_codegened_module_to_llvm(&LlvmCodegenBackend(()), tcx, module, cost);
return stats;
fn module_codegen<'ll, 'tcx>(
tcx: TyCtxt<'ll, 'tcx, 'tcx>,
cgu_name: InternedString)
-> (Stats, ModuleCodegen<ModuleLlvm>)
{
let backend = LlvmCodegenBackend(());
let cgu = tcx.codegen_unit(cgu_name);
// Instantiate monomorphizations without filling out definitions yet...
let llvm_module = backend.new_metadata(tcx.sess, &cgu_name.as_str());
let stats = {
let cx = CodegenCx::new(tcx, cgu, &llvm_module);
let mono_items = cx.codegen_unit
.items_in_deterministic_order(cx.tcx);
for &(mono_item, (linkage, visibility)) in &mono_items {
mono_item.predefine::<Builder>(&cx, linkage, visibility);
}
//... and now that we have everything pre-defined, fill out those definitions.
for &(mono_item, _) in &mono_items {
mono_item.define::<Builder>(&cx);
}
// If this codegen unit contains the main function, also create the
// wrapper here
maybe_create_entry_wrapper::<Builder>(&cx);
// Run replace-all-uses-with for statics that need it
for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
unsafe {
let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
llvm::LLVMDeleteGlobal(old_g);
}
}
// Create the llvm.used variable
// This variable has type [N x i8*] and is stored in the llvm.metadata section
            if !cx.used_statics().borrow().is_empty() {
cx.create_used_variable()
}
// Finalize debuginfo
            if cx.sess().opts.debuginfo != DebugInfo::None
|
cx.consume_stats().into_inner()
};
(stats, ModuleCodegen {
name: cgu_name.to_string(),
module_llvm: llvm_module,
kind: ModuleKind::Regular,
})
}
}
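// Tiny self-contained check (not from the original file) of what the cost
// formula above computes: whole seconds scaled to nanoseconds plus the
// sub-second remainder, i.e. the elapsed time in nanoseconds as a u64.
fn elapsed_nanos_example() -> u64 {
    use std::time::Instant;
    let start = Instant::now();
    let _work: u64 = (0..1_000u64).sum(); // stand-in for the codegen work
    let elapsed = start.elapsed();
    let cost = elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64;
    debug_assert_eq!(cost as u128, elapsed.as_nanos()); // same value, pre-`as_nanos` style
    cost
}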
pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
let sect = match attrs.link_section {
Some(name) => name,
None => return,
};
unsafe {
        let buf = SmallCStr::new(&sect.as_str());
llvm::LLVMSetSection(llval, buf.as_ptr());
}
}
pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
match linkage {
Linkage::External => llvm::Linkage::ExternalLinkage,
Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage,
Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage,
Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage,
Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage,
Linkage::WeakODR => llvm::Linkage::WeakODRLinkage,
Linkage::Appending => llvm::Linkage::AppendingLinkage,
Linkage::Internal => llvm::Linkage::InternalLinkage,
Linkage::Private => llvm::Linkage::PrivateLinkage,
Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage,
Linkage::Common => llvm::Linkage::CommonLinkage,
}
}
pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility {
match linkage {
Visibility::Default => llvm::Visibility::Default,
Visibility::Hidden => llvm::Visibility::Hidden,
Visibility::Protected => llvm::Visibility::Protected,
}
}
|
{
cx.debuginfo_finalize();
}
|
conditional_block
|
base.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Codegen the completed AST to the LLVM IR.
//!
//! Some functions here, such as codegen_block and codegen_expr, return a value --
//! the result of the codegen to LLVM -- while others, such as codegen_fn
//! and mono_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about codegen:
//!
//! * There's no way to find out the Ty type of a Value. Doing so
//! would be "trying to get the eggs out of an omelette" (credit:
//! pcwalton). You can, instead, find out its llvm::Type by calling val_ty,
//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int,
//! int) and rec(x=int, y=int, z=int) will have the same llvm::Type.
use super::ModuleLlvm;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
use super::LlvmCodegenBackend;
use llvm;
use metadata;
use rustc::mir::mono::{Linkage, Visibility, Stats};
use rustc::middle::cstore::{EncodedMetadata};
use rustc::ty::TyCtxt;
use rustc::middle::exported_symbols;
use rustc::session::config::{self, DebugInfo};
use builder::Builder;
use common;
use context::CodegenCx;
use monomorphize::partitioning::CodegenUnitExt;
use rustc_codegen_ssa::mono_item::MonoItemExt;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::back::write::submit_codegened_module_to_llvm;
use std::ffi::CString;
use std::time::Instant;
use syntax_pos::symbol::InternedString;
use rustc::hir::CodegenFnAttrs;
use value::Value;
pub fn write_metadata<'a, 'gcx>(
tcx: TyCtxt<'a, 'gcx, 'gcx>,
llvm_module: &ModuleLlvm
) -> EncodedMetadata {
use std::io::Write;
use flate2::Compression;
use flate2::write::DeflateEncoder;
let (metadata_llcx, metadata_llmod) = (&*llvm_module.llcx, llvm_module.llmod());
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum MetadataKind {
None,
Uncompressed,
Compressed
}
let kind = tcx.sess.crate_types.borrow().iter().map(|ty| {
match *ty {
config::CrateType::Executable |
config::CrateType::Staticlib |
config::CrateType::Cdylib => MetadataKind::None,
config::CrateType::Rlib => MetadataKind::Uncompressed,
config::CrateType::Dylib |
config::CrateType::ProcMacro => MetadataKind::Compressed,
}
}).max().unwrap_or(MetadataKind::None);
if kind == MetadataKind::None {
return EncodedMetadata::new();
}
let metadata = tcx.encode_metadata();
if kind == MetadataKind::Uncompressed {
return metadata;
}
assert!(kind == MetadataKind::Compressed);
let mut compressed = tcx.metadata_encoding_version();
DeflateEncoder::new(&mut compressed, Compression::fast())
.write_all(&metadata.raw_data).unwrap();
let llmeta = common::bytes_in_context(metadata_llcx, &compressed);
let llconst = common::struct_in_context(metadata_llcx, &[llmeta], false);
let name = exported_symbols::metadata_symbol_name(tcx);
let buf = CString::new(name).unwrap();
let llglobal = unsafe {
llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr())
};
unsafe {
llvm::LLVMSetInitializer(llglobal, llconst);
let section_name = metadata::metadata_section_name(&tcx.sess.target.target);
let name = SmallCStr::new(section_name);
llvm::LLVMSetSection(llglobal, name.as_ptr());
        // Also generate a .section directive to force no
// flags, at least for ELF outputs, so that the
// metadata doesn't get loaded into memory.
let directive = format!(".section {}", section_name);
let directive = CString::new(directive).unwrap();
llvm::LLVMSetModuleInlineAsm(metadata_llmod, directive.as_ptr())
}
return metadata;
}
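// Reduced sketch (illustrative names, not from rustc) of the `MetadataKind`
// pattern above: deriving `Ord` on a fieldless enum orders variants by
// declaration order, so `.max()` picks the strongest requirement across all
// requested crate types.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum Need { None, Uncompressed, Compressed }

fn strongest_need(per_crate_type: Vec<Need>) -> Need {
    // e.g. strongest_need(vec![Need::None, Need::Compressed]) == Need::Compressed
    per_crate_type.into_iter().max().unwrap_or(Need::None)
}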
pub struct ValueIter<'ll> {
cur: Option<&'ll Value>,
step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>,
}
impl Iterator for ValueIter<'ll> {
type Item = &'ll Value;
fn next(&mut self) -> Option<&'ll Value> {
let old = self.cur;
if let Some(old) = old {
self.cur = unsafe { (self.step)(old) };
}
old
}
}
pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> {
unsafe {
ValueIter {
cur: llvm::LLVMGetFirstGlobal(llmod),
step: llvm::LLVMGetNextGlobal,
}
}
}
pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>,
cgu_name: InternedString)
-> Stats
|
cgu_name: InternedString)
-> (Stats, ModuleCodegen<ModuleLlvm>)
{
let backend = LlvmCodegenBackend(());
let cgu = tcx.codegen_unit(cgu_name);
// Instantiate monomorphizations without filling out definitions yet...
let llvm_module = backend.new_metadata(tcx.sess, &cgu_name.as_str());
let stats = {
let cx = CodegenCx::new(tcx, cgu, &llvm_module);
let mono_items = cx.codegen_unit
.items_in_deterministic_order(cx.tcx);
for &(mono_item, (linkage, visibility)) in &mono_items {
mono_item.predefine::<Builder>(&cx, linkage, visibility);
}
//... and now that we have everything pre-defined, fill out those definitions.
for &(mono_item, _) in &mono_items {
mono_item.define::<Builder>(&cx);
}
// If this codegen unit contains the main function, also create the
// wrapper here
maybe_create_entry_wrapper::<Builder>(&cx);
// Run replace-all-uses-with for statics that need it
for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
unsafe {
let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
llvm::LLVMDeleteGlobal(old_g);
}
}
// Create the llvm.used variable
// This variable has type [N x i8*] and is stored in the llvm.metadata section
            if !cx.used_statics().borrow().is_empty() {
cx.create_used_variable()
}
// Finalize debuginfo
            if cx.sess().opts.debuginfo != DebugInfo::None {
cx.debuginfo_finalize();
}
cx.consume_stats().into_inner()
};
(stats, ModuleCodegen {
name: cgu_name.to_string(),
module_llvm: llvm_module,
kind: ModuleKind::Regular,
})
}
}
pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
let sect = match attrs.link_section {
Some(name) => name,
None => return,
};
unsafe {
        let buf = SmallCStr::new(&sect.as_str());
llvm::LLVMSetSection(llval, buf.as_ptr());
}
}
pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
match linkage {
Linkage::External => llvm::Linkage::ExternalLinkage,
Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage,
Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage,
Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage,
Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage,
Linkage::WeakODR => llvm::Linkage::WeakODRLinkage,
Linkage::Appending => llvm::Linkage::AppendingLinkage,
Linkage::Internal => llvm::Linkage::InternalLinkage,
Linkage::Private => llvm::Linkage::PrivateLinkage,
Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage,
Linkage::Common => llvm::Linkage::CommonLinkage,
}
}
pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility {
match linkage {
Visibility::Default => llvm::Visibility::Default,
Visibility::Hidden => llvm::Visibility::Hidden,
Visibility::Protected => llvm::Visibility::Protected,
}
}
|
{
let start_time = Instant::now();
let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
let ((stats, module), _) = tcx.dep_graph.with_task(dep_node,
tcx,
cgu_name,
module_codegen);
let time_to_codegen = start_time.elapsed();
// We assume that the cost to run LLVM on a CGU is proportional to
// the time we needed for codegenning it.
let cost = time_to_codegen.as_secs() * 1_000_000_000 +
time_to_codegen.subsec_nanos() as u64;
submit_codegened_module_to_llvm(&LlvmCodegenBackend(()), tcx, module, cost);
return stats;
fn module_codegen<'ll, 'tcx>(
tcx: TyCtxt<'ll, 'tcx, 'tcx>,
|
identifier_body
|
base.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Codegen the completed AST to the LLVM IR.
//!
//! Some functions here, such as codegen_block and codegen_expr, return a value --
//! the result of the codegen to LLVM -- while others, such as codegen_fn
//! and mono_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about codegen:
//!
//! * There's no way to find out the Ty type of a Value. Doing so
//! would be "trying to get the eggs out of an omelette" (credit:
//! pcwalton). You can, instead, find out its llvm::Type by calling val_ty,
//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int,
//! int) and rec(x=int, y=int, z=int) will have the same llvm::Type.
use super::ModuleLlvm;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
use super::LlvmCodegenBackend;
use llvm;
use metadata;
use rustc::mir::mono::{Linkage, Visibility, Stats};
use rustc::middle::cstore::{EncodedMetadata};
use rustc::ty::TyCtxt;
use rustc::middle::exported_symbols;
use rustc::session::config::{self, DebugInfo};
use builder::Builder;
use common;
use context::CodegenCx;
use monomorphize::partitioning::CodegenUnitExt;
use rustc_codegen_ssa::mono_item::MonoItemExt;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::back::write::submit_codegened_module_to_llvm;
use std::ffi::CString;
use std::time::Instant;
use syntax_pos::symbol::InternedString;
use rustc::hir::CodegenFnAttrs;
use value::Value;
pub fn write_metadata<'a, 'gcx>(
tcx: TyCtxt<'a, 'gcx, 'gcx>,
llvm_module: &ModuleLlvm
) -> EncodedMetadata {
use std::io::Write;
use flate2::Compression;
use flate2::write::DeflateEncoder;
let (metadata_llcx, metadata_llmod) = (&*llvm_module.llcx, llvm_module.llmod());
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum MetadataKind {
None,
Uncompressed,
Compressed
}
let kind = tcx.sess.crate_types.borrow().iter().map(|ty| {
match *ty {
config::CrateType::Executable |
config::CrateType::Staticlib |
config::CrateType::Cdylib => MetadataKind::None,
config::CrateType::Rlib => MetadataKind::Uncompressed,
config::CrateType::Dylib |
config::CrateType::ProcMacro => MetadataKind::Compressed,
}
}).max().unwrap_or(MetadataKind::None);
if kind == MetadataKind::None {
return EncodedMetadata::new();
}
let metadata = tcx.encode_metadata();
if kind == MetadataKind::Uncompressed {
return metadata;
}
assert!(kind == MetadataKind::Compressed);
let mut compressed = tcx.metadata_encoding_version();
DeflateEncoder::new(&mut compressed, Compression::fast())
.write_all(&metadata.raw_data).unwrap();
let llmeta = common::bytes_in_context(metadata_llcx, &compressed);
let llconst = common::struct_in_context(metadata_llcx, &[llmeta], false);
let name = exported_symbols::metadata_symbol_name(tcx);
let buf = CString::new(name).unwrap();
let llglobal = unsafe {
llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr())
};
unsafe {
llvm::LLVMSetInitializer(llglobal, llconst);
let section_name = metadata::metadata_section_name(&tcx.sess.target.target);
let name = SmallCStr::new(section_name);
llvm::LLVMSetSection(llglobal, name.as_ptr());
        // Also generate a .section directive to force no
// flags, at least for ELF outputs, so that the
// metadata doesn't get loaded into memory.
let directive = format!(".section {}", section_name);
let directive = CString::new(directive).unwrap();
llvm::LLVMSetModuleInlineAsm(metadata_llmod, directive.as_ptr())
}
return metadata;
}
pub struct ValueIter<'ll> {
cur: Option<&'ll Value>,
step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>,
}
impl Iterator for ValueIter<'ll> {
type Item = &'ll Value;
fn
|
(&mut self) -> Option<&'ll Value> {
let old = self.cur;
if let Some(old) = old {
self.cur = unsafe { (self.step)(old) };
}
old
}
}
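// Self-contained sketch (plain std, no LLVM) of the same "cursor plus step
// function" iterator shape as `ValueIter`: `next` hands back the current item
// and advances the cursor through the stored step function.
struct Counter {
    cur: Option<u32>,
    step: fn(u32) -> Option<u32>,
}

impl Iterator for Counter {
    type Item = u32;
    fn next(&mut self) -> Option<u32> {
        let old = self.cur;
        if let Some(old) = old {
            self.cur = (self.step)(old);
        }
        old
    }
}

// Yields 0, 1, 2 and then stops once the step function returns None.
fn counter_example() -> Vec<u32> {
    let it = Counter { cur: Some(0), step: |n| if n < 2 { Some(n + 1) } else { None } };
    it.collect()
}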
pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> {
unsafe {
ValueIter {
cur: llvm::LLVMGetFirstGlobal(llmod),
step: llvm::LLVMGetNextGlobal,
}
}
}
pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>,
cgu_name: InternedString)
-> Stats {
let start_time = Instant::now();
let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
let ((stats, module), _) = tcx.dep_graph.with_task(dep_node,
tcx,
cgu_name,
module_codegen);
let time_to_codegen = start_time.elapsed();
// We assume that the cost to run LLVM on a CGU is proportional to
// the time we needed for codegenning it.
let cost = time_to_codegen.as_secs() * 1_000_000_000 +
time_to_codegen.subsec_nanos() as u64;
submit_codegened_module_to_llvm(&LlvmCodegenBackend(()), tcx, module, cost);
return stats;
fn module_codegen<'ll, 'tcx>(
tcx: TyCtxt<'ll, 'tcx, 'tcx>,
cgu_name: InternedString)
-> (Stats, ModuleCodegen<ModuleLlvm>)
{
let backend = LlvmCodegenBackend(());
let cgu = tcx.codegen_unit(cgu_name);
// Instantiate monomorphizations without filling out definitions yet...
let llvm_module = backend.new_metadata(tcx.sess, &cgu_name.as_str());
let stats = {
let cx = CodegenCx::new(tcx, cgu, &llvm_module);
let mono_items = cx.codegen_unit
.items_in_deterministic_order(cx.tcx);
for &(mono_item, (linkage, visibility)) in &mono_items {
mono_item.predefine::<Builder>(&cx, linkage, visibility);
}
//... and now that we have everything pre-defined, fill out those definitions.
for &(mono_item, _) in &mono_items {
mono_item.define::<Builder>(&cx);
}
// If this codegen unit contains the main function, also create the
// wrapper here
maybe_create_entry_wrapper::<Builder>(&cx);
// Run replace-all-uses-with for statics that need it
for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
unsafe {
let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
llvm::LLVMDeleteGlobal(old_g);
}
}
// Create the llvm.used variable
// This variable has type [N x i8*] and is stored in the llvm.metadata section
            if !cx.used_statics().borrow().is_empty() {
cx.create_used_variable()
}
// Finalize debuginfo
            if cx.sess().opts.debuginfo != DebugInfo::None {
cx.debuginfo_finalize();
}
cx.consume_stats().into_inner()
};
(stats, ModuleCodegen {
name: cgu_name.to_string(),
module_llvm: llvm_module,
kind: ModuleKind::Regular,
})
}
}
pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
let sect = match attrs.link_section {
Some(name) => name,
None => return,
};
unsafe {
        let buf = SmallCStr::new(&sect.as_str());
llvm::LLVMSetSection(llval, buf.as_ptr());
}
}
pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
match linkage {
Linkage::External => llvm::Linkage::ExternalLinkage,
Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage,
Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage,
Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage,
Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage,
Linkage::WeakODR => llvm::Linkage::WeakODRLinkage,
Linkage::Appending => llvm::Linkage::AppendingLinkage,
Linkage::Internal => llvm::Linkage::InternalLinkage,
Linkage::Private => llvm::Linkage::PrivateLinkage,
Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage,
Linkage::Common => llvm::Linkage::CommonLinkage,
}
}
pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility {
match linkage {
Visibility::Default => llvm::Visibility::Default,
Visibility::Hidden => llvm::Visibility::Hidden,
Visibility::Protected => llvm::Visibility::Protected,
}
}
|
next
|
identifier_name
|
base.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Codegen the completed AST to the LLVM IR.
//!
//! Some functions here, such as codegen_block and codegen_expr, return a value --
//! the result of the codegen to LLVM -- while others, such as codegen_fn
//! and mono_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about codegen:
//!
//! * There's no way to find out the Ty type of a Value. Doing so
//! would be "trying to get the eggs out of an omelette" (credit:
//! pcwalton). You can, instead, find out its llvm::Type by calling val_ty,
//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int,
//! int) and rec(x=int, y=int, z=int) will have the same llvm::Type.
use super::ModuleLlvm;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
use super::LlvmCodegenBackend;
use llvm;
use metadata;
use rustc::mir::mono::{Linkage, Visibility, Stats};
use rustc::middle::cstore::{EncodedMetadata};
use rustc::ty::TyCtxt;
use rustc::middle::exported_symbols;
use rustc::session::config::{self, DebugInfo};
use builder::Builder;
use common;
use context::CodegenCx;
use monomorphize::partitioning::CodegenUnitExt;
use rustc_codegen_ssa::mono_item::MonoItemExt;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::back::write::submit_codegened_module_to_llvm;
use std::ffi::CString;
use std::time::Instant;
use syntax_pos::symbol::InternedString;
use rustc::hir::CodegenFnAttrs;
use value::Value;
pub fn write_metadata<'a, 'gcx>(
tcx: TyCtxt<'a, 'gcx, 'gcx>,
llvm_module: &ModuleLlvm
) -> EncodedMetadata {
use std::io::Write;
use flate2::Compression;
use flate2::write::DeflateEncoder;
let (metadata_llcx, metadata_llmod) = (&*llvm_module.llcx, llvm_module.llmod());
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum MetadataKind {
None,
Uncompressed,
Compressed
}
let kind = tcx.sess.crate_types.borrow().iter().map(|ty| {
match *ty {
config::CrateType::Executable |
config::CrateType::Staticlib |
config::CrateType::Cdylib => MetadataKind::None,
config::CrateType::Rlib => MetadataKind::Uncompressed,
config::CrateType::Dylib |
config::CrateType::ProcMacro => MetadataKind::Compressed,
}
}).max().unwrap_or(MetadataKind::None);
if kind == MetadataKind::None {
return EncodedMetadata::new();
}
let metadata = tcx.encode_metadata();
if kind == MetadataKind::Uncompressed {
return metadata;
}
assert!(kind == MetadataKind::Compressed);
let mut compressed = tcx.metadata_encoding_version();
DeflateEncoder::new(&mut compressed, Compression::fast())
.write_all(&metadata.raw_data).unwrap();
let llmeta = common::bytes_in_context(metadata_llcx, &compressed);
let llconst = common::struct_in_context(metadata_llcx, &[llmeta], false);
let name = exported_symbols::metadata_symbol_name(tcx);
let buf = CString::new(name).unwrap();
let llglobal = unsafe {
llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr())
};
unsafe {
llvm::LLVMSetInitializer(llglobal, llconst);
let section_name = metadata::metadata_section_name(&tcx.sess.target.target);
let name = SmallCStr::new(section_name);
llvm::LLVMSetSection(llglobal, name.as_ptr());
        // Also generate a .section directive to force no
// flags, at least for ELF outputs, so that the
// metadata doesn't get loaded into memory.
let directive = format!(".section {}", section_name);
let directive = CString::new(directive).unwrap();
llvm::LLVMSetModuleInlineAsm(metadata_llmod, directive.as_ptr())
}
return metadata;
}
pub struct ValueIter<'ll> {
cur: Option<&'ll Value>,
step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>,
}
impl Iterator for ValueIter<'ll> {
type Item = &'ll Value;
fn next(&mut self) -> Option<&'ll Value> {
let old = self.cur;
if let Some(old) = old {
self.cur = unsafe { (self.step)(old) };
}
old
}
}
pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> {
unsafe {
ValueIter {
cur: llvm::LLVMGetFirstGlobal(llmod),
step: llvm::LLVMGetNextGlobal,
}
}
}
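// Hedged usage sketch (not in the original file): the iterator above walks
// every global of a module handle obtained elsewhere during codegen.
fn count_globals(llmod: &llvm::Module) -> usize {
    iter_globals(llmod).count()
}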
pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>,
cgu_name: InternedString)
-> Stats {
let start_time = Instant::now();
let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
let ((stats, module), _) = tcx.dep_graph.with_task(dep_node,
tcx,
cgu_name,
module_codegen);
let time_to_codegen = start_time.elapsed();
// We assume that the cost to run LLVM on a CGU is proportional to
// the time we needed for codegenning it.
let cost = time_to_codegen.as_secs() * 1_000_000_000 +
time_to_codegen.subsec_nanos() as u64;
submit_codegened_module_to_llvm(&LlvmCodegenBackend(()), tcx, module, cost);
return stats;
fn module_codegen<'ll, 'tcx>(
tcx: TyCtxt<'ll, 'tcx, 'tcx>,
cgu_name: InternedString)
-> (Stats, ModuleCodegen<ModuleLlvm>)
{
let backend = LlvmCodegenBackend(());
let cgu = tcx.codegen_unit(cgu_name);
// Instantiate monomorphizations without filling out definitions yet...
let llvm_module = backend.new_metadata(tcx.sess, &cgu_name.as_str());
let stats = {
let cx = CodegenCx::new(tcx, cgu, &llvm_module);
let mono_items = cx.codegen_unit
.items_in_deterministic_order(cx.tcx);
for &(mono_item, (linkage, visibility)) in &mono_items {
mono_item.predefine::<Builder>(&cx, linkage, visibility);
}
//... and now that we have everything pre-defined, fill out those definitions.
for &(mono_item, _) in &mono_items {
mono_item.define::<Builder>(&cx);
|
// If this codegen unit contains the main function, also create the
// wrapper here
maybe_create_entry_wrapper::<Builder>(&cx);
// Run replace-all-uses-with for statics that need it
for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
unsafe {
let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
llvm::LLVMDeleteGlobal(old_g);
}
}
// Create the llvm.used variable
// This variable has type [N x i8*] and is stored in the llvm.metadata section
            if !cx.used_statics().borrow().is_empty() {
cx.create_used_variable()
}
// Finalize debuginfo
            if cx.sess().opts.debuginfo != DebugInfo::None {
cx.debuginfo_finalize();
}
cx.consume_stats().into_inner()
};
(stats, ModuleCodegen {
name: cgu_name.to_string(),
module_llvm: llvm_module,
kind: ModuleKind::Regular,
})
}
}
pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
let sect = match attrs.link_section {
Some(name) => name,
None => return,
};
unsafe {
let buf = SmallCStr::new(§.as_str());
llvm::LLVMSetSection(llval, buf.as_ptr());
}
}
pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
match linkage {
Linkage::External => llvm::Linkage::ExternalLinkage,
Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage,
Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage,
Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage,
Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage,
Linkage::WeakODR => llvm::Linkage::WeakODRLinkage,
Linkage::Appending => llvm::Linkage::AppendingLinkage,
Linkage::Internal => llvm::Linkage::InternalLinkage,
Linkage::Private => llvm::Linkage::PrivateLinkage,
Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage,
Linkage::Common => llvm::Linkage::CommonLinkage,
}
}
pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility {
match linkage {
Visibility::Default => llvm::Visibility::Default,
Visibility::Hidden => llvm::Visibility::Hidden,
Visibility::Protected => llvm::Visibility::Protected,
}
}
|
}
|
random_line_split
|
missing_inline.rs
|
use clippy_utils::diagnostics::span_lint;
use rustc_ast::ast;
use rustc_hir as hir;
use rustc_lint::{self, LateContext, LateLintPass, LintContext};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::source_map::Span;
use rustc_span::sym;
declare_clippy_lint! {
/// ### What it does
/// It lints if an exported function, method, trait method with default impl,
/// or trait method impl is not `#[inline]`.
///
/// ### Why is this bad?
/// In general, it is not. Functions can be inlined across
/// crates when that's profitable as long as any form of LTO is used. When LTO is disabled,
/// functions that are not `#[inline]` cannot be inlined across crates. Certain types of crates
/// might intend for most of the methods in their public API to be able to be inlined across
/// crates even when LTO is disabled. For these types of crates, enabling this lint might make
/// sense. It allows the crate to require all exported methods to be `#[inline]` by default, and
/// then opt out for specific methods where this might not make sense.
///
/// ### Example
/// ```rust
/// pub fn foo() {} // missing #[inline]
/// fn ok() {} // ok
/// #[inline] pub fn bar() {} // ok
/// #[inline(always)] pub fn baz() {} // ok
///
/// pub trait Bar {
/// fn bar(); // ok
/// fn def_bar() {} // missing #[inline]
/// }
///
/// struct Baz;
/// impl Baz {
/// fn private() {} // ok
/// }
///
/// impl Bar for Baz {
/// fn bar() {} // ok - Baz is not exported
/// }
///
/// pub struct PubBaz;
/// impl PubBaz {
/// fn private() {} // ok
    /// pub fn not_private() {} // missing #[inline]
/// }
///
/// impl Bar for PubBaz {
/// fn bar() {} // missing #[inline]
/// fn def_bar() {} // missing #[inline]
/// }
/// ```
pub MISSING_INLINE_IN_PUBLIC_ITEMS,
restriction,
"detects missing `#[inline]` attribute for public callables (functions, trait methods, methods...)"
}
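// Illustrative opt-out (not part of the lint's source) for a single item, as
// mentioned in the doc comment above; the `clippy::` tool-lint path follows
// the usual lowercase naming of the lint declared here.
#[allow(clippy::missing_inline_in_public_items)]
pub fn intentionally_not_inlined() {}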
fn check_missing_inline_attrs(cx: &LateContext<'_>, attrs: &[ast::Attribute], sp: Span, desc: &'static str) {
let has_inline = attrs.iter().any(|a| a.has_name(sym::inline));
    if !has_inline {
span_lint(
cx,
MISSING_INLINE_IN_PUBLIC_ITEMS,
sp,
&format!("missing `#[inline]` for {}", desc),
);
}
}
fn is_executable_or_proc_macro(cx: &LateContext<'_>) -> bool {
use rustc_session::config::CrateType;
cx.tcx
.sess
.crate_types()
.iter()
.any(|t: &CrateType| matches!(t, CrateType::Executable | CrateType::ProcMacro))
}
declare_lint_pass!(MissingInline => [MISSING_INLINE_IN_PUBLIC_ITEMS]);
impl<'tcx> LateLintPass<'tcx> for MissingInline {
fn check_item(&mut self, cx: &LateContext<'tcx>, it: &'tcx hir::Item<'_>) {
if rustc_middle::lint::in_external_macro(cx.sess(), it.span) || is_executable_or_proc_macro(cx) {
return;
}
        if !cx.access_levels.is_exported(it.def_id) {
return;
}
match it.kind {
hir::ItemKind::Fn(..) =>
|
,
hir::ItemKind::Trait(ref _is_auto, ref _unsafe, ref _generics, _bounds, trait_items) => {
// note: we need to check if the trait is exported so we can't use
// `LateLintPass::check_trait_item` here.
for tit in trait_items {
let tit_ = cx.tcx.hir().trait_item(tit.id);
match tit_.kind {
hir::TraitItemKind::Const(..) | hir::TraitItemKind::Type(..) => {},
hir::TraitItemKind::Fn(..) => {
if tit.defaultness.has_value() {
// trait method with default body needs inline in case
// an impl is not provided
let desc = "a default trait method";
let item = cx.tcx.hir().trait_item(tit.id);
let attrs = cx.tcx.hir().attrs(item.hir_id());
check_missing_inline_attrs(cx, attrs, item.span, desc);
}
},
}
}
},
hir::ItemKind::Const(..)
| hir::ItemKind::Enum(..)
| hir::ItemKind::Macro(..)
| hir::ItemKind::Mod(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Struct(..)
| hir::ItemKind::TraitAlias(..)
| hir::ItemKind::GlobalAsm(..)
| hir::ItemKind::TyAlias(..)
| hir::ItemKind::Union(..)
| hir::ItemKind::OpaqueTy(..)
| hir::ItemKind::ExternCrate(..)
            | hir::ItemKind::ForeignMod { .. }
            | hir::ItemKind::Impl { .. }
| hir::ItemKind::Use(..) => {},
};
}
fn check_impl_item(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx hir::ImplItem<'_>) {
use rustc_middle::ty::{ImplContainer, TraitContainer};
if rustc_middle::lint::in_external_macro(cx.sess(), impl_item.span) || is_executable_or_proc_macro(cx) {
return;
}
// If the item being implemented is not exported, then we don't need #[inline]
        if !cx.access_levels.is_exported(impl_item.def_id) {
return;
}
let desc = match impl_item.kind {
hir::ImplItemKind::Fn(..) => "a method",
hir::ImplItemKind::Const(..) | hir::ImplItemKind::TyAlias(_) => return,
};
let trait_def_id = match cx.tcx.associated_item(impl_item.def_id).container {
TraitContainer(cid) => Some(cid),
ImplContainer(cid) => cx.tcx.impl_trait_ref(cid).map(|t| t.def_id),
};
if let Some(trait_def_id) = trait_def_id {
            if trait_def_id.is_local() && !cx.access_levels.is_exported(impl_item.def_id) {
// If a trait is being implemented for an item, and the
// trait is not exported, we don't need #[inline]
return;
}
}
let attrs = cx.tcx.hir().attrs(impl_item.hir_id());
check_missing_inline_attrs(cx, attrs, impl_item.span, desc);
}
}
|
{
let desc = "a function";
let attrs = cx.tcx.hir().attrs(it.hir_id());
check_missing_inline_attrs(cx, attrs, it.span, desc);
}
|
conditional_block
|
missing_inline.rs
|
use clippy_utils::diagnostics::span_lint;
use rustc_ast::ast;
use rustc_hir as hir;
use rustc_lint::{self, LateContext, LateLintPass, LintContext};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::source_map::Span;
use rustc_span::sym;
declare_clippy_lint! {
/// ### What it does
/// It lints if an exported function, method, trait method with default impl,
/// or trait method impl is not `#[inline]`.
///
/// ### Why is this bad?
/// In general, it is not. Functions can be inlined across
/// crates when that's profitable as long as any form of LTO is used. When LTO is disabled,
/// functions that are not `#[inline]` cannot be inlined across crates. Certain types of crates
/// might intend for most of the methods in their public API to be able to be inlined across
/// crates even when LTO is disabled. For these types of crates, enabling this lint might make
/// sense. It allows the crate to require all exported methods to be `#[inline]` by default, and
/// then opt out for specific methods where this might not make sense.
///
/// ### Example
/// ```rust
/// pub fn foo() {} // missing #[inline]
/// fn ok() {} // ok
/// #[inline] pub fn bar() {} // ok
/// #[inline(always)] pub fn baz() {} // ok
///
/// pub trait Bar {
/// fn bar(); // ok
/// fn def_bar() {} // missing #[inline]
/// }
///
/// struct Baz;
/// impl Baz {
/// fn private() {} // ok
/// }
///
/// impl Bar for Baz {
/// fn bar() {} // ok - Baz is not exported
/// }
///
/// pub struct PubBaz;
/// impl PubBaz {
/// fn private() {} // ok
    /// pub fn not_private() {} // missing #[inline]
/// }
///
/// impl Bar for PubBaz {
/// fn bar() {} // missing #[inline]
/// fn def_bar() {} // missing #[inline]
/// }
/// ```
pub MISSING_INLINE_IN_PUBLIC_ITEMS,
restriction,
"detects missing `#[inline]` attribute for public callables (functions, trait methods, methods...)"
}
fn check_missing_inline_attrs(cx: &LateContext<'_>, attrs: &[ast::Attribute], sp: Span, desc: &'static str) {
let has_inline = attrs.iter().any(|a| a.has_name(sym::inline));
    if !has_inline {
span_lint(
cx,
MISSING_INLINE_IN_PUBLIC_ITEMS,
sp,
&format!("missing `#[inline]` for {}", desc),
);
}
}
fn is_executable_or_proc_macro(cx: &LateContext<'_>) -> bool {
use rustc_session::config::CrateType;
cx.tcx
.sess
.crate_types()
.iter()
.any(|t: &CrateType| matches!(t, CrateType::Executable | CrateType::ProcMacro))
}
declare_lint_pass!(MissingInline => [MISSING_INLINE_IN_PUBLIC_ITEMS]);
impl<'tcx> LateLintPass<'tcx> for MissingInline {
fn check_item(&mut self, cx: &LateContext<'tcx>, it: &'tcx hir::Item<'_>) {
if rustc_middle::lint::in_external_macro(cx.sess(), it.span) || is_executable_or_proc_macro(cx) {
return;
}
        if !cx.access_levels.is_exported(it.def_id) {
return;
}
match it.kind {
hir::ItemKind::Fn(..) => {
let desc = "a function";
let attrs = cx.tcx.hir().attrs(it.hir_id());
check_missing_inline_attrs(cx, attrs, it.span, desc);
},
hir::ItemKind::Trait(ref _is_auto, ref _unsafe, ref _generics, _bounds, trait_items) => {
// note: we need to check if the trait is exported so we can't use
// `LateLintPass::check_trait_item` here.
for tit in trait_items {
let tit_ = cx.tcx.hir().trait_item(tit.id);
match tit_.kind {
hir::TraitItemKind::Const(..) | hir::TraitItemKind::Type(..) => {},
hir::TraitItemKind::Fn(..) => {
if tit.defaultness.has_value() {
// trait method with default body needs inline in case
// an impl is not provided
let desc = "a default trait method";
let item = cx.tcx.hir().trait_item(tit.id);
let attrs = cx.tcx.hir().attrs(item.hir_id());
check_missing_inline_attrs(cx, attrs, item.span, desc);
}
},
}
}
},
hir::ItemKind::Const(..)
| hir::ItemKind::Enum(..)
| hir::ItemKind::Macro(..)
| hir::ItemKind::Mod(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Struct(..)
| hir::ItemKind::TraitAlias(..)
| hir::ItemKind::GlobalAsm(..)
| hir::ItemKind::TyAlias(..)
| hir::ItemKind::Union(..)
| hir::ItemKind::OpaqueTy(..)
| hir::ItemKind::ExternCrate(..)
            | hir::ItemKind::ForeignMod { .. }
            | hir::ItemKind::Impl { .. }
| hir::ItemKind::Use(..) => {},
};
}
fn
|
(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx hir::ImplItem<'_>) {
use rustc_middle::ty::{ImplContainer, TraitContainer};
if rustc_middle::lint::in_external_macro(cx.sess(), impl_item.span) || is_executable_or_proc_macro(cx) {
return;
}
// If the item being implemented is not exported, then we don't need #[inline]
        if !cx.access_levels.is_exported(impl_item.def_id) {
return;
}
let desc = match impl_item.kind {
hir::ImplItemKind::Fn(..) => "a method",
hir::ImplItemKind::Const(..) | hir::ImplItemKind::TyAlias(_) => return,
};
let trait_def_id = match cx.tcx.associated_item(impl_item.def_id).container {
TraitContainer(cid) => Some(cid),
ImplContainer(cid) => cx.tcx.impl_trait_ref(cid).map(|t| t.def_id),
};
if let Some(trait_def_id) = trait_def_id {
            if trait_def_id.is_local() && !cx.access_levels.is_exported(impl_item.def_id) {
// If a trait is being implemented for an item, and the
// trait is not exported, we don't need #[inline]
return;
}
}
let attrs = cx.tcx.hir().attrs(impl_item.hir_id());
check_missing_inline_attrs(cx, attrs, impl_item.span, desc);
}
}
|
check_impl_item
|
identifier_name
|
missing_inline.rs
|
use clippy_utils::diagnostics::span_lint;
use rustc_ast::ast;
use rustc_hir as hir;
use rustc_lint::{self, LateContext, LateLintPass, LintContext};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::source_map::Span;
use rustc_span::sym;
declare_clippy_lint! {
/// ### What it does
/// It lints if an exported function, method, trait method with default impl,
/// or trait method impl is not `#[inline]`.
///
/// ### Why is this bad?
/// In general, it is not. Functions can be inlined across
/// crates when that's profitable as long as any form of LTO is used. When LTO is disabled,
/// functions that are not `#[inline]` cannot be inlined across crates. Certain types of crates
/// might intend for most of the methods in their public API to be able to be inlined across
/// crates even when LTO is disabled. For these types of crates, enabling this lint might make
/// sense. It allows the crate to require all exported methods to be `#[inline]` by default, and
/// then opt out for specific methods where this might not make sense.
///
/// ### Example
/// ```rust
/// pub fn foo() {} // missing #[inline]
/// fn ok() {} // ok
/// #[inline] pub fn bar() {} // ok
/// #[inline(always)] pub fn baz() {} // ok
///
/// pub trait Bar {
/// fn bar(); // ok
/// fn def_bar() {} // missing #[inline]
/// }
///
/// struct Baz;
/// impl Baz {
/// fn private() {} // ok
/// }
///
/// impl Bar for Baz {
/// fn bar() {} // ok - Baz is not exported
/// }
///
/// pub struct PubBaz;
/// impl PubBaz {
/// fn private() {} // ok
    /// pub fn not_private() {} // missing #[inline]
/// }
///
/// impl Bar for PubBaz {
/// fn bar() {} // missing #[inline]
/// fn def_bar() {} // missing #[inline]
/// }
/// ```
pub MISSING_INLINE_IN_PUBLIC_ITEMS,
restriction,
"detects missing `#[inline]` attribute for public callables (functions, trait methods, methods...)"
}
fn check_missing_inline_attrs(cx: &LateContext<'_>, attrs: &[ast::Attribute], sp: Span, desc: &'static str) {
let has_inline = attrs.iter().any(|a| a.has_name(sym::inline));
    if !has_inline {
span_lint(
cx,
MISSING_INLINE_IN_PUBLIC_ITEMS,
sp,
&format!("missing `#[inline]` for {}", desc),
);
}
}
fn is_executable_or_proc_macro(cx: &LateContext<'_>) -> bool {
use rustc_session::config::CrateType;
cx.tcx
.sess
.crate_types()
.iter()
.any(|t: &CrateType| matches!(t, CrateType::Executable | CrateType::ProcMacro))
}
declare_lint_pass!(MissingInline => [MISSING_INLINE_IN_PUBLIC_ITEMS]);
impl<'tcx> LateLintPass<'tcx> for MissingInline {
fn check_item(&mut self, cx: &LateContext<'tcx>, it: &'tcx hir::Item<'_>) {
if rustc_middle::lint::in_external_macro(cx.sess(), it.span) || is_executable_or_proc_macro(cx) {
return;
}
        if !cx.access_levels.is_exported(it.def_id) {
return;
}
match it.kind {
hir::ItemKind::Fn(..) => {
|
hir::ItemKind::Trait(ref _is_auto, ref _unsafe, ref _generics, _bounds, trait_items) => {
// note: we need to check if the trait is exported so we can't use
// `LateLintPass::check_trait_item` here.
for tit in trait_items {
let tit_ = cx.tcx.hir().trait_item(tit.id);
match tit_.kind {
hir::TraitItemKind::Const(..) | hir::TraitItemKind::Type(..) => {},
hir::TraitItemKind::Fn(..) => {
if tit.defaultness.has_value() {
// trait method with default body needs inline in case
// an impl is not provided
let desc = "a default trait method";
let item = cx.tcx.hir().trait_item(tit.id);
let attrs = cx.tcx.hir().attrs(item.hir_id());
check_missing_inline_attrs(cx, attrs, item.span, desc);
}
},
}
}
},
hir::ItemKind::Const(..)
| hir::ItemKind::Enum(..)
| hir::ItemKind::Macro(..)
| hir::ItemKind::Mod(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Struct(..)
| hir::ItemKind::TraitAlias(..)
| hir::ItemKind::GlobalAsm(..)
| hir::ItemKind::TyAlias(..)
| hir::ItemKind::Union(..)
| hir::ItemKind::OpaqueTy(..)
| hir::ItemKind::ExternCrate(..)
            | hir::ItemKind::ForeignMod { .. }
            | hir::ItemKind::Impl { .. }
| hir::ItemKind::Use(..) => {},
};
}
fn check_impl_item(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx hir::ImplItem<'_>) {
use rustc_middle::ty::{ImplContainer, TraitContainer};
if rustc_middle::lint::in_external_macro(cx.sess(), impl_item.span) || is_executable_or_proc_macro(cx) {
return;
}
// If the item being implemented is not exported, then we don't need #[inline]
        if !cx.access_levels.is_exported(impl_item.def_id) {
return;
}
let desc = match impl_item.kind {
hir::ImplItemKind::Fn(..) => "a method",
hir::ImplItemKind::Const(..) | hir::ImplItemKind::TyAlias(_) => return,
};
let trait_def_id = match cx.tcx.associated_item(impl_item.def_id).container {
TraitContainer(cid) => Some(cid),
ImplContainer(cid) => cx.tcx.impl_trait_ref(cid).map(|t| t.def_id),
};
if let Some(trait_def_id) = trait_def_id {
            if trait_def_id.is_local() && !cx.access_levels.is_exported(impl_item.def_id) {
// If a trait is being implemented for an item, and the
// trait is not exported, we don't need #[inline]
return;
}
}
let attrs = cx.tcx.hir().attrs(impl_item.hir_id());
check_missing_inline_attrs(cx, attrs, impl_item.span, desc);
}
}
|
let desc = "a function";
let attrs = cx.tcx.hir().attrs(it.hir_id());
check_missing_inline_attrs(cx, attrs, it.span, desc);
},
|
random_line_split
|
missing_inline.rs
|
use clippy_utils::diagnostics::span_lint;
use rustc_ast::ast;
use rustc_hir as hir;
use rustc_lint::{self, LateContext, LateLintPass, LintContext};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::source_map::Span;
use rustc_span::sym;
declare_clippy_lint! {
/// ### What it does
/// It lints if an exported function, method, trait method with default impl,
/// or trait method impl is not `#[inline]`.
///
/// ### Why is this bad?
/// In general, it is not. Functions can be inlined across
/// crates when that's profitable as long as any form of LTO is used. When LTO is disabled,
/// functions that are not `#[inline]` cannot be inlined across crates. Certain types of crates
/// might intend for most of the methods in their public API to be able to be inlined across
/// crates even when LTO is disabled. For these types of crates, enabling this lint might make
/// sense. It allows the crate to require all exported methods to be `#[inline]` by default, and
/// then opt out for specific methods where this might not make sense.
///
/// ### Example
/// ```rust
/// pub fn foo() {} // missing #[inline]
/// fn ok() {} // ok
/// #[inline] pub fn bar() {} // ok
/// #[inline(always)] pub fn baz() {} // ok
///
/// pub trait Bar {
/// fn bar(); // ok
/// fn def_bar() {} // missing #[inline]
/// }
///
/// struct Baz;
/// impl Baz {
/// fn private() {} // ok
/// }
///
/// impl Bar for Baz {
/// fn bar() {} // ok - Baz is not exported
/// }
///
/// pub struct PubBaz;
/// impl PubBaz {
/// fn private() {} // ok
    /// pub fn not_private() {} // missing #[inline]
/// }
///
/// impl Bar for PubBaz {
/// fn bar() {} // missing #[inline]
/// fn def_bar() {} // missing #[inline]
/// }
/// ```
pub MISSING_INLINE_IN_PUBLIC_ITEMS,
restriction,
"detects missing `#[inline]` attribute for public callables (functions, trait methods, methods...)"
}
fn check_missing_inline_attrs(cx: &LateContext<'_>, attrs: &[ast::Attribute], sp: Span, desc: &'static str) {
let has_inline = attrs.iter().any(|a| a.has_name(sym::inline));
    if !has_inline {
span_lint(
cx,
MISSING_INLINE_IN_PUBLIC_ITEMS,
sp,
&format!("missing `#[inline]` for {}", desc),
);
}
}
fn is_executable_or_proc_macro(cx: &LateContext<'_>) -> bool {
use rustc_session::config::CrateType;
cx.tcx
.sess
.crate_types()
.iter()
.any(|t: &CrateType| matches!(t, CrateType::Executable | CrateType::ProcMacro))
}
declare_lint_pass!(MissingInline => [MISSING_INLINE_IN_PUBLIC_ITEMS]);
impl<'tcx> LateLintPass<'tcx> for MissingInline {
fn check_item(&mut self, cx: &LateContext<'tcx>, it: &'tcx hir::Item<'_>)
|
hir::TraitItemKind::Const(..) | hir::TraitItemKind::Type(..) => {},
hir::TraitItemKind::Fn(..) => {
if tit.defaultness.has_value() {
// trait method with default body needs inline in case
// an impl is not provided
let desc = "a default trait method";
let item = cx.tcx.hir().trait_item(tit.id);
let attrs = cx.tcx.hir().attrs(item.hir_id());
check_missing_inline_attrs(cx, attrs, item.span, desc);
}
},
}
}
},
hir::ItemKind::Const(..)
| hir::ItemKind::Enum(..)
| hir::ItemKind::Macro(..)
| hir::ItemKind::Mod(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Struct(..)
| hir::ItemKind::TraitAlias(..)
| hir::ItemKind::GlobalAsm(..)
| hir::ItemKind::TyAlias(..)
| hir::ItemKind::Union(..)
| hir::ItemKind::OpaqueTy(..)
| hir::ItemKind::ExternCrate(..)
            | hir::ItemKind::ForeignMod { .. }
            | hir::ItemKind::Impl { .. }
| hir::ItemKind::Use(..) => {},
};
}
fn check_impl_item(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx hir::ImplItem<'_>) {
use rustc_middle::ty::{ImplContainer, TraitContainer};
if rustc_middle::lint::in_external_macro(cx.sess(), impl_item.span) || is_executable_or_proc_macro(cx) {
return;
}
// If the item being implemented is not exported, then we don't need #[inline]
        if !cx.access_levels.is_exported(impl_item.def_id) {
return;
}
let desc = match impl_item.kind {
hir::ImplItemKind::Fn(..) => "a method",
hir::ImplItemKind::Const(..) | hir::ImplItemKind::TyAlias(_) => return,
};
let trait_def_id = match cx.tcx.associated_item(impl_item.def_id).container {
TraitContainer(cid) => Some(cid),
ImplContainer(cid) => cx.tcx.impl_trait_ref(cid).map(|t| t.def_id),
};
if let Some(trait_def_id) = trait_def_id {
            if trait_def_id.is_local() && !cx.access_levels.is_exported(impl_item.def_id) {
// If a trait is being implemented for an item, and the
// trait is not exported, we don't need #[inline]
return;
}
}
let attrs = cx.tcx.hir().attrs(impl_item.hir_id());
check_missing_inline_attrs(cx, attrs, impl_item.span, desc);
}
}
|
{
if rustc_middle::lint::in_external_macro(cx.sess(), it.span) || is_executable_or_proc_macro(cx) {
return;
}
if !cx.access_levels.is_exported(it.def_id) {
return;
}
match it.kind {
hir::ItemKind::Fn(..) => {
let desc = "a function";
let attrs = cx.tcx.hir().attrs(it.hir_id());
check_missing_inline_attrs(cx, attrs, it.span, desc);
},
hir::ItemKind::Trait(ref _is_auto, ref _unsafe, ref _generics, _bounds, trait_items) => {
// note: we need to check if the trait is exported so we can't use
// `LateLintPass::check_trait_item` here.
for tit in trait_items {
let tit_ = cx.tcx.hir().trait_item(tit.id);
match tit_.kind {
|
identifier_body
|
entry.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use error::Result;
use error::GPSErrorKind as GPSEK;
use error::ResultExt;
use types::*;
use libimagstore::store::Entry;
use toml_query::read::TomlValueReadExt;
use toml_query::insert::TomlValueInsertExt;
use toml_query::delete::TomlValueDeleteExt;
pub trait GPSEntry {
fn set_coordinates(&mut self, c: Coordinates) -> Result<()>;
fn get_coordinates(&self) -> Result<Option<Coordinates>>;
/// Remove the coordinates from the entry
///
/// # Returns
///
/// The return type is a bit complicated, but that has a reason:
///
/// The outer Result<_> is used for notifying a failure during the header read/write action.
/// If the Option<_> is Some(_), the value was deleted.
/// The inner Result<_> is used for parsing failures during the parsing of the deleted value.
///
/// So:
///
/// * Ok(Some(Ok(_))) if the coordinates were deleted, returning the deleted value
/// * Ok(Some(Err(_))) if the coordinates were deleted, but the deleted value couldn't be parsed
/// * Ok(None) if there were no coordinates to delete
/// * Err(e) if the deleting failed
///
fn remove_coordinates(&mut self) -> Result<Option<Result<Coordinates>>>;
}
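// Hedged usage sketch (not from the original file) of the nested return type
// documented above, written against this crate's own `Result` alias.
fn describe_removal(entry: &mut Entry) -> Result<&'static str> {
    Ok(match entry.remove_coordinates()? {
        Some(Ok(_coords)) => "coordinates removed and the old value parsed back",
        Some(Err(_parse_error)) => "coordinates removed, but the stored value did not parse",
        None => "no coordinates were stored in the first place",
    })
}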
impl GPSEntry for Entry {
fn
|
(&mut self, c: Coordinates) -> Result<()> {
self.get_header_mut()
.insert("gps.coordinates", c.into())
.map(|_| ())
.chain_err(|| GPSEK::HeaderWriteError)
}
fn get_coordinates(&self) -> Result<Option<Coordinates>> {
match self.get_header().read("gps.coordinates").chain_err(|| GPSEK::HeaderWriteError)? {
Some(hdr) => Coordinates::from_value(hdr).map(Some),
None => Ok(None),
}
}
fn remove_coordinates(&mut self) -> Result<Option<Result<Coordinates>>> {
let coordinates = self.get_coordinates();
let patterns = [
"gps.coordinates.latitude.degree",
"gps.coordinates.latitude.minutes",
"gps.coordinates.latitude.seconds",
"gps.coordinates.longitude.degree",
"gps.coordinates.longitude.minutes",
"gps.coordinates.longitude.seconds",
"gps.coordinates.latitude",
"gps.coordinates.longitude",
"gps.coordinates",
"gps",
];
let hdr = self.get_header_mut();
for pattern in patterns.iter() {
let _ = hdr.delete(pattern).chain_err(|| GPSEK::HeaderWriteError)?;
}
match coordinates {
Ok(None) => Ok(None),
Ok(Some(some)) => Ok(Some(Ok(some))),
Err(e) => Ok(Some(Err(e))),
}
}
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use libimagstore::store::Store;
use entry::*;
fn setup_logging() {
use env_logger;
let _ = env_logger::init().unwrap_or(());
}
fn get_store() -> Store {
use libimagstore::file_abstraction::InMemoryFileAbstraction;
let backend = Box::new(InMemoryFileAbstraction::new());
Store::new_with_backend(PathBuf::from("/"), &None, backend).unwrap()
}
#[test]
fn test_set_gps() {
setup_logging();
let store = get_store();
let mut entry = store.create(PathBuf::from("test_set_gps")).unwrap();
let coordinates = Coordinates {
latitude: GPSValue::new(0, 0, 0),
longitude: GPSValue::new(0, 0, 0),
};
let res = entry.set_coordinates(coordinates);
assert!(res.is_ok());
}
#[test]
fn test_setget_gps() {
setup_logging();
let store = get_store();
let mut entry = store.create(PathBuf::from("test_setget_gps")).unwrap();
let coordinates = Coordinates {
latitude: GPSValue::new(0, 0, 0),
longitude: GPSValue::new(0, 0, 0),
};
let res = entry.set_coordinates(coordinates);
assert!(res.is_ok());
let coordinates = entry.get_coordinates();
assert!(coordinates.is_ok());
let coordinates = coordinates.unwrap();
assert!(coordinates.is_some());
let coordinates = coordinates.unwrap();
assert_eq!(0, coordinates.longitude.degree);
assert_eq!(0, coordinates.longitude.minutes);
assert_eq!(0, coordinates.longitude.seconds);
assert_eq!(0, coordinates.latitude.degree);
assert_eq!(0, coordinates.latitude.minutes);
assert_eq!(0, coordinates.latitude.seconds);
}
}
|
set_coordinates
|
identifier_name
|
entry.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use error::Result;
use error::GPSErrorKind as GPSEK;
use error::ResultExt;
use types::*;
use libimagstore::store::Entry;
use toml_query::read::TomlValueReadExt;
use toml_query::insert::TomlValueInsertExt;
use toml_query::delete::TomlValueDeleteExt;
pub trait GPSEntry {
fn set_coordinates(&mut self, c: Coordinates) -> Result<()>;
fn get_coordinates(&self) -> Result<Option<Coordinates>>;
/// Remove the coordinates from the entry
///
/// # Returns
///
/// The return type is a bit complicated, but that has a reason:
///
/// The outer Result<_> is used for notifying a failure during the header read/write action.
/// If the Option<_> is Some(_), the value was deleted.
/// The inner Result<_> is used for parsing failures during the parsing of the deleted value.
///
/// So:
///
/// * Ok(Some(Ok(_))) if the coordinates were deleted, returning the deleted value
/// * Ok(Some(Err(_))) if the coordinates were deleted, but the deleted value couldn't be parsed
/// * Ok(None) if there were no coordinates to delete
/// * Err(e) if the deleting failed
///
fn remove_coordinates(&mut self) -> Result<Option<Result<Coordinates>>>;
}
impl GPSEntry for Entry {
fn set_coordinates(&mut self, c: Coordinates) -> Result<()> {
self.get_header_mut()
.insert("gps.coordinates", c.into())
.map(|_| ())
.chain_err(|| GPSEK::HeaderWriteError)
}
fn get_coordinates(&self) -> Result<Option<Coordinates>> {
match self.get_header().read("gps.coordinates").chain_err(|| GPSEK::HeaderWriteError)? {
Some(hdr) => Coordinates::from_value(hdr).map(Some),
None => Ok(None),
}
}
fn remove_coordinates(&mut self) -> Result<Option<Result<Coordinates>>> {
let coordinates = self.get_coordinates();
let patterns = [
"gps.coordinates.latitude.degree",
"gps.coordinates.latitude.minutes",
"gps.coordinates.latitude.seconds",
"gps.coordinates.longitude.degree",
"gps.coordinates.longitude.minutes",
"gps.coordinates.longitude.seconds",
"gps.coordinates.latitude",
"gps.coordinates.longitude",
"gps.coordinates",
"gps",
];
let hdr = self.get_header_mut();
for pattern in patterns.iter() {
let _ = hdr.delete(pattern).chain_err(|| GPSEK::HeaderWriteError)?;
}
match coordinates {
Ok(None) => Ok(None),
Ok(Some(some)) => Ok(Some(Ok(some))),
Err(e) => Ok(Some(Err(e))),
}
}
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use libimagstore::store::Store;
use entry::*;
fn setup_logging()
|
fn get_store() -> Store {
use libimagstore::file_abstraction::InMemoryFileAbstraction;
let backend = Box::new(InMemoryFileAbstraction::new());
Store::new_with_backend(PathBuf::from("/"), &None, backend).unwrap()
}
#[test]
fn test_set_gps() {
setup_logging();
let store = get_store();
let mut entry = store.create(PathBuf::from("test_set_gps")).unwrap();
let coordinates = Coordinates {
latitude: GPSValue::new(0, 0, 0),
longitude: GPSValue::new(0, 0, 0),
};
let res = entry.set_coordinates(coordinates);
assert!(res.is_ok());
}
#[test]
fn test_setget_gps() {
setup_logging();
let store = get_store();
let mut entry = store.create(PathBuf::from("test_setget_gps")).unwrap();
let coordinates = Coordinates {
latitude: GPSValue::new(0, 0, 0),
longitude: GPSValue::new(0, 0, 0),
};
let res = entry.set_coordinates(coordinates);
assert!(res.is_ok());
let coordinates = entry.get_coordinates();
assert!(coordinates.is_ok());
let coordinates = coordinates.unwrap();
assert!(coordinates.is_some());
let coordinates = coordinates.unwrap();
assert_eq!(0, coordinates.longitude.degree);
assert_eq!(0, coordinates.longitude.minutes);
assert_eq!(0, coordinates.longitude.seconds);
assert_eq!(0, coordinates.latitude.degree);
assert_eq!(0, coordinates.latitude.minutes);
assert_eq!(0, coordinates.latitude.seconds);
}
}
|
{
use env_logger;
let _ = env_logger::init().unwrap_or(());
}
|
identifier_body
|
entry.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use error::Result;
use error::GPSErrorKind as GPSEK;
use error::ResultExt;
use types::*;
use libimagstore::store::Entry;
use toml_query::read::TomlValueReadExt;
use toml_query::insert::TomlValueInsertExt;
use toml_query::delete::TomlValueDeleteExt;
pub trait GPSEntry {
fn set_coordinates(&mut self, c: Coordinates) -> Result<()>;
fn get_coordinates(&self) -> Result<Option<Coordinates>>;
/// Remove the coordinates from the entry
///
/// # Returns
///
/// The return type is a bit complicated, but that has a reason:
///
/// The outer Result<_> is used for notifying a failure during the header read/write action.
/// If the Option<_> is Some(_), the value was deleted.
/// The inner Result<_> is used for parsing failures during the parsing of the deleted value.
///
/// So:
///
/// * Ok(Some(Ok(_))) if the coordinates were deleted, returning the deleted value
/// * Ok(Some(Err(_))) if the coordinates were deleted, but the deleted value couldn't be parsed
/// * Ok(None) if there were no coordinates to delete
/// * Err(e) if the deleting failed
///
fn remove_coordinates(&mut self) -> Result<Option<Result<Coordinates>>>;
}
impl GPSEntry for Entry {
fn set_coordinates(&mut self, c: Coordinates) -> Result<()> {
self.get_header_mut()
.insert("gps.coordinates", c.into())
.map(|_| ())
.chain_err(|| GPSEK::HeaderWriteError)
}
fn get_coordinates(&self) -> Result<Option<Coordinates>> {
match self.get_header().read("gps.coordinates").chain_err(|| GPSEK::HeaderWriteError)? {
Some(hdr) => Coordinates::from_value(hdr).map(Some),
None => Ok(None),
}
}
fn remove_coordinates(&mut self) -> Result<Option<Result<Coordinates>>> {
let coordinates = self.get_coordinates();
let patterns = [
"gps.coordinates.latitude.degree",
"gps.coordinates.latitude.minutes",
"gps.coordinates.latitude.seconds",
"gps.coordinates.longitude.degree",
"gps.coordinates.longitude.minutes",
"gps.coordinates.longitude.seconds",
"gps.coordinates.latitude",
"gps.coordinates.longitude",
"gps.coordinates",
"gps",
];
let hdr = self.get_header_mut();
for pattern in patterns.iter() {
let _ = hdr.delete(pattern).chain_err(|| GPSEK::HeaderWriteError)?;
}
match coordinates {
Ok(None) => Ok(None),
Ok(Some(some)) => Ok(Some(Ok(some))),
Err(e) => Ok(Some(Err(e))),
}
}
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use libimagstore::store::Store;
use entry::*;
fn setup_logging() {
use env_logger;
let _ = env_logger::init().unwrap_or(());
}
fn get_store() -> Store {
use libimagstore::file_abstraction::InMemoryFileAbstraction;
let backend = Box::new(InMemoryFileAbstraction::new());
Store::new_with_backend(PathBuf::from("/"), &None, backend).unwrap()
}
#[test]
fn test_set_gps() {
setup_logging();
let store = get_store();
let mut entry = store.create(PathBuf::from("test_set_gps")).unwrap();
let coordinates = Coordinates {
latitude: GPSValue::new(0, 0, 0),
longitude: GPSValue::new(0, 0, 0),
};
let res = entry.set_coordinates(coordinates);
assert!(res.is_ok());
}
#[test]
fn test_setget_gps() {
setup_logging();
|
let store = get_store();
let mut entry = store.create(PathBuf::from("test_setget_gps")).unwrap();
let coordinates = Coordinates {
latitude: GPSValue::new(0, 0, 0),
longitude: GPSValue::new(0, 0, 0),
};
let res = entry.set_coordinates(coordinates);
assert!(res.is_ok());
let coordinates = entry.get_coordinates();
assert!(coordinates.is_ok());
let coordinates = coordinates.unwrap();
assert!(coordinates.is_some());
let coordinates = coordinates.unwrap();
assert_eq!(0, coordinates.longitude.degree);
assert_eq!(0, coordinates.longitude.minutes);
assert_eq!(0, coordinates.longitude.seconds);
assert_eq!(0, coordinates.latitude.degree);
assert_eq!(0, coordinates.latitude.minutes);
assert_eq!(0, coordinates.latitude.seconds);
}
}
|
random_line_split
|
|
tvec.rs
|
option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)]
use back::abi;
use llvm;
use llvm::ValueRef;
use trans::base::*;
use trans::base;
use trans::build::*;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::consts;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
use trans::expr::{Dest, Ignore, SaveIn};
use trans::expr;
use trans::machine::llsize_of_alloc;
use trans::type_::Type;
use trans::type_of;
use middle::ty::{self, Ty};
use util::ppaux::ty_to_string;
use syntax::ast;
use syntax::parse::token::InternedString;
#[derive(Copy, Clone)]
struct VecTypes<'tcx> {
unit_ty: Ty<'tcx>,
llunit_ty: Type
}
impl<'tcx> VecTypes<'tcx> {
pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String {
format!("VecTypes {{unit_ty={}, llunit_ty={}}}",
ty_to_string(ccx.tcx(), self.unit_ty),
ccx.tn().type_to_string(self.llunit_ty))
}
}
pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
dest: expr::Dest)
-> Block<'blk, 'tcx> {
//!
//
// [...] allocates a fixed-size array and moves it around "by value".
// In this case, it means that the caller has already given us a location
// to store the array of the suitable size, so all we have to do is
// generate the content.
debug!("trans_fixed_vstore(expr={}, dest={})",
bcx.expr_to_string(expr), dest.to_string(bcx.ccx()));
let vt = vec_types_from_expr(bcx, expr);
return match dest {
Ignore => write_content(bcx, &vt, expr, expr, dest),
SaveIn(lldest) => {
// lldest will have type *[T x N], but we want the type *T,
// so use GEP to convert:
let lldest = GEPi(bcx, lldest, &[0, 0]);
write_content(bcx, &vt, expr, expr, SaveIn(lldest))
}
};
}
/// &[...] allocates memory on the stack and writes the values into it, returning the vector (the
/// caller must make the reference). "..." is similar except that the memory can be statically
/// allocated and we return a reference (strings are always by-ref).
pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
slice_expr: &ast::Expr,
content_expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let fcx = bcx.fcx;
let ccx = fcx.ccx;
let mut bcx = bcx;
debug!("trans_slice_vec(slice_expr={})",
bcx.expr_to_string(slice_expr));
let vec_ty = node_id_type(bcx, slice_expr.id);
// Handle the "..." case (returns a slice since strings are always unsized):
if let ast::ExprLit(ref lit) = content_expr.node {
if let ast::LitStr(ref s, _) = lit.node {
let scratch = rvalue_scratch_datum(bcx, vec_ty, "");
bcx = trans_lit_str(bcx,
content_expr,
s.clone(),
SaveIn(scratch.val));
return DatumBlock::new(bcx, scratch.to_expr_datum());
}
}
// Handle the &[...] case:
let vt = vec_types_from_expr(bcx, content_expr);
let count = elements_required(bcx, content_expr);
debug!(" vt={}, count={}", vt.to_string(ccx), count);
let fixed_ty = ty::mk_vec(bcx.tcx(),
vt.unit_ty,
Some(count));
let llfixed_ty = type_of::type_of(bcx.ccx(), fixed_ty);
// Always create an alloca even if zero-sized, to preserve
// the non-null invariant of the inner slice ptr
let llfixed = base::alloca(bcx, llfixed_ty, "");
if count > 0 {
// Arrange for the backing array to be cleaned up.
let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id);
fcx.schedule_lifetime_end(cleanup_scope, llfixed);
fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty);
// Generate the content into the backing array.
// llfixed has type *[T x N], but we want the type *T,
// so use GEP to convert
bcx = write_content(bcx, &vt, slice_expr, content_expr,
SaveIn(GEPi(bcx, llfixed, &[0, 0])));
};
immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock()
}
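// Added illustration (not part of the original compiler source): the two
// source-level shapes that `trans_slice_vec` above distinguishes. A `&[...]`
// expression gets a stack-allocated backing array that is then borrowed,
// while a string literal is already a slice into static memory and needs no
// copy. The function name is invented purely for this sketch.
#[allow(dead_code)]
fn _slice_vec_shapes() {
    let _xs: &[i32] = &[1, 2, 3]; // backing array on the stack, then borrowed
    let _s: &str = "abc";         // borrowed straight from static memory
}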
/// Literal strings translate to slices into static memory. This is different from
/// trans_slice_vstore() above because it doesn't need to copy the content anywhere.
pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lit_expr: &ast::Expr,
str_lit: InternedString,
dest: Dest)
-> Block<'blk, 'tcx> {
debug!("trans_lit_str(lit_expr={}, dest={})",
bcx.expr_to_string(lit_expr),
dest.to_string(bcx.ccx()));
match dest {
Ignore => bcx,
SaveIn(lldest) => {
let bytes = str_lit.len();
let llbytes = C_uint(bcx.ccx(), bytes);
let llcstr = C_cstr(bcx.ccx(), str_lit, false);
let llcstr = consts::ptrcast(llcstr, Type::i8p(bcx.ccx()));
Store(bcx, llcstr, GEPi(bcx, lldest, &[0, abi::FAT_PTR_ADDR]));
Store(bcx, llbytes, GEPi(bcx, lldest, &[0, abi::FAT_PTR_EXTRA]));
bcx
}
}
}
fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
vt: &VecTypes<'tcx>,
vstore_expr: &ast::Expr,
content_expr: &ast::Expr,
dest: Dest)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("tvec::write_content");
let fcx = bcx.fcx;
let mut bcx = bcx;
debug!("write_content(vt={}, dest={}, vstore_expr={})",
vt.to_string(bcx.ccx()),
dest.to_string(bcx.ccx()),
bcx.expr_to_string(vstore_expr));
match content_expr.node {
ast::ExprLit(ref lit) => {
match lit.node {
ast::LitStr(ref s, _) => {
match dest {
Ignore => return bcx,
SaveIn(lldest) => {
let bytes = s.len();
let llbytes = C_uint(bcx.ccx(), bytes);
let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false);
base::call_memcpy(bcx,
lldest,
llcstr,
llbytes,
1);
return bcx;
}
}
}
_ => {
bcx.tcx().sess.span_bug(content_expr.span,
"unexpected evec content");
}
}
}
ast::ExprVec(ref elements) => {
match dest {
Ignore => {
for element in elements {
bcx = expr::trans_into(bcx, &**element, Ignore);
}
}
SaveIn(lldest) => {
let temp_scope = fcx.push_custom_cleanup_scope();
for (i, element) in elements.iter().enumerate() {
let lleltptr = GEPi(bcx, lldest, &[i]);
debug!("writing index {} with lleltptr={}",
i, bcx.val_to_string(lleltptr));
bcx = expr::trans_into(bcx, &**element,
SaveIn(lleltptr));
let scope = cleanup::CustomScope(temp_scope);
fcx.schedule_lifetime_end(scope, lleltptr);
fcx.schedule_drop_mem(scope, lleltptr, vt.unit_ty);
}
fcx.pop_custom_cleanup_scope(temp_scope);
}
}
return bcx;
}
ast::ExprRepeat(ref element, ref count_expr) => {
match dest {
Ignore => {
return expr::trans_into(bcx, &**element, Ignore);
}
SaveIn(lldest) => {
match ty::eval_repeat_count(bcx.tcx(), &**count_expr) {
0 => expr::trans_into(bcx, &**element, Ignore),
1 => expr::trans_into(bcx, &**element, SaveIn(lldest)),
count => {
let elem = unpack_datum!(bcx, expr::trans(bcx, &**element));
let bcx = iter_vec_loop(bcx, lldest, vt,
C_uint(bcx.ccx(), count),
|set_bcx, lleltptr, _| {
elem.shallow_copy(set_bcx, lleltptr)
});
bcx
}
}
}
}
}
_ => {
bcx.tcx().sess.span_bug(content_expr.span,
"unexpected vec content");
}
}
}
fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &ast::Expr)
-> VecTypes<'tcx> {
let vec_ty = node_id_type(bcx, vec_expr.id);
vec_types(bcx, ty::sequence_element_type(bcx.tcx(), vec_ty))
}
fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>)
-> VecTypes<'tcx> {
VecTypes {
unit_ty: unit_ty,
llunit_ty: type_of::type_of(bcx.ccx(), unit_ty)
}
}
fn elements_required(bcx: Block, content_expr: &ast::Expr) -> usize {
//! Figure out the number of elements we need to store this content
match content_expr.node {
ast::ExprLit(ref lit) => {
match lit.node {
ast::LitStr(ref s, _) => s.len(),
_ => {
bcx.tcx().sess.span_bug(content_expr.span,
"unexpected evec content")
}
}
},
ast::ExprVec(ref es) => es.len(),
ast::ExprRepeat(_, ref count_expr) => {
ty::eval_repeat_count(bcx.tcx(), &**count_expr)
}
_ => bcx.tcx().sess.span_bug(content_expr.span,
"unexpected vec content")
}
}
/// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval`
/// which should be by ref.
pub fn
|
(bcx: Block,
llval: ValueRef,
vec_length: usize)
-> (ValueRef, ValueRef) {
let ccx = bcx.ccx();
let base = expr::get_dataptr(bcx, llval);
let len = C_uint(ccx, vec_length);
(base, len)
}
/// Converts a vector into the slice pair. The vector should be stored in `llval` which should be
/// by-reference. If you have a datum, you would probably prefer to call
/// `Datum::get_base_and_len()` which will handle any conversions for you.
pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
llval: ValueRef,
vec_ty: Ty<'tcx>)
-> (ValueRef, ValueRef) {
let ccx = bcx.ccx();
match vec_ty.sty {
ty::ty_vec(_, Some(n)) => get_fixed_base_and_len(bcx, llval, n),
ty::ty_vec(_, None) | ty::ty_str => {
let base = Load(bcx, expr::get_dataptr(bcx, llval));
let len = Load(bcx, expr::get_len(bcx, llval));
(base, len)
}
// Only used for pattern matching.
ty::ty_uniq(ty) | ty::ty_rptr(_, ty::mt{ty,..}) => {
let inner = if type_is_sized(bcx.tcx(), ty) {
Load(bcx, llval)
} else {
llval
};
get_base_and_len(bcx, inner, ty)
},
_ => ccx.sess().bug("unexpected type in get_base_and_len"),
}
}
fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
data_ptr: ValueRef,
vt: &VecTypes<'tcx>,
count: ValueRef,
f: F)
-> Block<'blk, 'tcx> where
F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
{
let _icx = push_ctxt("tvec::iter_vec_loop");
if bcx.unreachable.get() {
return bcx;
}
let fcx = bcx.fcx;
let loop_bcx = fcx.new_temp_block("expr_repeat");
let next_bcx = fcx.new_temp_block("expr_repeat: next");
Br(bcx, loop_bcx.llbb, DebugLoc::None);
let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
&[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
let bcx = loop_bcx;
let lleltptr = if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
data_ptr
} else {
InBoundsGEP(bcx, data_ptr, &[loop_counter])
};
let bcx = f(bcx, lleltptr, vt.unit_ty);
let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1us), DebugLoc::None);
AddIncomingToPhi(loop_counter, plusone, bcx.llbb);
let cond_val = ICmp(bcx, llvm::IntULT, plusone, count, DebugLoc::None);
CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None);
next_bcx
}
pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
data_ptr: ValueRef,
unit_ty: Ty<'tcx>,
len: ValueRef,
f: F)
-> Block<'blk, 'tcx> where
F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
{
let _icx = push_ctxt("tvec::iter_vec_raw");
let fcx = bcx.fcx;
let vt = vec_types(bcx, unit_ty);
if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
iter_vec_loop(bcx, data_ptr, &vt, len, f)
} else {
// Calculate the last pointer address we want to handle.
let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]);
// Now perform the iteration.
let header_bcx = fcx.new_temp_block("iter_vec_loop_header");
Br(bcx, header_bcx.llbb, DebugLoc::None);
let data_ptr =
Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]);
let not_yet_at_end =
ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None);
let body_bcx = fcx.new_temp_block("iter_vec_loop_body");
let next_bcx = fcx.new_temp_block("iter_vec_next");
CondBr(
|
get_fixed_base_and_len
|
identifier_name
|
tvec.rs
|
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)]
use back::abi;
use llvm;
use llvm::ValueRef;
use trans::base::*;
use trans::base;
use trans::build::*;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::consts;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
use trans::expr::{Dest, Ignore, SaveIn};
use trans::expr;
use trans::machine::llsize_of_alloc;
use trans::type_::Type;
use trans::type_of;
use middle::ty::{self, Ty};
use util::ppaux::ty_to_string;
use syntax::ast;
use syntax::parse::token::InternedString;
#[derive(Copy, Clone)]
struct VecTypes<'tcx> {
unit_ty: Ty<'tcx>,
llunit_ty: Type
}
impl<'tcx> VecTypes<'tcx> {
pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String {
format!("VecTypes {{unit_ty={}, llunit_ty={}}}",
ty_to_string(ccx.tcx(), self.unit_ty),
ccx.tn().type_to_string(self.llunit_ty))
}
}
pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
dest: expr::Dest)
-> Block<'blk, 'tcx> {
//!
//
// [...] allocates a fixed-size array and moves it around "by value".
// In this case, it means that the caller has already given us a location
// to store the array of the suitable size, so all we have to do is
// generate the content.
debug!("trans_fixed_vstore(expr={}, dest={})",
bcx.expr_to_string(expr), dest.to_string(bcx.ccx()));
let vt = vec_types_from_expr(bcx, expr);
return match dest {
Ignore => write_content(bcx, &vt, expr, expr, dest),
SaveIn(lldest) => {
// lldest will have type *[T x N], but we want the type *T,
// so use GEP to convert:
let lldest = GEPi(bcx, lldest, &[0, 0]);
write_content(bcx, &vt, expr, expr, SaveIn(lldest))
}
};
}
|
slice_expr: &ast::Expr,
content_expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let fcx = bcx.fcx;
let ccx = fcx.ccx;
let mut bcx = bcx;
debug!("trans_slice_vec(slice_expr={})",
bcx.expr_to_string(slice_expr));
let vec_ty = node_id_type(bcx, slice_expr.id);
// Handle the "..." case (returns a slice since strings are always unsized):
if let ast::ExprLit(ref lit) = content_expr.node {
if let ast::LitStr(ref s, _) = lit.node {
let scratch = rvalue_scratch_datum(bcx, vec_ty, "");
bcx = trans_lit_str(bcx,
content_expr,
s.clone(),
SaveIn(scratch.val));
return DatumBlock::new(bcx, scratch.to_expr_datum());
}
}
// Handle the &[...] case:
let vt = vec_types_from_expr(bcx, content_expr);
let count = elements_required(bcx, content_expr);
debug!(" vt={}, count={}", vt.to_string(ccx), count);
let fixed_ty = ty::mk_vec(bcx.tcx(),
vt.unit_ty,
Some(count));
let llfixed_ty = type_of::type_of(bcx.ccx(), fixed_ty);
// Always create an alloca even if zero-sized, to preserve
// the non-null invariant of the inner slice ptr
let llfixed = base::alloca(bcx, llfixed_ty, "");
if count > 0 {
// Arrange for the backing array to be cleaned up.
let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id);
fcx.schedule_lifetime_end(cleanup_scope, llfixed);
fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty);
// Generate the content into the backing array.
// llfixed has type *[T x N], but we want the type *T,
// so use GEP to convert
bcx = write_content(bcx, &vt, slice_expr, content_expr,
SaveIn(GEPi(bcx, llfixed, &[0, 0])));
};
immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock()
}
/// Literal strings translate to slices into static memory. This is different from
/// trans_slice_vstore() above because it doesn't need to copy the content anywhere.
pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lit_expr: &ast::Expr,
str_lit: InternedString,
dest: Dest)
-> Block<'blk, 'tcx> {
debug!("trans_lit_str(lit_expr={}, dest={})",
bcx.expr_to_string(lit_expr),
dest.to_string(bcx.ccx()));
match dest {
Ignore => bcx,
SaveIn(lldest) => {
let bytes = str_lit.len();
let llbytes = C_uint(bcx.ccx(), bytes);
let llcstr = C_cstr(bcx.ccx(), str_lit, false);
let llcstr = consts::ptrcast(llcstr, Type::i8p(bcx.ccx()));
Store(bcx, llcstr, GEPi(bcx, lldest, &[0, abi::FAT_PTR_ADDR]));
Store(bcx, llbytes, GEPi(bcx, lldest, &[0, abi::FAT_PTR_EXTRA]));
bcx
}
}
}
fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
vt: &VecTypes<'tcx>,
vstore_expr: &ast::Expr,
content_expr: &ast::Expr,
dest: Dest)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("tvec::write_content");
let fcx = bcx.fcx;
let mut bcx = bcx;
debug!("write_content(vt={}, dest={}, vstore_expr={})",
vt.to_string(bcx.ccx()),
dest.to_string(bcx.ccx()),
bcx.expr_to_string(vstore_expr));
match content_expr.node {
ast::ExprLit(ref lit) => {
match lit.node {
ast::LitStr(ref s, _) => {
match dest {
Ignore => return bcx,
SaveIn(lldest) => {
let bytes = s.len();
let llbytes = C_uint(bcx.ccx(), bytes);
let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false);
base::call_memcpy(bcx,
lldest,
llcstr,
llbytes,
1);
return bcx;
}
}
}
_ => {
bcx.tcx().sess.span_bug(content_expr.span,
"unexpected evec content");
}
}
}
ast::ExprVec(ref elements) => {
match dest {
Ignore => {
for element in elements {
bcx = expr::trans_into(bcx, &**element, Ignore);
}
}
SaveIn(lldest) => {
let temp_scope = fcx.push_custom_cleanup_scope();
for (i, element) in elements.iter().enumerate() {
let lleltptr = GEPi(bcx, lldest, &[i]);
debug!("writing index {} with lleltptr={}",
i, bcx.val_to_string(lleltptr));
bcx = expr::trans_into(bcx, &**element,
SaveIn(lleltptr));
let scope = cleanup::CustomScope(temp_scope);
fcx.schedule_lifetime_end(scope, lleltptr);
fcx.schedule_drop_mem(scope, lleltptr, vt.unit_ty);
}
fcx.pop_custom_cleanup_scope(temp_scope);
}
}
return bcx;
}
ast::ExprRepeat(ref element, ref count_expr) => {
match dest {
Ignore => {
return expr::trans_into(bcx, &**element, Ignore);
}
SaveIn(lldest) => {
match ty::eval_repeat_count(bcx.tcx(), &**count_expr) {
0 => expr::trans_into(bcx, &**element, Ignore),
1 => expr::trans_into(bcx, &**element, SaveIn(lldest)),
count => {
let elem = unpack_datum!(bcx, expr::trans(bcx, &**element));
let bcx = iter_vec_loop(bcx, lldest, vt,
C_uint(bcx.ccx(), count),
|set_bcx, lleltptr, _| {
elem.shallow_copy(set_bcx, lleltptr)
});
bcx
}
}
}
}
}
_ => {
bcx.tcx().sess.span_bug(content_expr.span,
"unexpected vec content");
}
}
}
fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &ast::Expr)
-> VecTypes<'tcx> {
let vec_ty = node_id_type(bcx, vec_expr.id);
vec_types(bcx, ty::sequence_element_type(bcx.tcx(), vec_ty))
}
fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>)
-> VecTypes<'tcx> {
VecTypes {
unit_ty: unit_ty,
llunit_ty: type_of::type_of(bcx.ccx(), unit_ty)
}
}
fn elements_required(bcx: Block, content_expr: &ast::Expr) -> usize {
//! Figure out the number of elements we need to store this content
match content_expr.node {
ast::ExprLit(ref lit) => {
match lit.node {
ast::LitStr(ref s, _) => s.len(),
_ => {
bcx.tcx().sess.span_bug(content_expr.span,
"unexpected evec content")
}
}
},
ast::ExprVec(ref es) => es.len(),
ast::ExprRepeat(_, ref count_expr) => {
ty::eval_repeat_count(bcx.tcx(), &**count_expr)
}
_ => bcx.tcx().sess.span_bug(content_expr.span,
"unexpected vec content")
}
}
/// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval`
/// which should be by ref.
pub fn get_fixed_base_and_len(bcx: Block,
llval: ValueRef,
vec_length: usize)
-> (ValueRef, ValueRef) {
let ccx = bcx.ccx();
let base = expr::get_dataptr(bcx, llval);
let len = C_uint(ccx, vec_length);
(base, len)
}
/// Converts a vector into the slice pair. The vector should be stored in `llval` which should be
/// by-reference. If you have a datum, you would probably prefer to call
/// `Datum::get_base_and_len()` which will handle any conversions for you.
pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
llval: ValueRef,
vec_ty: Ty<'tcx>)
-> (ValueRef, ValueRef) {
let ccx = bcx.ccx();
match vec_ty.sty {
ty::ty_vec(_, Some(n)) => get_fixed_base_and_len(bcx, llval, n),
ty::ty_vec(_, None) | ty::ty_str => {
let base = Load(bcx, expr::get_dataptr(bcx, llval));
let len = Load(bcx, expr::get_len(bcx, llval));
(base, len)
}
// Only used for pattern matching.
ty::ty_uniq(ty) | ty::ty_rptr(_, ty::mt{ty,..}) => {
let inner = if type_is_sized(bcx.tcx(), ty) {
Load(bcx, llval)
} else {
llval
};
get_base_and_len(bcx, inner, ty)
},
_ => ccx.sess().bug("unexpected type in get_base_and_len"),
}
}
fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
data_ptr: ValueRef,
vt: &VecTypes<'tcx>,
count: ValueRef,
f: F)
-> Block<'blk, 'tcx> where
F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
{
let _icx = push_ctxt("tvec::iter_vec_loop");
if bcx.unreachable.get() {
return bcx;
}
let fcx = bcx.fcx;
let loop_bcx = fcx.new_temp_block("expr_repeat");
let next_bcx = fcx.new_temp_block("expr_repeat: next");
Br(bcx, loop_bcx.llbb, DebugLoc::None);
let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
&[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
let bcx = loop_bcx;
let lleltptr = if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
data_ptr
} else {
InBoundsGEP(bcx, data_ptr, &[loop_counter])
};
let bcx = f(bcx, lleltptr, vt.unit_ty);
let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1us), DebugLoc::None);
AddIncomingToPhi(loop_counter, plusone, bcx.llbb);
let cond_val = ICmp(bcx, llvm::IntULT, plusone, count, DebugLoc::None);
CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None);
next_bcx
}
pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
data_ptr: ValueRef,
unit_ty: Ty<'tcx>,
len: ValueRef,
f: F)
-> Block<'blk, 'tcx> where
F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
{
let _icx = push_ctxt("tvec::iter_vec_raw");
let fcx = bcx.fcx;
let vt = vec_types(bcx, unit_ty);
if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
iter_vec_loop(bcx, data_ptr, &vt, len, f)
} else {
// Calculate the last pointer address we want to handle.
let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]);
// Now perform the iteration.
let header_bcx = fcx.new_temp_block("iter_vec_loop_header");
Br(bcx, header_bcx.llbb, DebugLoc::None);
let data_ptr =
Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]);
let not_yet_at_end =
ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None);
let body_bcx = fcx.new_temp_block("iter_vec_loop_body");
let next_bcx = fcx.new_temp_block("iter_vec_next");
CondBr(header_
|
/// &[...] allocates memory on the stack and writes the values into it, returning the vector (the
/// caller must make the reference). "..." is similar except that the memory can be statically
/// allocated and we return a reference (strings are always by-ref).
pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
random_line_split
|
tvec.rs
|
option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)]
use back::abi;
use llvm;
use llvm::ValueRef;
use trans::base::*;
use trans::base;
use trans::build::*;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::consts;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
use trans::expr::{Dest, Ignore, SaveIn};
use trans::expr;
use trans::machine::llsize_of_alloc;
use trans::type_::Type;
use trans::type_of;
use middle::ty::{self, Ty};
use util::ppaux::ty_to_string;
use syntax::ast;
use syntax::parse::token::InternedString;
#[derive(Copy, Clone)]
struct VecTypes<'tcx> {
unit_ty: Ty<'tcx>,
llunit_ty: Type
}
impl<'tcx> VecTypes<'tcx> {
pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String {
format!("VecTypes {{unit_ty={}, llunit_ty={}}}",
ty_to_string(ccx.tcx(), self.unit_ty),
ccx.tn().type_to_string(self.llunit_ty))
}
}
pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &ast::Expr,
dest: expr::Dest)
-> Block<'blk, 'tcx>
|
}
};
}
/// &[...] allocates memory on the stack and writes the values into it, returning the vector (the
/// caller must make the reference). "..." is similar except that the memory can be statically
/// allocated and we return a reference (strings are always by-ref).
pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
slice_expr: &ast::Expr,
content_expr: &ast::Expr)
-> DatumBlock<'blk, 'tcx, Expr> {
let fcx = bcx.fcx;
let ccx = fcx.ccx;
let mut bcx = bcx;
debug!("trans_slice_vec(slice_expr={})",
bcx.expr_to_string(slice_expr));
let vec_ty = node_id_type(bcx, slice_expr.id);
// Handle the "..." case (returns a slice since strings are always unsized):
if let ast::ExprLit(ref lit) = content_expr.node {
if let ast::LitStr(ref s, _) = lit.node {
let scratch = rvalue_scratch_datum(bcx, vec_ty, "");
bcx = trans_lit_str(bcx,
content_expr,
s.clone(),
SaveIn(scratch.val));
return DatumBlock::new(bcx, scratch.to_expr_datum());
}
}
// Handle the &[...] case:
let vt = vec_types_from_expr(bcx, content_expr);
let count = elements_required(bcx, content_expr);
debug!(" vt={}, count={}", vt.to_string(ccx), count);
let fixed_ty = ty::mk_vec(bcx.tcx(),
vt.unit_ty,
Some(count));
let llfixed_ty = type_of::type_of(bcx.ccx(), fixed_ty);
// Always create an alloca even if zero-sized, to preserve
// the non-null invariant of the inner slice ptr
let llfixed = base::alloca(bcx, llfixed_ty, "");
if count > 0 {
// Arrange for the backing array to be cleaned up.
let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id);
fcx.schedule_lifetime_end(cleanup_scope, llfixed);
fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty);
// Generate the content into the backing array.
// llfixed has type *[T x N], but we want the type *T,
// so use GEP to convert
bcx = write_content(bcx, &vt, slice_expr, content_expr,
SaveIn(GEPi(bcx, llfixed, &[0, 0])));
};
immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock()
}
/// Literal strings translate to slices into static memory. This is different from
/// trans_slice_vstore() above because it doesn't need to copy the content anywhere.
pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lit_expr: &ast::Expr,
str_lit: InternedString,
dest: Dest)
-> Block<'blk, 'tcx> {
debug!("trans_lit_str(lit_expr={}, dest={})",
bcx.expr_to_string(lit_expr),
dest.to_string(bcx.ccx()));
match dest {
Ignore => bcx,
SaveIn(lldest) => {
let bytes = str_lit.len();
let llbytes = C_uint(bcx.ccx(), bytes);
let llcstr = C_cstr(bcx.ccx(), str_lit, false);
let llcstr = consts::ptrcast(llcstr, Type::i8p(bcx.ccx()));
Store(bcx, llcstr, GEPi(bcx, lldest, &[0, abi::FAT_PTR_ADDR]));
Store(bcx, llbytes, GEPi(bcx, lldest, &[0, abi::FAT_PTR_EXTRA]));
bcx
}
}
}
fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
vt: &VecTypes<'tcx>,
vstore_expr: &ast::Expr,
content_expr: &ast::Expr,
dest: Dest)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("tvec::write_content");
let fcx = bcx.fcx;
let mut bcx = bcx;
debug!("write_content(vt={}, dest={}, vstore_expr={})",
vt.to_string(bcx.ccx()),
dest.to_string(bcx.ccx()),
bcx.expr_to_string(vstore_expr));
match content_expr.node {
ast::ExprLit(ref lit) => {
match lit.node {
ast::LitStr(ref s, _) => {
match dest {
Ignore => return bcx,
SaveIn(lldest) => {
let bytes = s.len();
let llbytes = C_uint(bcx.ccx(), bytes);
let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false);
base::call_memcpy(bcx,
lldest,
llcstr,
llbytes,
1);
return bcx;
}
}
}
_ => {
bcx.tcx().sess.span_bug(content_expr.span,
"unexpected evec content");
}
}
}
ast::ExprVec(ref elements) => {
match dest {
Ignore => {
for element in elements {
bcx = expr::trans_into(bcx, &**element, Ignore);
}
}
SaveIn(lldest) => {
let temp_scope = fcx.push_custom_cleanup_scope();
for (i, element) in elements.iter().enumerate() {
let lleltptr = GEPi(bcx, lldest, &[i]);
debug!("writing index {} with lleltptr={}",
i, bcx.val_to_string(lleltptr));
bcx = expr::trans_into(bcx, &**element,
SaveIn(lleltptr));
let scope = cleanup::CustomScope(temp_scope);
fcx.schedule_lifetime_end(scope, lleltptr);
fcx.schedule_drop_mem(scope, lleltptr, vt.unit_ty);
}
fcx.pop_custom_cleanup_scope(temp_scope);
}
}
return bcx;
}
ast::ExprRepeat(ref element, ref count_expr) => {
match dest {
Ignore => {
return expr::trans_into(bcx, &**element, Ignore);
}
SaveIn(lldest) => {
match ty::eval_repeat_count(bcx.tcx(), &**count_expr) {
0 => expr::trans_into(bcx, &**element, Ignore),
1 => expr::trans_into(bcx, &**element, SaveIn(lldest)),
count => {
let elem = unpack_datum!(bcx, expr::trans(bcx, &**element));
let bcx = iter_vec_loop(bcx, lldest, vt,
C_uint(bcx.ccx(), count),
|set_bcx, lleltptr, _| {
elem.shallow_copy(set_bcx, lleltptr)
});
bcx
}
}
}
}
}
_ => {
bcx.tcx().sess.span_bug(content_expr.span,
"unexpected vec content");
}
}
}
fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &ast::Expr)
-> VecTypes<'tcx> {
let vec_ty = node_id_type(bcx, vec_expr.id);
vec_types(bcx, ty::sequence_element_type(bcx.tcx(), vec_ty))
}
fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>)
-> VecTypes<'tcx> {
VecTypes {
unit_ty: unit_ty,
llunit_ty: type_of::type_of(bcx.ccx(), unit_ty)
}
}
fn elements_required(bcx: Block, content_expr: &ast::Expr) -> usize {
//! Figure out the number of elements we need to store this content
match content_expr.node {
ast::ExprLit(ref lit) => {
match lit.node {
ast::LitStr(ref s, _) => s.len(),
_ => {
bcx.tcx().sess.span_bug(content_expr.span,
"unexpected evec content")
}
}
},
ast::ExprVec(ref es) => es.len(),
ast::ExprRepeat(_, ref count_expr) => {
ty::eval_repeat_count(bcx.tcx(), &**count_expr)
}
_ => bcx.tcx().sess.span_bug(content_expr.span,
"unexpected vec content")
}
}
/// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval`
/// which should be by ref.
pub fn get_fixed_base_and_len(bcx: Block,
llval: ValueRef,
vec_length: usize)
-> (ValueRef, ValueRef) {
let ccx = bcx.ccx();
let base = expr::get_dataptr(bcx, llval);
let len = C_uint(ccx, vec_length);
(base, len)
}
/// Converts a vector into the slice pair. The vector should be stored in `llval` which should be
/// by-reference. If you have a datum, you would probably prefer to call
/// `Datum::get_base_and_len()` which will handle any conversions for you.
pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
llval: ValueRef,
vec_ty: Ty<'tcx>)
-> (ValueRef, ValueRef) {
let ccx = bcx.ccx();
match vec_ty.sty {
ty::ty_vec(_, Some(n)) => get_fixed_base_and_len(bcx, llval, n),
ty::ty_vec(_, None) | ty::ty_str => {
let base = Load(bcx, expr::get_dataptr(bcx, llval));
let len = Load(bcx, expr::get_len(bcx, llval));
(base, len)
}
// Only used for pattern matching.
ty::ty_uniq(ty) | ty::ty_rptr(_, ty::mt{ty,..}) => {
let inner = if type_is_sized(bcx.tcx(), ty) {
Load(bcx, llval)
} else {
llval
};
get_base_and_len(bcx, inner, ty)
},
_ => ccx.sess().bug("unexpected type in get_base_and_len"),
}
}
fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
data_ptr: ValueRef,
vt: &VecTypes<'tcx>,
count: ValueRef,
f: F)
-> Block<'blk, 'tcx> where
F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
{
let _icx = push_ctxt("tvec::iter_vec_loop");
if bcx.unreachable.get() {
return bcx;
}
let fcx = bcx.fcx;
let loop_bcx = fcx.new_temp_block("expr_repeat");
let next_bcx = fcx.new_temp_block("expr_repeat: next");
Br(bcx, loop_bcx.llbb, DebugLoc::None);
let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
&[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
let bcx = loop_bcx;
let lleltptr = if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
data_ptr
} else {
InBoundsGEP(bcx, data_ptr, &[loop_counter])
};
let bcx = f(bcx, lleltptr, vt.unit_ty);
let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1us), DebugLoc::None);
AddIncomingToPhi(loop_counter, plusone, bcx.llbb);
let cond_val = ICmp(bcx, llvm::IntULT, plusone, count, DebugLoc::None);
CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None);
next_bcx
}
pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
data_ptr: ValueRef,
unit_ty: Ty<'tcx>,
len: ValueRef,
f: F)
-> Block<'blk, 'tcx> where
F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
{
let _icx = push_ctxt("tvec::iter_vec_raw");
let fcx = bcx.fcx;
let vt = vec_types(bcx, unit_ty);
if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
iter_vec_loop(bcx, data_ptr, &vt, len, f)
} else {
// Calculate the last pointer address we want to handle.
let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]);
// Now perform the iteration.
let header_bcx = fcx.new_temp_block("iter_vec_loop_header");
Br(bcx, header_bcx.llbb, DebugLoc::None);
let data_ptr =
Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]);
let not_yet_at_end =
ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None);
let body_bcx = fcx.new_temp_block("iter_vec_loop_body");
let next_bcx = fcx.new_temp_block("iter_vec_next");
CondBr(
|
{
//!
//
// [...] allocates a fixed-size array and moves it around "by value".
// In this case, it means that the caller has already given us a location
// to store the array of the suitable size, so all we have to do is
// generate the content.
debug!("trans_fixed_vstore(expr={}, dest={})",
bcx.expr_to_string(expr), dest.to_string(bcx.ccx()));
let vt = vec_types_from_expr(bcx, expr);
return match dest {
Ignore => write_content(bcx, &vt, expr, expr, dest),
SaveIn(lldest) => {
// lldest will have type *[T x N], but we want the type *T,
// so use GEP to convert:
let lldest = GEPi(bcx, lldest, &[0, 0]);
write_content(bcx, &vt, expr, expr, SaveIn(lldest))
|
identifier_body
|
loc.rs
|
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Loc<T, P> {
pub inner: T,
pub pos: P,
}
impl<T, P> ::std::ops::Deref for Loc<T, P> {
type Target = T;
fn deref(&self) -> &T {
&self.inner
}
}
impl<T, P> ::std::fmt::Display for Loc<T, P>
where
T: ::std::fmt::Display,
{
fn
|
(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "{}", self.inner)
}
}
impl<T, P> ::std::fmt::Debug for Loc<T, P>
where
T: ::std::fmt::Debug,
P: ::std::fmt::Debug,
{
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "<{:?}@{:?}>", self.inner, self.pos)
}
}
|
fmt
|
identifier_name
|
loc.rs
|
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Loc<T, P> {
pub inner: T,
pub pos: P,
}
impl<T, P> ::std::ops::Deref for Loc<T, P> {
type Target = T;
fn deref(&self) -> &T {
&self.inner
}
}
impl<T, P> ::std::fmt::Display for Loc<T, P>
where
T: ::std::fmt::Display,
{
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "{}", self.inner)
}
}
impl<T, P> ::std::fmt::Debug for Loc<T, P>
where
T: ::std::fmt::Debug,
P: ::std::fmt::Debug,
{
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
write!(f, "<{:?}@{:?}>", self.inner, self.pos)
}
}
|
random_line_split
|
|
loc.rs
|
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Loc<T, P> {
pub inner: T,
pub pos: P,
}
impl<T, P> ::std::ops::Deref for Loc<T, P> {
type Target = T;
fn deref(&self) -> &T {
&self.inner
}
}
impl<T, P> ::std::fmt::Display for Loc<T, P>
where
T: ::std::fmt::Display,
{
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result
|
}
impl<T, P> ::std::fmt::Debug for Loc<T, P>
where
T: ::std::fmt::Debug,
P: ::std::fmt::Debug,
{
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "<{:?}@{:?}>", self.inner, self.pos)
}
}
|
{
write!(f, "{}", self.inner)
}
|
identifier_body
|
test_optimizers.rs
|
extern crate autograd as ag;
extern crate ndarray;
use ag::prelude::*;
use ag::tensor_ops as T;
use ag::variable::NamespaceTrait;
use ag::{optimizers, VariableEnvironment};
use ndarray::array;
#[test]
fn test_adam() {
let mut env = make_env();
let opt = optimizers::Adam::default(
"my_unique_adam",
env.default_namespace().current_var_ids(),
&mut env, // mut env
);
run(opt, env);
}
#[test]
fn test_adagrad() {
let mut env = make_env();
let opt = optimizers::AdaGrad::default(
"my_unique_adagrad",
env.default_namespace().current_var_ids(),
&mut env, // mut env
);
run(opt, env);
}
#[test]
fn test_momentum() {
let mut env = make_env();
let opt = optimizers::MomentumSGD::default(
"my_momentum_sgd",
env.default_namespace().current_var_ids(),
&mut env, // mut env
);
run(opt, env);
}
fn make_env() -> VariableEnvironment<'static, f64> {
let mut env = ag::VariableEnvironment::new();
let rng = ag::ndarray_ext::ArrayRng::<f64>::default();
env.name("w").set(rng.glorot_uniform(&[2, 2]));
env.name("b").set(ag::ndarray_ext::zeros(&[1, 2]));
env
}
fn run<O: Optimizer<f64>>(opt: O, env: VariableEnvironment<f64>) {
env.run(|g| {
let x = T::convert_to_tensor(array![[0.1, 0.2], [0.2, 0.1]], g).show();
let y = T::convert_to_tensor(array![1., 0.], g).show();
let w = g.variable("w");
let b = g.variable("b");
|
let mean_loss = T::reduce_mean(loss, &[0], false);
let ns = g.default_namespace();
let (vars, grads) = optimizers::grad_helper(&[mean_loss], &ns);
opt.update(&vars, &grads, g, ag::Feeder::new());
});
}
|
let z = T::matmul(x, w) + b;
let loss = T::sparse_softmax_cross_entropy(z, &y);
|
random_line_split
|
test_optimizers.rs
|
extern crate autograd as ag;
extern crate ndarray;
use ag::prelude::*;
use ag::tensor_ops as T;
use ag::variable::NamespaceTrait;
use ag::{optimizers, VariableEnvironment};
use ndarray::array;
#[test]
fn test_adam() {
let mut env = make_env();
let opt = optimizers::Adam::default(
"my_unique_adam",
env.default_namespace().current_var_ids(),
&mut env, // mut env
);
run(opt, env);
}
#[test]
fn test_adagrad() {
let mut env = make_env();
let opt = optimizers::AdaGrad::default(
"my_unique_adagrad",
env.default_namespace().current_var_ids(),
&mut env, // mut env
);
run(opt, env);
}
#[test]
fn test_momentum() {
let mut env = make_env();
let opt = optimizers::MomentumSGD::default(
"my_momentum_sgd",
env.default_namespace().current_var_ids(),
&mut env, // mut env
);
run(opt, env);
}
fn make_env() -> VariableEnvironment<'static, f64> {
let mut env = ag::VariableEnvironment::new();
let rng = ag::ndarray_ext::ArrayRng::<f64>::default();
env.name("w").set(rng.glorot_uniform(&[2, 2]));
env.name("b").set(ag::ndarray_ext::zeros(&[1, 2]));
env
}
fn run<O: Optimizer<f64>>(opt: O, env: VariableEnvironment<f64>)
|
{
env.run(|g| {
let x = T::convert_to_tensor(array![[0.1, 0.2], [0.2, 0.1]], g).show();
let y = T::convert_to_tensor(array![1., 0.], g).show();
let w = g.variable("w");
let b = g.variable("b");
let z = T::matmul(x, w) + b;
let loss = T::sparse_softmax_cross_entropy(z, &y);
let mean_loss = T::reduce_mean(loss, &[0], false);
let ns = g.default_namespace();
let (vars, grads) = optimizers::grad_helper(&[mean_loss], &ns);
opt.update(&vars, &grads, g, ag::Feeder::new());
});
}
|
identifier_body
|
|
test_optimizers.rs
|
extern crate autograd as ag;
extern crate ndarray;
use ag::prelude::*;
use ag::tensor_ops as T;
use ag::variable::NamespaceTrait;
use ag::{optimizers, VariableEnvironment};
use ndarray::array;
#[test]
fn test_adam() {
let mut env = make_env();
let opt = optimizers::Adam::default(
"my_unique_adam",
env.default_namespace().current_var_ids(),
&mut env, // mut env
);
run(opt, env);
}
#[test]
fn test_adagrad() {
let mut env = make_env();
let opt = optimizers::AdaGrad::default(
"my_unique_adagrad",
env.default_namespace().current_var_ids(),
&mut env, // mut env
);
run(opt, env);
}
#[test]
fn test_momentum() {
let mut env = make_env();
let opt = optimizers::MomentumSGD::default(
"my_momentum_sgd",
env.default_namespace().current_var_ids(),
&mut env, // mut env
);
run(opt, env);
}
fn
|
() -> VariableEnvironment<'static, f64> {
let mut env = ag::VariableEnvironment::new();
let rng = ag::ndarray_ext::ArrayRng::<f64>::default();
env.name("w").set(rng.glorot_uniform(&[2, 2]));
env.name("b").set(ag::ndarray_ext::zeros(&[1, 2]));
env
}
fn run<O: Optimizer<f64>>(opt: O, env: VariableEnvironment<f64>) {
env.run(|g| {
let x = T::convert_to_tensor(array![[0.1, 0.2], [0.2, 0.1]], g).show();
let y = T::convert_to_tensor(array![1., 0.], g).show();
let w = g.variable("w");
let b = g.variable("b");
let z = T::matmul(x, w) + b;
let loss = T::sparse_softmax_cross_entropy(z, &y);
let mean_loss = T::reduce_mean(loss, &[0], false);
let ns = g.default_namespace();
let (vars, grads) = optimizers::grad_helper(&[mean_loss], &ns);
opt.update(&vars, &grads, g, ag::Feeder::new());
});
}
|
make_env
|
identifier_name
|
term.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! An abstraction of the terminal. Eventually, provide color and
//! verbosity support. For now, just a wrapper around stdout/stderr.
use std::env;
use std::io;
use std::io::prelude::*;
pub struct Term {
err: Box<Write + 'static>
}
impl Term {
pub fn
|
() -> Term {
Term {
err: Box::new(io::stderr())
}
}
pub fn err(&mut self, msg: &str) {
// swallow any errors
let _ = writeln!(&mut self.err, "{}", msg);
env::set_exit_status(101);
}
}
|
new
|
identifier_name
|
term.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! An abstraction of the terminal. Eventually, provide color and
//! verbosity support. For now, just a wrapper around stdout/stderr.
use std::env;
use std::io;
use std::io::prelude::*;
pub struct Term {
err: Box<Write + 'static>
}
impl Term {
pub fn new() -> Term
|
pub fn err(&mut self, msg: &str) {
// swallow any errors
let _ = writeln!(&mut self.err, "{}", msg);
env::set_exit_status(101);
}
}
|
{
Term {
err: Box::new(io::stderr())
}
}
|
identifier_body
|
term.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! An abstraction of the terminal. Eventually, provide color and
//! verbosity support. For now, just a wrapper around stdout/stderr.
use std::env;
use std::io;
use std::io::prelude::*;
pub struct Term {
err: Box<Write + 'static>
}
impl Term {
pub fn new() -> Term {
Term {
|
pub fn err(&mut self, msg: &str) {
// swallow any errors
let _ = writeln!(&mut self.err, "{}", msg);
env::set_exit_status(101);
}
}
|
err: Box::new(io::stderr())
}
}
|
random_line_split
|
mod.rs
|
// Copyright (c) 2019 Jason White
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
mod client;
mod codec;
mod daemon;
mod error;
mod protocol;
mod service;
mod shutdown;
mod transport;
pub use client::Client;
pub use daemon::{connect_or_spawn, daemonize, run, run_daemon, try_connect};
pub use error::Error;
pub use protocol::{Request, Response};
pub use transport::Message;
use std::env;
use std::io;
use std::net::{Ipv4Addr, SocketAddr};
use std::path::Path;
|
use std::time::Duration;
use bincode;
use futures::{sync::mpsc, Future, Stream};
use log;
use serde::Serialize;
use tokio::net::TcpListener;
use service::ButtonService;
use shutdown::{Shutdown, ShutdownCause};
#[cfg(unix)]
fn notify_server_startup<T>(path: &Path, message: &T) -> Result<(), io::Error>
where
T: Serialize,
{
use std::os::unix::net::UnixStream;
let mut stream = UnixStream::connect(path)?;
bincode::serialized_size(message)
.and_then(|size| bincode::serialize_into(&mut stream, &size))
.and_then(|_| bincode::serialize_into(&mut stream, message))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(())
}
#[cfg(windows)]
fn notify_server_startup<T>(path: &Path, message: &T) -> Result<(), io::Error>
where
T: Serialize,
{
use std::fs::OpenOptions;
let mut stream = OpenOptions::new().write(true).read(true).open(path)?;
bincode::serialized_size(message)
.and_then(|size| bincode::serialize_into(&mut stream, &size))
.and_then(|_| bincode::serialize_into(&mut stream, message))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(())
}
pub struct Server {
socket: TcpListener,
}
impl Server {
pub fn new(port: u16) -> Result<Self, io::Error> {
let addr = SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), port);
let result = Self::bind(&addr);
// The client can set this environment variable to a Unix Domain Socket
// path (on Unix) or to a named pipe (on Windows). We notify the client
// that the server has been started by writing a message back to the
// client. Then, the client can start making requests as soon as the
// server is ready instead of retrying the connection on a loop.
if let Some(path) = env::var_os("BUTTON_STARTUP_NOTIFY") {
let message = match &result {
Ok(server) => Ok(server.port()),
Err(err) => Err(err.to_string()),
};
notify_server_startup(path.as_ref(), &message)?;
}
result
}
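    // Hedged sketch added for illustration; it is not part of this module. It
    // shows what the client side of the BUTTON_STARTUP_NOTIFY handshake
    // described above could look like on Unix, inferring the wire format from
    // `notify_server_startup`: a bincode-encoded u64 length followed by a
    // bincode-encoded `Result<u16, String>` (the port, or an error string).
    // The function name, the use of a `UnixListener`, and bincode 1.x's
    // `deserialize_from` signature are assumptions for this example only.
    #[cfg(unix)]
    #[allow(dead_code)]
    fn read_startup_notification(
        listener: &std::os::unix::net::UnixListener,
    ) -> Result<Result<u16, String>, io::Error> {
        let (mut stream, _) = listener.accept()?;
        // Length prefix written by the server; read and discarded here since
        // we deserialize the message directly from the stream.
        let _len: u64 = bincode::deserialize_from(&mut stream)
            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
        bincode::deserialize_from(&mut stream)
            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
    }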
fn bind(addr: &SocketAddr) -> Result<Self, io::Error> {
Ok(Server {
socket: TcpListener::bind(addr)?,
})
}
pub fn addr(&self) -> SocketAddr {
self.socket.local_addr().unwrap()
}
pub fn port(&self) -> u16 {
self.addr().port()
}
pub fn run(self, idle: Duration) {
let (tx, rx) = mpsc::channel(0);
let service = ButtonService::new(tx);
let timeout = Shutdown::new(idle, rx).map(|message| match message {
ShutdownCause::Idle(duration) => {
log::info!(
"Shutting down due to being idle for {:#?}.",
duration
);
}
ShutdownCause::ShutdownRequested => {
log::info!("Shutdown requested. Bye bye!");
}
});
let server = self
.socket
.incoming()
.map_err(|e| {
log::error!("failed to accept socket; error = {:?}", e)
})
.for_each(move |socket| {
let task = service.clone().bind(socket).map_err(|_| ());
tokio::spawn(task)
});
let task = server.select2(timeout).map(|_| ()).map_err(|_| ());
tokio::run(task);
}
}
|
random_line_split
|
|
mod.rs
|
// Copyright (c) 2019 Jason White
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
mod client;
mod codec;
mod daemon;
mod error;
mod protocol;
mod service;
mod shutdown;
mod transport;
pub use client::Client;
pub use daemon::{connect_or_spawn, daemonize, run, run_daemon, try_connect};
pub use error::Error;
pub use protocol::{Request, Response};
pub use transport::Message;
use std::env;
use std::io;
use std::net::{Ipv4Addr, SocketAddr};
use std::path::Path;
use std::time::Duration;
use bincode;
use futures::{sync::mpsc, Future, Stream};
use log;
use serde::Serialize;
use tokio::net::TcpListener;
use service::ButtonService;
use shutdown::{Shutdown, ShutdownCause};
#[cfg(unix)]
fn notify_server_startup<T>(path: &Path, message: &T) -> Result<(), io::Error>
where
T: Serialize,
{
use std::os::unix::net::UnixStream;
let mut stream = UnixStream::connect(path)?;
bincode::serialized_size(message)
.and_then(|size| bincode::serialize_into(&mut stream, &size))
.and_then(|_| bincode::serialize_into(&mut stream, message))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(())
}
#[cfg(windows)]
fn notify_server_startup<T>(path: &Path, message: &T) -> Result<(), io::Error>
where
T: Serialize,
{
use std::fs::OpenOptions;
let mut stream = OpenOptions::new().write(true).read(true).open(path)?;
bincode::serialized_size(message)
.and_then(|size| bincode::serialize_into(&mut stream, &size))
.and_then(|_| bincode::serialize_into(&mut stream, message))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(())
}
pub struct Server {
socket: TcpListener,
}
impl Server {
pub fn new(port: u16) -> Result<Self, io::Error> {
let addr = SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), port);
let result = Self::bind(&addr);
// The client can set this environment variable to a Unix Domain Socket
// path (on Unix) or to a named pipe (on Windows). We notify the client
// that the server has been started by writing a message back to the
// client. Then, the client can start making requests as soon as the
        // server is ready instead of retrying the connection in a loop.
if let Some(path) = env::var_os("BUTTON_STARTUP_NOTIFY") {
let message = match &result {
Ok(server) => Ok(server.port()),
Err(err) => Err(err.to_string()),
};
notify_server_startup(path.as_ref(), &message)?;
}
result
}
fn bind(addr: &SocketAddr) -> Result<Self, io::Error> {
Ok(Server {
socket: TcpListener::bind(addr)?,
})
}
pub fn addr(&self) -> SocketAddr {
self.socket.local_addr().unwrap()
}
pub fn port(&self) -> u16 {
self.addr().port()
}
pub fn
|
(self, idle: Duration) {
let (tx, rx) = mpsc::channel(0);
let service = ButtonService::new(tx);
let timeout = Shutdown::new(idle, rx).map(|message| match message {
ShutdownCause::Idle(duration) => {
log::info!(
"Shutting down due to being idle for {:#?}.",
duration
);
}
ShutdownCause::ShutdownRequested => {
log::info!("Shutdown requested. Bye bye!");
}
});
let server = self
.socket
.incoming()
.map_err(|e| {
log::error!("failed to accept socket; error = {:?}", e)
})
.for_each(move |socket| {
let task = service.clone().bind(socket).map_err(|_| ());
tokio::spawn(task)
});
let task = server.select2(timeout).map(|_| ()).map_err(|_| ());
tokio::run(task);
}
}
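// A hypothetical wiring example (not part of this crate) showing how the pieces
// above fit together: bind to an OS-assigned port, log the address for the client,
// and serve until the idle timeout elapses. The helper name `serve_until_idle` and
// the five-minute timeout are illustrative assumptions.
#[allow(dead_code)]
fn serve_until_idle() -> Result<(), io::Error> {
    let server = Server::new(0)?; // port 0 lets the OS pick a free port
    log::info!("button server listening on {}", server.addr());
    server.run(Duration::from_secs(5 * 60));
    Ok(())
}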
|
run
|
identifier_name
|
kmesg_buffer.rs
|
extern crate errno;
extern crate libc;
#[cfg(target_os = "macos")]
use std::str;
#[cfg(target_os = "macos")]
use self::libc::{c_int, c_void};
#[cfg(target_os = "linux")]
use std::fs::File;
#[cfg(target_os = "linux")]
|
#[cfg(target_os = "linux")]
use std::sync::mpsc;
#[cfg(target_os = "linux")]
use std::sync::mpsc::Receiver;
#[cfg(target_os = "linux")]
use std::{thread, time};
// See https://opensource.apple.com/source/xnu/xnu-1456.1.26/bsd/sys/msgbuf.h
#[cfg(target_os = "macos")]
const MAX_MSG_BSIZE: usize = 1024 * 1024;
// this extern block links to the libproc library
// Original signatures of functions can be found at http://opensource.apple.com/source/Libc/Libc-594.9.4/darwin/libproc.c
#[cfg(target_os = "macos")]
#[link(name = "proc", kind = "dylib")]
extern {
    // This function has been available since Mac OS X 10.5, the minimum supported version.
fn proc_kmsgbuf(buffer: *mut c_void, buffersize: u32) -> c_int;
}
/// Get the contents of the kernel message buffer
///
/// Entries are in the format:
/// faclev,seqnum,timestamp[optional,...];message\n
/// TAGNAME=value (0 or more Tags)
/// See http://opensource.apple.com//source/system_cmds/system_cmds-336.6/dmesg.tproj/dmesg.c
/// (A sketch of splitting one such entry into its fields follows this function.)
#[cfg(target_os = "macos")]
pub fn kmsgbuf() -> Result<String, String> {
let mut message_buffer: Vec<u8> = Vec::with_capacity(MAX_MSG_BSIZE);
let buffer_ptr = message_buffer.as_mut_ptr() as *mut c_void;
let ret: i32;
unsafe {
ret = proc_kmsgbuf(buffer_ptr, message_buffer.capacity() as u32);
if ret > 0 {
message_buffer.set_len(ret as usize - 1);
}
}
    if !message_buffer.is_empty() {
        let msg = str::from_utf8(&message_buffer)
            .map_err(|_| "Could not convert kernel message buffer from utf8".to_string())?
            .to_string();
        Ok(msg)
} else {
Err("Could not read kernel message buffer".to_string())
}
}
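// A minimal sketch (not part of this crate) of how one entry header in the format
// documented above could be split into its fields; continuation "TAGNAME=value"
// lines are not handled. The name `parse_kmsg_entry` and the chosen field types are
// illustrative assumptions, not an API of this module.
#[allow(dead_code)]
fn parse_kmsg_entry(entry: &str) -> Option<(u32, u64, u64, &str)> {
    // e.g. "6,339,5140900,-;NET: Registered protocol family 10"
    let mut parts = entry.splitn(2, ';');
    let header = parts.next()?;
    let message = parts.next()?;
    let mut fields = header.split(',');
    let faclev = fields.next()?.parse().ok()?;
    let seqnum = fields.next()?.parse().ok()?;
    let timestamp = fields.next()?.parse().ok()?;
    Some((faclev, seqnum, timestamp, message.trim_end()))
}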
/// Get a message (String) from the kernel message ring buffer
/// Reading to the end of an "infinite" file like "/dev/kmsg" with standard file-reading methods
/// blocks at end of file, so a workaround is required: do the blocking reads on a thread that
/// sends each line back through a channel, and return once no more lines arrive within a short
/// timeout. Returning drops the receiver, after which the reader thread exits the next time it
/// tries to send.
#[cfg(target_os = "linux")]
pub fn kmsgbuf() -> Result<String, String> {
    let file = File::open("/dev/kmsg").map_err(|e| format!("Could not open /dev/kmsg: {}", e))?;
let kmsg_channel = spawn_kmsg_channel(file);
let duration = time::Duration::from_millis(1);
let mut buf = String::new();
while let Ok(line) = kmsg_channel.recv_timeout(duration) {
buf.push_str(&line)
}
Ok(buf)
}
// Create a channel to return lines read from a file on, then create a thread that reads the lines
// and sends them back on the channel one by one. Eventually it will get to EOF or block
#[cfg(target_os = "linux")]
fn spawn_kmsg_channel(file: File) -> Receiver<String> {
let mut reader = BufReader::new(file);
let (tx, rx) = mpsc::channel::<String>();
thread::spawn(move || loop {
let mut line = String::new();
match reader.read_line(&mut line) {
Ok(_) => {
if tx.send(line).is_err() { break; }
}
_ => break
}
});
rx
}
#[cfg(test)]
mod test {
use std::io;
use std::io::Write;
use crate::libproc::proc_pid::am_root;
use super::kmsgbuf;
#[test]
fn kmessage_buffer_test() {
if am_root() {
match kmsgbuf() {
Ok(_) => { },
Err(message) => panic!("{}", message)
}
} else {
writeln!(&mut io::stdout(), "test libproc::kmesg_buffer::kmessage_buffer_test... skipped as it needs to be run as root").unwrap();
}
}
}
|
use std::io::{BufRead, BufReader};
|
random_line_split
|