file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
service.rs | use std::future::Future;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use crate::futures;
use crate::jsonrpc::{middleware, MetaIoHandler, Metadata, Middleware};
pub struct Service<M: Metadata = (), S: Middleware<M> = middleware::Noop> {
handler: Arc<MetaIoHandler<M, S>>,
peer_addr: SocketAddr,
meta: M,
}
impl<M: Metadata, S: Middleware<M>> Service<M, S> {
pub fn new(peer_addr: SocketAddr, handler: Arc<MetaIoHandler<M, S>>, meta: M) -> Self |
}
impl<M: Metadata, S: Middleware<M>> tower_service::Service<String> for Service<M, S>
where
S::Future: Unpin,
S::CallFuture: Unpin,
{
// These types must match the corresponding protocol types:
type Response = Option<String>;
// For non-streaming protocols, service errors are always io::Error
type Error = ();
// The future for computing the response; box it for simplicity.
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
// Produce a future for computing a response from a request.
fn call(&mut self, req: String) -> Self::Future {
use futures::FutureExt;
trace!(target: "tcp", "Accepted request from peer {}: {}", &self.peer_addr, req);
Box::pin(self.handler.handle_request(&req, self.meta.clone()).map(Ok))
}
}
| {
Service {
handler,
peer_addr,
meta,
}
} | identifier_body |
deriving-primitive.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::num::FromPrimitive;
use std::int;
#[deriving(Eq, FromPrimitive)]
enum A {
Foo = int::max_value,
Bar = 1, |
pub fn main() {
let x: Option<A> = FromPrimitive::from_int(int::max_value);
assert_eq!(x, Some(Foo));
let x: Option<A> = FromPrimitive::from_int(1);
assert_eq!(x, Some(Bar));
let x: Option<A> = FromPrimitive::from_int(3);
assert_eq!(x, Some(Baz));
let x: Option<A> = FromPrimitive::from_int(4);
assert_eq!(x, Some(Qux));
let x: Option<A> = FromPrimitive::from_int(5);
assert_eq!(x, None);
} | Baz = 3,
Qux,
} | random_line_split |
deriving-primitive.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::num::FromPrimitive;
use std::int;
#[deriving(Eq, FromPrimitive)]
enum | {
Foo = int::max_value,
Bar = 1,
Baz = 3,
Qux,
}
pub fn main() {
let x: Option<A> = FromPrimitive::from_int(int::max_value);
assert_eq!(x, Some(Foo));
let x: Option<A> = FromPrimitive::from_int(1);
assert_eq!(x, Some(Bar));
let x: Option<A> = FromPrimitive::from_int(3);
assert_eq!(x, Some(Baz));
let x: Option<A> = FromPrimitive::from_int(4);
assert_eq!(x, Some(Qux));
let x: Option<A> = FromPrimitive::from_int(5);
assert_eq!(x, None);
}
| A | identifier_name |
context.rs | use std::path::Path;
use std::fs;
use std::io;
use std::path::PathBuf;
use std::result;
use std::time::Duration;
use uuid;
pub use io::KeyWrapper;
use io::{FileExtensions, Disks, TerminalPrompt};
use model::{DbLocation, PeroxideDb, YubikeySlot, YubikeyEntryType};
use cryptsetup_rs::device::CryptDevice;
pub type Result<T> = result::Result<T, Error>;
#[derive(Debug)]
pub enum Error {
KeyfileInputError { cause: io::Error },
PasswordInputError { cause: io::Error },
DatabaseIoError { path: PathBuf, cause: io::Error },
DiskIoError {
path: Option<PathBuf>,
cause: io::Error,
},
YubikeyError { message: String },
UnknownCryptoError,
FeatureNotAvailable,
}
pub trait HasDbLocation {
fn db_location<'a>(&'a self) -> &'a DbLocation;
}
pub trait KeyfileInput: Sized {
fn read_keyfile(&self, path: &Path) -> Result<KeyWrapper>;
}
pub trait PasswordInput: Sized {
fn read_password(&self, prompt: &str) -> Result<KeyWrapper>;
}
pub trait YubikeyInput: PasswordInput {
fn read_yubikey(&self, name: Option<&str>, uuid: &uuid::Uuid, slot: YubikeySlot, entry_type: YubikeyEntryType) -> Result<KeyWrapper>;
}
pub trait DiskSelector {
fn all_disk_uuids(&self) -> Result<Vec<uuid::Uuid>>;
fn disk_uuid_path(&self, uuid: &uuid::Uuid) -> Result<PathBuf>;
}
pub trait PeroxideDbReader: HasDbLocation {
fn open_peroxide_db(&self) -> Result<PeroxideDb>;
}
pub trait PeroxideDbWriter: HasDbLocation { | db_location: DbLocation,
password_input_timeout: Option<Duration>,
}
impl MainContext {
pub fn new(location: DbLocation) -> MainContext {
MainContext {
db_location: location,
password_input_timeout: Some(Duration::new(30, 0)),
}
}
pub fn trace_on() {
CryptDevice::enable_debug(true);
}
}
impl HasDbLocation for MainContext {
fn db_location<'a>(&'a self) -> &'a DbLocation {
&self.db_location
}
}
impl KeyfileInput for MainContext {
fn read_keyfile(&self, path: &Path) -> Result<KeyWrapper> {
self.db_location
.open_relative_path(path)
.and_then(|mut file| KeyWrapper::read(&mut file))
.map_err(|err| Error::KeyfileInputError { cause: err })
}
}
impl PasswordInput for MainContext {
fn read_password(&self, prompt: &str) -> Result<KeyWrapper> {
TerminalPrompt::prompt_passphrase(prompt, self.password_input_timeout.as_ref())
.map_err(|err| Error::PasswordInputError { cause: err })
}
}
#[cfg(not(feature = "yubikey"))]
impl YubikeyInput for MainContext {
#[allow(unused)]
fn read_yubikey(&self, name: Option<&str>, uuid: &uuid::Uuid, slot: YubikeySlot, entry_type: YubikeyEntryType) -> Result<KeyWrapper> {
Err(Error::FeatureNotAvailable)
}
}
impl DiskSelector for MainContext {
fn all_disk_uuids(&self) -> Result<Vec<uuid::Uuid>> {
Disks::all_disk_uuids().map_err(|err| {
Error::DiskIoError {
path: None,
cause: err,
}
})
}
fn disk_uuid_path(&self, uuid: &uuid::Uuid) -> Result<PathBuf> {
Disks::disk_uuid_path(uuid).map_err(|err| {
Error::DiskIoError {
path: None,
cause: err,
}
})
}
}
impl PeroxideDbReader for MainContext {
fn open_peroxide_db(&self) -> Result<PeroxideDb> {
fs::File::open(&self.db_location.path)
.and_then(|file| PeroxideDb::from(file))
.map_err(|err| {
Error::DatabaseIoError {
path: self.db_location.path.clone(),
cause: err,
}
})
}
}
impl PeroxideDbWriter for MainContext {
fn save_peroxide_db(&self, db: &PeroxideDb) -> Result<()> {
fs::File::create(&self.db_location.path)
.and_then(|mut file| db.save(&mut file))
.map_err(|err| {
Error::DatabaseIoError {
path: self.db_location.path.clone(),
cause: err,
}
})
}
}
pub trait ReaderContext: HasDbLocation + PeroxideDbReader {}
pub trait WriterContext: ReaderContext + PeroxideDbWriter {}
pub trait InputContext: KeyfileInput + PasswordInput + YubikeyInput {}
impl ReaderContext for MainContext {}
impl WriterContext for MainContext {}
impl InputContext for MainContext {} | fn save_peroxide_db(&self, db: &PeroxideDb) -> Result<()>;
}
#[derive(Debug)]
pub struct MainContext { | random_line_split |
context.rs | use std::path::Path;
use std::fs;
use std::io;
use std::path::PathBuf;
use std::result;
use std::time::Duration;
use uuid;
pub use io::KeyWrapper;
use io::{FileExtensions, Disks, TerminalPrompt};
use model::{DbLocation, PeroxideDb, YubikeySlot, YubikeyEntryType};
use cryptsetup_rs::device::CryptDevice;
pub type Result<T> = result::Result<T, Error>;
#[derive(Debug)]
pub enum Error {
KeyfileInputError { cause: io::Error },
PasswordInputError { cause: io::Error },
DatabaseIoError { path: PathBuf, cause: io::Error },
DiskIoError {
path: Option<PathBuf>,
cause: io::Error,
},
YubikeyError { message: String },
UnknownCryptoError,
FeatureNotAvailable,
}
pub trait HasDbLocation {
fn db_location<'a>(&'a self) -> &'a DbLocation;
}
pub trait KeyfileInput: Sized {
fn read_keyfile(&self, path: &Path) -> Result<KeyWrapper>;
}
pub trait PasswordInput: Sized {
fn read_password(&self, prompt: &str) -> Result<KeyWrapper>;
}
pub trait YubikeyInput: PasswordInput {
fn read_yubikey(&self, name: Option<&str>, uuid: &uuid::Uuid, slot: YubikeySlot, entry_type: YubikeyEntryType) -> Result<KeyWrapper>;
}
pub trait DiskSelector {
fn all_disk_uuids(&self) -> Result<Vec<uuid::Uuid>>;
fn disk_uuid_path(&self, uuid: &uuid::Uuid) -> Result<PathBuf>;
}
pub trait PeroxideDbReader: HasDbLocation {
fn open_peroxide_db(&self) -> Result<PeroxideDb>;
}
pub trait PeroxideDbWriter: HasDbLocation {
fn save_peroxide_db(&self, db: &PeroxideDb) -> Result<()>;
}
#[derive(Debug)]
pub struct MainContext {
db_location: DbLocation,
password_input_timeout: Option<Duration>,
}
impl MainContext {
pub fn new(location: DbLocation) -> MainContext {
MainContext {
db_location: location,
password_input_timeout: Some(Duration::new(30, 0)),
}
}
pub fn trace_on() {
CryptDevice::enable_debug(true);
}
}
impl HasDbLocation for MainContext {
fn | <'a>(&'a self) -> &'a DbLocation {
&self.db_location
}
}
impl KeyfileInput for MainContext {
fn read_keyfile(&self, path: &Path) -> Result<KeyWrapper> {
self.db_location
.open_relative_path(path)
.and_then(|mut file| KeyWrapper::read(&mut file))
.map_err(|err| Error::KeyfileInputError { cause: err })
}
}
impl PasswordInput for MainContext {
fn read_password(&self, prompt: &str) -> Result<KeyWrapper> {
TerminalPrompt::prompt_passphrase(prompt, self.password_input_timeout.as_ref())
.map_err(|err| Error::PasswordInputError { cause: err })
}
}
#[cfg(not(feature = "yubikey"))]
impl YubikeyInput for MainContext {
#[allow(unused)]
fn read_yubikey(&self, name: Option<&str>, uuid: &uuid::Uuid, slot: YubikeySlot, entry_type: YubikeyEntryType) -> Result<KeyWrapper> {
Err(Error::FeatureNotAvailable)
}
}
impl DiskSelector for MainContext {
fn all_disk_uuids(&self) -> Result<Vec<uuid::Uuid>> {
Disks::all_disk_uuids().map_err(|err| {
Error::DiskIoError {
path: None,
cause: err,
}
})
}
fn disk_uuid_path(&self, uuid: &uuid::Uuid) -> Result<PathBuf> {
Disks::disk_uuid_path(uuid).map_err(|err| {
Error::DiskIoError {
path: None,
cause: err,
}
})
}
}
impl PeroxideDbReader for MainContext {
fn open_peroxide_db(&self) -> Result<PeroxideDb> {
fs::File::open(&self.db_location.path)
.and_then(|file| PeroxideDb::from(file))
.map_err(|err| {
Error::DatabaseIoError {
path: self.db_location.path.clone(),
cause: err,
}
})
}
}
impl PeroxideDbWriter for MainContext {
fn save_peroxide_db(&self, db: &PeroxideDb) -> Result<()> {
fs::File::create(&self.db_location.path)
.and_then(|mut file| db.save(&mut file))
.map_err(|err| {
Error::DatabaseIoError {
path: self.db_location.path.clone(),
cause: err,
}
})
}
}
pub trait ReaderContext: HasDbLocation + PeroxideDbReader {}
pub trait WriterContext: ReaderContext + PeroxideDbWriter {}
pub trait InputContext: KeyfileInput + PasswordInput + YubikeyInput {}
impl ReaderContext for MainContext {}
impl WriterContext for MainContext {}
impl InputContext for MainContext {}
| db_location | identifier_name |
context.rs | use std::path::Path;
use std::fs;
use std::io;
use std::path::PathBuf;
use std::result;
use std::time::Duration;
use uuid;
pub use io::KeyWrapper;
use io::{FileExtensions, Disks, TerminalPrompt};
use model::{DbLocation, PeroxideDb, YubikeySlot, YubikeyEntryType};
use cryptsetup_rs::device::CryptDevice;
pub type Result<T> = result::Result<T, Error>;
#[derive(Debug)]
pub enum Error {
KeyfileInputError { cause: io::Error },
PasswordInputError { cause: io::Error },
DatabaseIoError { path: PathBuf, cause: io::Error },
DiskIoError {
path: Option<PathBuf>,
cause: io::Error,
},
YubikeyError { message: String },
UnknownCryptoError,
FeatureNotAvailable,
}
pub trait HasDbLocation {
fn db_location<'a>(&'a self) -> &'a DbLocation;
}
pub trait KeyfileInput: Sized {
fn read_keyfile(&self, path: &Path) -> Result<KeyWrapper>;
}
pub trait PasswordInput: Sized {
fn read_password(&self, prompt: &str) -> Result<KeyWrapper>;
}
pub trait YubikeyInput: PasswordInput {
fn read_yubikey(&self, name: Option<&str>, uuid: &uuid::Uuid, slot: YubikeySlot, entry_type: YubikeyEntryType) -> Result<KeyWrapper>;
}
pub trait DiskSelector {
fn all_disk_uuids(&self) -> Result<Vec<uuid::Uuid>>;
fn disk_uuid_path(&self, uuid: &uuid::Uuid) -> Result<PathBuf>;
}
pub trait PeroxideDbReader: HasDbLocation {
fn open_peroxide_db(&self) -> Result<PeroxideDb>;
}
pub trait PeroxideDbWriter: HasDbLocation {
fn save_peroxide_db(&self, db: &PeroxideDb) -> Result<()>;
}
#[derive(Debug)]
pub struct MainContext {
db_location: DbLocation,
password_input_timeout: Option<Duration>,
}
impl MainContext {
pub fn new(location: DbLocation) -> MainContext |
pub fn trace_on() {
CryptDevice::enable_debug(true);
}
}
impl HasDbLocation for MainContext {
fn db_location<'a>(&'a self) -> &'a DbLocation {
&self.db_location
}
}
impl KeyfileInput for MainContext {
fn read_keyfile(&self, path: &Path) -> Result<KeyWrapper> {
self.db_location
.open_relative_path(path)
.and_then(|mut file| KeyWrapper::read(&mut file))
.map_err(|err| Error::KeyfileInputError { cause: err })
}
}
impl PasswordInput for MainContext {
fn read_password(&self, prompt: &str) -> Result<KeyWrapper> {
TerminalPrompt::prompt_passphrase(prompt, self.password_input_timeout.as_ref())
.map_err(|err| Error::PasswordInputError { cause: err })
}
}
#[cfg(not(feature = "yubikey"))]
impl YubikeyInput for MainContext {
#[allow(unused)]
fn read_yubikey(&self, name: Option<&str>, uuid: &uuid::Uuid, slot: YubikeySlot, entry_type: YubikeyEntryType) -> Result<KeyWrapper> {
Err(Error::FeatureNotAvailable)
}
}
impl DiskSelector for MainContext {
fn all_disk_uuids(&self) -> Result<Vec<uuid::Uuid>> {
Disks::all_disk_uuids().map_err(|err| {
Error::DiskIoError {
path: None,
cause: err,
}
})
}
fn disk_uuid_path(&self, uuid: &uuid::Uuid) -> Result<PathBuf> {
Disks::disk_uuid_path(uuid).map_err(|err| {
Error::DiskIoError {
path: None,
cause: err,
}
})
}
}
impl PeroxideDbReader for MainContext {
fn open_peroxide_db(&self) -> Result<PeroxideDb> {
fs::File::open(&self.db_location.path)
.and_then(|file| PeroxideDb::from(file))
.map_err(|err| {
Error::DatabaseIoError {
path: self.db_location.path.clone(),
cause: err,
}
})
}
}
impl PeroxideDbWriter for MainContext {
fn save_peroxide_db(&self, db: &PeroxideDb) -> Result<()> {
fs::File::create(&self.db_location.path)
.and_then(|mut file| db.save(&mut file))
.map_err(|err| {
Error::DatabaseIoError {
path: self.db_location.path.clone(),
cause: err,
}
})
}
}
pub trait ReaderContext: HasDbLocation + PeroxideDbReader {}
pub trait WriterContext: ReaderContext + PeroxideDbWriter {}
pub trait InputContext: KeyfileInput + PasswordInput + YubikeyInput {}
impl ReaderContext for MainContext {}
impl WriterContext for MainContext {}
impl InputContext for MainContext {}
| {
MainContext {
db_location: location,
password_input_timeout: Some(Duration::new(30, 0)),
}
} | identifier_body |
macro-crate-does-hygiene-work.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:macro_crate_test.rs
// ignore-stage1
// Issue #15750: a macro that internally parses its input and then
// uses `quote_expr!` to rearrange it should be hygiene-preserving.
#![feature(plugin)]
#![plugin(macro_crate_test)]
fn main() | {
let x = 3;
assert_eq!(3, identity!(x));
assert_eq!(6, identity!(x+x));
let x = 4;
assert_eq!(4, identity!(x));
} | identifier_body |
|
macro-crate-does-hygiene-work.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:macro_crate_test.rs
// ignore-stage1 | // Issue #15750: a macro that internally parses its input and then
// uses `quote_expr!` to rearrange it should be hygiene-preserving.
#![feature(plugin)]
#![plugin(macro_crate_test)]
fn main() {
let x = 3;
assert_eq!(3, identity!(x));
assert_eq!(6, identity!(x+x));
let x = 4;
assert_eq!(4, identity!(x));
} | random_line_split |
|
macro-crate-does-hygiene-work.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:macro_crate_test.rs
// ignore-stage1
// Issue #15750: a macro that internally parses its input and then
// uses `quote_expr!` to rearrange it should be hygiene-preserving.
#![feature(plugin)]
#![plugin(macro_crate_test)]
fn | () {
let x = 3;
assert_eq!(3, identity!(x));
assert_eq!(6, identity!(x+x));
let x = 4;
assert_eq!(4, identity!(x));
}
| main | identifier_name |
sphere.rs | // Copyright Colin Sherratt 2014
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::f32::consts::PI;
use super::{Quad, Triangle, Polygon};
use super::Polygon::{PolyTri, PolyQuad};
use super::generators::{SharedVertex, IndexedPolygon};
/// Represents a sphere with radius of 1, centered at (0, 0, 0)
#[derive(Clone, Copy)]
pub struct SphereUV {
u: usize,
v: usize,
sub_u: usize,
sub_v: usize
}
impl SphereUV {
/// Create a new sphere.
/// `u` is the number of points across the equator of the sphere.
/// `v` is the number of points from pole to pole.
pub fn new(u: usize, v: usize) -> SphereUV {
SphereUV {
u: 0,
v: 0,
sub_u: u,
sub_v: v
}
}
fn | (&self, u: usize, v: usize) -> (f32, f32, f32) {
let u = (u as f32 / self.sub_u as f32) * PI * 2.;
let v = (v as f32 / self.sub_v as f32) * PI;
(u.cos() * v.sin(),
u.sin() * v.sin(),
v.cos())
}
}
impl Iterator for SphereUV {
type Item = Polygon<(f32, f32, f32)>;
fn size_hint(&self) -> (usize, Option<usize>) {
let n = (self.sub_v - self.v) * self.sub_u + (self.sub_u - self.u);
(n, Some(n))
}
fn next(&mut self) -> Option<Polygon<(f32, f32, f32)>> {
if self.u == self.sub_u {
self.u = 0;
self.v += 1;
if self.v == self.sub_v {
return None;
}
}
let x = self.vert(self.u, self.v);
let y = self.vert(self.u, self.v+1);
let z = self.vert(self.u+1, self.v+1);
let w = self.vert(self.u+1, self.v);
let v = self.v;
self.u += 1;
if v == 0 {
Some(PolyTri(Triangle::new(x, y, z)))
} else if v == self.sub_v - 1 {
Some(PolyTri(Triangle::new(z, w, x)))
} else {
Some(PolyQuad(Quad::new(x, y, z, w)))
}
}
}
impl SharedVertex<(f32, f32, f32)> for SphereUV {
fn shared_vertex(&self, idx: usize) -> (f32, f32, f32) {
if idx == 0 {
self.vert(0, 0)
} else if idx == self.shared_vertex_count() - 1 {
self.vert(0, self.sub_v)
} else {
// since the bottom verts all map to the same
// we jump over them in index space
let idx = idx - 1;
let u = idx % (self.sub_u);
let v = idx / (self.sub_u);
self.vert(u, v+1)
}
}
fn shared_vertex_count(&self) -> usize {
(self.sub_v - 1) * (self.sub_u) + 2
}
}
impl IndexedPolygon<Polygon<usize>> for SphereUV {
fn indexed_polygon(&self, idx: usize) -> Polygon<usize> {
let u = idx % self.sub_u;
let v = idx / self.sub_u;
let f = |u: usize, v: usize| {
if v == 0 {
0
} else if self.sub_v == v {
(self.sub_v-1) * (self.sub_u) + 1
} else {
(v-1) * self.sub_u + (u % self.sub_u) + 1
}
};
if v == 0 {
PolyTri(Triangle::new(f(u, v),
f(u, v+1),
f(u+1, v+1)))
} else if self.sub_v - 1 == v {
PolyTri(Triangle::new(f(u+1, v+1),
f(u+1, v),
f(u, v)))
} else {
PolyQuad(Quad::new(f(u, v),
f(u, v+1),
f(u+1, v+1),
f(u+1, v)))
}
}
fn indexed_polygon_count(&self) -> usize {
self.sub_v * self.sub_u
}
}
| vert | identifier_name |
sphere.rs | // Copyright Colin Sherratt 2014
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::f32::consts::PI;
use super::{Quad, Triangle, Polygon};
use super::Polygon::{PolyTri, PolyQuad};
use super::generators::{SharedVertex, IndexedPolygon};
/// Represents a sphere with radius of 1, centered at (0, 0, 0)
#[derive(Clone, Copy)]
pub struct SphereUV {
u: usize,
v: usize,
sub_u: usize,
sub_v: usize
}
impl SphereUV {
/// Create a new sphere.
/// `u` is the number of points across the equator of the sphere.
/// `v` is the number of points from pole to pole.
pub fn new(u: usize, v: usize) -> SphereUV {
SphereUV {
u: 0,
v: 0,
sub_u: u,
sub_v: v
}
}
fn vert(&self, u: usize, v: usize) -> (f32, f32, f32) |
}
impl Iterator for SphereUV {
type Item = Polygon<(f32, f32, f32)>;
fn size_hint(&self) -> (usize, Option<usize>) {
let n = (self.sub_v - self.v) * self.sub_u + (self.sub_u - self.u);
(n, Some(n))
}
fn next(&mut self) -> Option<Polygon<(f32, f32, f32)>> {
if self.u == self.sub_u {
self.u = 0;
self.v += 1;
if self.v == self.sub_v {
return None;
}
}
let x = self.vert(self.u, self.v);
let y = self.vert(self.u, self.v+1);
let z = self.vert(self.u+1, self.v+1);
let w = self.vert(self.u+1, self.v);
let v = self.v;
self.u += 1;
if v == 0 {
Some(PolyTri(Triangle::new(x, y, z)))
} else if v == self.sub_v - 1 {
Some(PolyTri(Triangle::new(z, w, x)))
} else {
Some(PolyQuad(Quad::new(x, y, z, w)))
}
}
}
impl SharedVertex<(f32, f32, f32)> for SphereUV {
fn shared_vertex(&self, idx: usize) -> (f32, f32, f32) {
if idx == 0 {
self.vert(0, 0)
} else if idx == self.shared_vertex_count() - 1 {
self.vert(0, self.sub_v)
} else {
// since the bottom verts all map to the same
// we jump over them in index space
let idx = idx - 1;
let u = idx % (self.sub_u);
let v = idx / (self.sub_u);
self.vert(u, v+1)
}
}
fn shared_vertex_count(&self) -> usize {
(self.sub_v - 1) * (self.sub_u) + 2
}
}
impl IndexedPolygon<Polygon<usize>> for SphereUV {
fn indexed_polygon(&self, idx: usize) -> Polygon<usize> {
let u = idx % self.sub_u;
let v = idx / self.sub_u;
let f = |u: usize, v: usize| {
if v == 0 {
0
} else if self.sub_v == v {
(self.sub_v-1) * (self.sub_u) + 1
} else {
(v-1) * self.sub_u + (u % self.sub_u) + 1
}
};
if v == 0 {
PolyTri(Triangle::new(f(u, v),
f(u, v+1),
f(u+1, v+1)))
} else if self.sub_v - 1 == v {
PolyTri(Triangle::new(f(u+1, v+1),
f(u+1, v),
f(u, v)))
} else {
PolyQuad(Quad::new(f(u, v),
f(u, v+1),
f(u+1, v+1),
f(u+1, v)))
}
}
fn indexed_polygon_count(&self) -> usize {
self.sub_v * self.sub_u
}
}
| {
let u = (u as f32 / self.sub_u as f32) * PI * 2.;
let v = (v as f32 / self.sub_v as f32) * PI;
(u.cos() * v.sin(),
u.sin() * v.sin(),
v.cos())
} | identifier_body |
sphere.rs | // Copyright Colin Sherratt 2014
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::f32::consts::PI;
use super::{Quad, Triangle, Polygon};
use super::Polygon::{PolyTri, PolyQuad};
use super::generators::{SharedVertex, IndexedPolygon};
/// Represents a sphere with radius of 1, centered at (0, 0, 0)
#[derive(Clone, Copy)]
pub struct SphereUV {
u: usize,
v: usize,
sub_u: usize,
sub_v: usize
}
impl SphereUV { | pub fn new(u: usize, v: usize) -> SphereUV {
SphereUV {
u: 0,
v: 0,
sub_u: u,
sub_v: v
}
}
fn vert(&self, u: usize, v: usize) -> (f32, f32, f32) {
let u = (u as f32 / self.sub_u as f32) * PI * 2.;
let v = (v as f32 / self.sub_v as f32) * PI;
(u.cos() * v.sin(),
u.sin() * v.sin(),
v.cos())
}
}
impl Iterator for SphereUV {
type Item = Polygon<(f32, f32, f32)>;
fn size_hint(&self) -> (usize, Option<usize>) {
let n = (self.sub_v - self.v) * self.sub_u + (self.sub_u - self.u);
(n, Some(n))
}
fn next(&mut self) -> Option<Polygon<(f32, f32, f32)>> {
if self.u == self.sub_u {
self.u = 0;
self.v += 1;
if self.v == self.sub_v {
return None;
}
}
let x = self.vert(self.u, self.v);
let y = self.vert(self.u, self.v+1);
let z = self.vert(self.u+1, self.v+1);
let w = self.vert(self.u+1, self.v);
let v = self.v;
self.u += 1;
if v == 0 {
Some(PolyTri(Triangle::new(x, y, z)))
} else if v == self.sub_v - 1 {
Some(PolyTri(Triangle::new(z, w, x)))
} else {
Some(PolyQuad(Quad::new(x, y, z, w)))
}
}
}
impl SharedVertex<(f32, f32, f32)> for SphereUV {
fn shared_vertex(&self, idx: usize) -> (f32, f32, f32) {
if idx == 0 {
self.vert(0, 0)
} else if idx == self.shared_vertex_count() - 1 {
self.vert(0, self.sub_v)
} else {
// since the bottom verts all map to the same
// we jump over them in index space
let idx = idx - 1;
let u = idx % (self.sub_u);
let v = idx / (self.sub_u);
self.vert(u, v+1)
}
}
fn shared_vertex_count(&self) -> usize {
(self.sub_v - 1) * (self.sub_u) + 2
}
}
impl IndexedPolygon<Polygon<usize>> for SphereUV {
fn indexed_polygon(&self, idx: usize) -> Polygon<usize> {
let u = idx % self.sub_u;
let v = idx / self.sub_u;
let f = |u: usize, v: usize| {
if v == 0 {
0
} else if self.sub_v == v {
(self.sub_v-1) * (self.sub_u) + 1
} else {
(v-1) * self.sub_u + (u % self.sub_u) + 1
}
};
if v == 0 {
PolyTri(Triangle::new(f(u, v),
f(u, v+1),
f(u+1, v+1)))
} else if self.sub_v - 1 == v {
PolyTri(Triangle::new(f(u+1, v+1),
f(u+1, v),
f(u, v)))
} else {
PolyQuad(Quad::new(f(u, v),
f(u, v+1),
f(u+1, v+1),
f(u+1, v)))
}
}
fn indexed_polygon_count(&self) -> usize {
self.sub_v * self.sub_u
}
} | /// Create a new sphere.
/// `u` is the number of points across the equator of the sphere.
/// `v` is the number of points from pole to pole. | random_line_split |
mod.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Collection types.
//!
//! Rust's standard collection library provides efficient implementations of the
//! most common general purpose programming data structures. By using the
//! standard implementations, it should be possible for two libraries to
//! communicate without significant data conversion.
//!
//! To get this out of the way: you should probably just use `Vec` or `HashMap`.
//! These two collections cover most use cases for generic data storage and
//! processing. They are exceptionally good at doing what they do. All the other
//! collections in the standard library have specific use cases where they are
//! the optimal choice, but these cases are borderline *niche* in comparison.
//! Even when `Vec` and `HashMap` are technically suboptimal, they're probably a
//! good enough choice to get started.
//!
//! Rust's collections can be grouped into four major categories:
//!
//! * Sequences: `Vec`, `VecDeque`, `LinkedList`, `BitVec`
//! * Maps: `HashMap`, `BTreeMap`, `VecMap`
//! * Sets: `HashSet`, `BTreeSet`, `BitSet`
//! * Misc: `BinaryHeap`
//!
//! # When Should You Use Which Collection?
//!
//! These are fairly high-level and quick break-downs of when each collection
//! should be considered. Detailed discussions of strengths and weaknesses of
//! individual collections can be found on their own documentation pages.
//!
//! ### Use a `Vec` when:
//! * You want to collect items up to be processed or sent elsewhere later, and
//! don't care about any properties of the actual values being stored.
//! * You want a sequence of elements in a particular order, and will only be
//! appending to (or near) the end.
//! * You want a stack.
//! * You want a resizable array.
//! * You want a heap-allocated array.
//!
//! ### Use a `VecDeque` when:
//! * You want a `Vec` that supports efficient insertion at both ends of the
//! sequence.
//! * You want a queue.
//! * You want a double-ended queue (deque).
//!
//! ### Use a `LinkedList` when:
//! * You want a `Vec` or `VecDeque` of unknown size, and can't tolerate
//! amortization.
//! * You want to efficiently split and append lists.
//! * You are *absolutely* certain you *really*, *truly*, want a doubly linked
//! list.
//!
//! ### Use a `HashMap` when:
//! * You want to associate arbitrary keys with an arbitrary value.
//! * You want a cache.
//! * You want a map, with no extra functionality.
//!
//! ### Use a `BTreeMap` when:
//! * You're interested in what the smallest or largest key-value pair is.
//! * You want to find the largest or smallest key that is smaller or larger
//! than something.
//! * You want to be able to get all of the entries in order on-demand.
//! * You want a sorted map.
//!
//! ### Use a `VecMap` when:
//! * You want a `HashMap` but with known to be small `usize` keys.
//! * You want a `BTreeMap`, but with known to be small `usize` keys.
//!
//! ### Use the `Set` variant of any of these `Map`s when:
//! * You just want to remember which keys you've seen.
//! * There is no meaningful value to associate with your keys.
//! * You just want a set.
//!
//! ### Use a `BitVec` when:
//! * You want to store an unbounded number of booleans in a small space.
//! * You want a bit vector.
//!
//! ### Use a `BitSet` when:
//! * You want a `BitVec`, but want `Set` properties
//!
//! ### Use a `BinaryHeap` when:
//!
//! * You want to store a bunch of elements, but only ever want to process the
//! "biggest" or "most important" one at any given time.
//! * You want a priority queue.
//!
//! # Performance
//!
//! Choosing the right collection for the job requires an understanding of what
//! each collection is good at. Here we briefly summarize the performance of
//! different collections for certain important operations. For further details,
//! see each type's documentation, and note that the names of actual methods may
//! differ from the tables below on certain collections.
//!
//! Throughout the documentation, we will follow a few conventions. For all
//! operations, the collection's size is denoted by n. If another collection is
//! involved in the operation, it contains m elements. Operations which have an
//! *amortized* cost are suffixed with a `*`. Operations with an *expected*
//! cost are suffixed with a `~`.
//!
//! All amortized costs are for the potential need to resize when capacity is
//! exhausted. If a resize occurs it will take O(n) time. Our collections never
//! automatically shrink, so removal operations aren't amortized. Over a
//! sufficiently large series of operations, the average cost per operation will
//! deterministically equal the given cost.
//!
//! Only HashMap has expected costs, due to the probabilistic nature of hashing.
//! It is theoretically possible, though very unlikely, for HashMap to
//! experience worse performance.
//!
//! ## Sequences
//!
//! | | get(i) | insert(i) | remove(i) | append | split_off(i) |
//! |--------------|----------------|-----------------|----------------|--------|----------------|
//! | Vec | O(1) | O(n-i)* | O(n-i) | O(m)* | O(n-i) |
//! | VecDeque | O(1) | O(min(i, n-i))* | O(min(i, n-i)) | O(m)* | O(min(i, n-i)) |
//! | LinkedList | O(min(i, n-i)) | O(min(i, n-i)) | O(min(i, n-i)) | O(1) | O(min(i, n-i)) |
//! | BitVec | O(1) | O(n-i)* | O(n-i) | O(m)* | O(n-i) |
//!
//! Note that where ties occur, Vec is generally going to be faster than VecDeque, and VecDeque
//! is generally going to be faster than LinkedList. BitVec is not a general purpose collection, and
//! therefore cannot reasonably be compared.
//!
//! ## Maps
//!
//! For Sets, all operations have the cost of the equivalent Map operation. For
//! BitSet,
//! refer to VecMap.
//!
//! | | get | insert | remove | predecessor |
//! |----------|-----------|----------|----------|-------------|
//! | HashMap | O(1)~ | O(1)~* | O(1)~ | N/A |
//! | BTreeMap | O(log n) | O(log n) | O(log n) | O(log n) |
//! | VecMap | O(1) | O(1)? | O(1) | O(n) |
//!
//! Note that VecMap is *incredibly* inefficient in terms of space. The O(1)
//! insertion time assumes space for the element is already allocated.
//! Otherwise, a large key may require a massive reallocation, with no direct
//! relation to the number of elements in the collection. VecMap should only be
//! seriously considered for small keys.
//!
//! Note also that BTreeMap's precise performance depends on the value of B.
//!
//! # Correct and Efficient Usage of Collections
//!
//! Of course, knowing which collection is the right one for the job doesn't
//! instantly permit you to use it correctly. Here are some quick tips for
//! efficient and correct usage of the standard collections in general. If
//! you're interested in how to use a specific collection in particular, consult
//! its documentation for detailed discussion and code examples.
//!
//! ## Capacity Management
//!
//! Many collections provide several constructors and methods that refer to
//! "capacity". These collections are generally built on top of an array.
//! Optimally, this array would be exactly the right size to fit only the
//! elements stored in the collection, but for the collection to do this would
//! be very inefficient. If the backing array was exactly the right size at all
//! times, then every time an element is inserted, the collection would have to
//! grow the array to fit it. Due to the way memory is allocated and managed on
//! most computers, this would almost surely require allocating an entirely new
//! array and copying every single element from the old one into the new one.
//! Hopefully you can see that this wouldn't be very efficient to do on every
//! operation.
//!
//! Most collections therefore use an *amortized* allocation strategy. They
//! generally let themselves have a fair amount of unoccupied space so that they
//! only have to grow on occasion. When they do grow, they allocate a
//! substantially larger array to move the elements into so that it will take a
//! while for another grow to be required. While this strategy is great in
//! general, it would be even better if the collection *never* had to resize its
//! backing array. Unfortunately, the collection itself doesn't have enough
//! information to do this itself. Therefore, it is up to us programmers to give
//! it hints.
//!
//! Any `with_capacity` constructor will instruct the collection to allocate
//! enough space for the specified number of elements. Ideally this will be for
//! exactly that many elements, but some implementation details may prevent
//! this. `Vec` and `VecDeque` can be relied on to allocate exactly the
//! requested amount, though. Use `with_capacity` when you know exactly how many
//! elements will be inserted, or at least have a reasonable upper-bound on that
//! number.
//!
//! When anticipating a large influx of elements, the `reserve` family of
//! methods can be used to hint to the collection how much room it should make
//! for the coming items. As with `with_capacity`, the precise behavior of
//! these methods will be specific to the collection of interest.
//!
//! For optimal performance, collections will generally avoid shrinking
//! themselves. If you believe that a collection will not soon contain any more
//! elements, or just really need the memory, the `shrink_to_fit` method prompts
//! the collection to shrink the backing array to the minimum size capable of
//! holding its elements.
//!
//! Finally, if ever you're interested in what the actual capacity of the
//! collection is, most collections provide a `capacity` method to query this
//! information on demand. This can be useful for debugging purposes, or for
//! use with the `reserve` methods.
//!
//! ## Iterators
//!
//! Iterators are a powerful and robust mechanism used throughout Rust's
//! standard libraries. Iterators provide a sequence of values in a generic,
//! safe, efficient and convenient way. The contents of an iterator are usually
//! *lazily* evaluated, so that only the values that are actually needed are
//! ever actually produced, and no allocation need be done to temporarily store
//! them. Iterators are primarily consumed using a `for` loop, although many
//! functions also take iterators where a collection or sequence of values is
//! desired.
//!
//! All of the standard collections provide several iterators for performing
//! bulk manipulation of their contents. The three primary iterators almost
//! every collection should provide are `iter`, `iter_mut`, and `into_iter`.
//! Some of these are not provided on collections where it would be unsound or
//! unreasonable to provide them.
//!
//! `iter` provides an iterator of immutable references to all the contents of a
//! collection in the most "natural" order. For sequence collections like `Vec`,
//! this means the items will be yielded in increasing order of index starting
//! at 0. For ordered collections like `BTreeMap`, this means that the items
//! will be yielded in sorted order. For unordered collections like `HashMap`,
//! the items will be yielded in whatever order the internal representation made
//! most convenient. This is great for reading through all the contents of the
//! collection.
//!
//! ```
//! let vec = vec![1, 2, 3, 4];
//! for x in vec.iter() {
//! println!("vec contained {}", x);
//! }
//! ```
//!
//! `iter_mut` provides an iterator of *mutable* references in the same order as
//! `iter`. This is great for mutating all the contents of the collection.
//!
//! ```
//! let mut vec = vec![1, 2, 3, 4];
//! for x in vec.iter_mut() {
//! *x += 1;
//! }
//! ```
//!
//! `into_iter` transforms the actual collection into an iterator over its
//! contents by-value. This is great when the collection itself is no longer
//! needed, and the values are needed elsewhere. Using `extend` with `into_iter`
//! is the main way that contents of one collection are moved into another.
//! `extend` automatically calls `into_iter`, and takes any `T: IntoIterator`.
//! Calling `collect` on an iterator itself is also a great way to convert one
//! collection into another. Both of these methods should internally use the
//! capacity management tools discussed in the previous section to do this as
//! efficiently as possible.
//!
//! ```
//! let mut vec1 = vec![1, 2, 3, 4];
//! let vec2 = vec![10, 20, 30, 40];
//! vec1.extend(vec2);
//! ```
//!
//! ```
//! use std::collections::VecDeque;
//!
//! let vec = vec![1, 2, 3, 4];
//! let buf: VecDeque<_> = vec.into_iter().collect();
//! ```
//!
//! Iterators also provide a series of *adapter* methods for performing common
//! threads to sequences. Among the adapters are functional favorites like `map`,
//! `fold`, `skip`, and `take`. Of particular interest to collections is the
//! `rev` adapter, that reverses any iterator that supports this operation. Most
//! collections provide reversible iterators as the way to iterate over them in
//! reverse order.
//!
//! ```
//! let vec = vec![1, 2, 3, 4];
//! for x in vec.iter().rev() {
//! println!("vec contained {}", x); | //!
//! Several other collection methods also return iterators to yield a sequence
//! of results but avoid allocating an entire collection to store the result in.
//! This provides maximum flexibility as `collect` or `extend` can be called to
//! "pipe" the sequence into any collection if desired. Otherwise, the sequence
//! can be looped over with a `for` loop. The iterator can also be discarded
//! after partial use, preventing the computation of the unused items.
//!
//! ## Entries
//!
//! The `entry` API is intended to provide an efficient mechanism for
//! manipulating the contents of a map conditionally on the presence of a key or
//! not. The primary motivating use case for this is to provide efficient
//! accumulator maps. For instance, if one wishes to maintain a count of the
//! number of times each key has been seen, they will have to perform some
//! conditional logic on whether this is the first time the key has been seen or
//! not. Normally, this would require a `find` followed by an `insert`,
//! effectively duplicating the search effort on each insertion.
//!
//! When a user calls `map.entry(&key)`, the map will search for the key and
//! then yield a variant of the `Entry` enum.
//!
//! If a `Vacant(entry)` is yielded, then the key *was not* found. In this case
//! the only valid operation is to `insert` a value into the entry. When this is
//! done, the vacant entry is consumed and converted into a mutable reference to
//! the value that was inserted. This allows for further manipulation of the
//! value beyond the lifetime of the search itself. This is useful if complex
//! logic needs to be performed on the value regardless of whether the value was
//! just inserted.
//!
//! If an `Occupied(entry)` is yielded, then the key *was* found. In this case,
//! the user has several options: they can `get`, `insert`, or `remove` the
//! value of the occupied entry. Additionally, they can convert the occupied
//! entry into a mutable reference to its value, providing symmetry to the
//! vacant `insert` case.
//!
//! ### Examples
//!
//! Here are the two primary ways in which `entry` is used. First, a simple
//! example where the logic performed on the values is trivial.
//!
//! #### Counting the number of times each character in a string occurs
//!
//! ```
//! use std::collections::btree_map::BTreeMap;
//!
//! let mut count = BTreeMap::new();
//! let message = "she sells sea shells by the sea shore";
//!
//! for c in message.chars() {
//! *count.entry(c).or_insert(0) += 1;
//! }
//!
//! assert_eq!(count.get(&'s'), Some(&8));
//!
//! println!("Number of occurrences of each character");
//! for (char, count) in &count {
//! println!("{}: {}", char, count);
//! }
//! ```
//!
//! When the logic to be performed on the value is more complex, we may simply
//! use the `entry` API to ensure that the value is initialized, and perform the
//! logic afterwards.
//!
//! #### Tracking the inebriation of customers at a bar
//!
//! ```
//! use std::collections::btree_map::BTreeMap;
//!
//! // A client of the bar. They have an id and a blood alcohol level.
//! struct Person { id: u32, blood_alcohol: f32 }
//!
//! // All the orders made to the bar, by client id.
//! let orders = vec![1,2,1,2,3,4,1,2,2,3,4,1,1,1];
//!
//! // Our clients.
//! let mut blood_alcohol = BTreeMap::new();
//!
//! for id in orders {
//! // If this is the first time we've seen this customer, initialize them
//! // with no blood alcohol. Otherwise, just retrieve them.
//! let person = blood_alcohol.entry(id).or_insert(Person{id: id, blood_alcohol: 0.0});
//!
//! // Reduce their blood alcohol level. It takes time to order and drink a beer!
//! person.blood_alcohol *= 0.9;
//!
//! // Check if they're sober enough to have another beer.
//! if person.blood_alcohol > 0.3 {
//! // Too drunk... for now.
//! println!("Sorry {}, I have to cut you off", person.id);
//! } else {
//! // Have another!
//! person.blood_alcohol += 0.1;
//! }
//! }
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
pub use core_collections::Bound;
pub use core_collections::{BinaryHeap, BTreeMap, BTreeSet};
pub use core_collections::{LinkedList, VecDeque};
pub use core_collections::{binary_heap, btree_map, btree_set};
pub use core_collections::{linked_list, vec_deque};
pub use self::hash_map::HashMap;
pub use self::hash_set::HashSet;
mod hash;
#[stable(feature = "rust1", since = "1.0.0")]
pub mod hash_map {
//! A hashmap
pub use super::hash::map::*;
}
#[stable(feature = "rust1", since = "1.0.0")]
pub mod hash_set {
//! A hashset
pub use super::hash::set::*;
}
/// Experimental support for providing custom hash algorithms to a HashMap and
/// HashSet.
#[unstable(feature = "hashmap_hasher", reason = "module was recently added",
issue = "27713")]
pub mod hash_state {
pub use super::hash::state::*;
} | //! }
//! ``` | random_line_split |
resolve_lock_lite.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use crate::storage::kv::WriteData;
use crate::storage::lock_manager::LockManager;
use crate::storage::mvcc::MvccTxn;
use crate::storage::txn::commands::{
Command, CommandExt, ReleasedLocks, ResponsePolicy, TypedCommand, WriteCommand, WriteContext,
WriteResult,
};
use crate::storage::txn::{cleanup, commit, Result};
use crate::storage::{ProcessResult, Snapshot};
use txn_types::{Key, TimeStamp};
command! {
/// Resolve locks on `resolve_keys` according to `start_ts` and `commit_ts`.
ResolveLockLite:
cmd_ty => (),
display => "kv::resolve_lock_lite", (),
content => {
/// The transaction timestamp.
start_ts: TimeStamp,
/// The transaction commit timestamp.
commit_ts: TimeStamp,
/// The keys to resolve.
resolve_keys: Vec<Key>,
}
}
impl CommandExt for ResolveLockLite {
ctx!();
tag!(resolve_lock_lite);
ts!(start_ts);
command_method!(is_sys_cmd, bool, true);
write_bytes!(resolve_keys: multiple);
gen_lock!(resolve_keys: multiple);
}
impl<S: Snapshot, L: LockManager> WriteCommand<S, L> for ResolveLockLite {
fn process_write(self, snapshot: S, context: WriteContext<'_, L>) -> Result<WriteResult> |
context.statistics.add(&txn.take_statistics());
let write_data = WriteData::from_modifies(txn.into_modifies());
Ok(WriteResult {
ctx: self.ctx,
to_be_write: write_data,
rows,
pr: ProcessResult::Res,
lock_info: None,
lock_guards: vec![],
response_policy: ResponsePolicy::OnApplied,
})
}
}
| {
let mut txn = MvccTxn::new(
snapshot,
self.start_ts,
!self.ctx.get_not_fill_cache(),
context.concurrency_manager,
);
let rows = self.resolve_keys.len();
// ti-client guarantees the size of resolve_keys will not too large, so no necessary
// to control the write_size as ResolveLock.
let mut released_locks = ReleasedLocks::new(self.start_ts, self.commit_ts);
for key in self.resolve_keys {
released_locks.push(if !self.commit_ts.is_zero() {
commit(&mut txn, key, self.commit_ts)?
} else {
cleanup(&mut txn, key, TimeStamp::zero(), false)?
});
}
released_locks.wake_up(context.lock_mgr); | identifier_body |
resolve_lock_lite.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use crate::storage::kv::WriteData;
use crate::storage::lock_manager::LockManager;
use crate::storage::mvcc::MvccTxn;
use crate::storage::txn::commands::{
Command, CommandExt, ReleasedLocks, ResponsePolicy, TypedCommand, WriteCommand, WriteContext,
WriteResult,
};
use crate::storage::txn::{cleanup, commit, Result};
use crate::storage::{ProcessResult, Snapshot};
use txn_types::{Key, TimeStamp};
command! {
/// Resolve locks on `resolve_keys` according to `start_ts` and `commit_ts`.
ResolveLockLite:
cmd_ty => (),
display => "kv::resolve_lock_lite", (),
content => {
/// The transaction timestamp.
start_ts: TimeStamp,
/// The transaction commit timestamp.
commit_ts: TimeStamp,
/// The keys to resolve.
resolve_keys: Vec<Key>,
}
}
impl CommandExt for ResolveLockLite {
ctx!();
tag!(resolve_lock_lite);
ts!(start_ts);
command_method!(is_sys_cmd, bool, true);
write_bytes!(resolve_keys: multiple);
gen_lock!(resolve_keys: multiple);
}
impl<S: Snapshot, L: LockManager> WriteCommand<S, L> for ResolveLockLite {
fn | (self, snapshot: S, context: WriteContext<'_, L>) -> Result<WriteResult> {
let mut txn = MvccTxn::new(
snapshot,
self.start_ts,
!self.ctx.get_not_fill_cache(),
context.concurrency_manager,
);
let rows = self.resolve_keys.len();
// ti-client guarantees the size of resolve_keys will not too large, so no necessary
// to control the write_size as ResolveLock.
let mut released_locks = ReleasedLocks::new(self.start_ts, self.commit_ts);
for key in self.resolve_keys {
released_locks.push(if!self.commit_ts.is_zero() {
commit(&mut txn, key, self.commit_ts)?
} else {
cleanup(&mut txn, key, TimeStamp::zero(), false)?
});
}
released_locks.wake_up(context.lock_mgr);
context.statistics.add(&txn.take_statistics());
let write_data = WriteData::from_modifies(txn.into_modifies());
Ok(WriteResult {
ctx: self.ctx,
to_be_write: write_data,
rows,
pr: ProcessResult::Res,
lock_info: None,
lock_guards: vec![],
response_policy: ResponsePolicy::OnApplied,
})
}
}
| process_write | identifier_name |
resolve_lock_lite.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use crate::storage::kv::WriteData;
use crate::storage::lock_manager::LockManager;
use crate::storage::mvcc::MvccTxn;
use crate::storage::txn::commands::{
Command, CommandExt, ReleasedLocks, ResponsePolicy, TypedCommand, WriteCommand, WriteContext,
WriteResult,
};
use crate::storage::txn::{cleanup, commit, Result};
use crate::storage::{ProcessResult, Snapshot};
use txn_types::{Key, TimeStamp};
command! {
/// Resolve locks on `resolve_keys` according to `start_ts` and `commit_ts`.
ResolveLockLite:
cmd_ty => (),
display => "kv::resolve_lock_lite", (),
content => {
/// The transaction timestamp.
start_ts: TimeStamp,
/// The transaction commit timestamp.
commit_ts: TimeStamp,
/// The keys to resolve.
resolve_keys: Vec<Key>,
}
}
impl CommandExt for ResolveLockLite {
ctx!();
tag!(resolve_lock_lite);
ts!(start_ts);
command_method!(is_sys_cmd, bool, true);
write_bytes!(resolve_keys: multiple);
gen_lock!(resolve_keys: multiple);
}
impl<S: Snapshot, L: LockManager> WriteCommand<S, L> for ResolveLockLite {
fn process_write(self, snapshot: S, context: WriteContext<'_, L>) -> Result<WriteResult> {
let mut txn = MvccTxn::new(
snapshot,
self.start_ts,
!self.ctx.get_not_fill_cache(),
context.concurrency_manager,
);
let rows = self.resolve_keys.len();
// ti-client guarantees the size of resolve_keys will not too large, so no necessary
// to control the write_size as ResolveLock.
let mut released_locks = ReleasedLocks::new(self.start_ts, self.commit_ts);
for key in self.resolve_keys {
released_locks.push(if!self.commit_ts.is_zero() {
commit(&mut txn, key, self.commit_ts)?
} else | );
}
released_locks.wake_up(context.lock_mgr);
context.statistics.add(&txn.take_statistics());
let write_data = WriteData::from_modifies(txn.into_modifies());
Ok(WriteResult {
ctx: self.ctx,
to_be_write: write_data,
rows,
pr: ProcessResult::Res,
lock_info: None,
lock_guards: vec![],
response_policy: ResponsePolicy::OnApplied,
})
}
}
| {
cleanup(&mut txn, key, TimeStamp::zero(), false)?
} | conditional_block |
resolve_lock_lite.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use crate::storage::kv::WriteData;
use crate::storage::lock_manager::LockManager; | WriteResult,
};
use crate::storage::txn::{cleanup, commit, Result};
use crate::storage::{ProcessResult, Snapshot};
use txn_types::{Key, TimeStamp};
command! {
/// Resolve locks on `resolve_keys` according to `start_ts` and `commit_ts`.
ResolveLockLite:
cmd_ty => (),
display => "kv::resolve_lock_lite", (),
content => {
/// The transaction timestamp.
start_ts: TimeStamp,
/// The transaction commit timestamp.
commit_ts: TimeStamp,
/// The keys to resolve.
resolve_keys: Vec<Key>,
}
}
impl CommandExt for ResolveLockLite {
ctx!();
tag!(resolve_lock_lite);
ts!(start_ts);
command_method!(is_sys_cmd, bool, true);
write_bytes!(resolve_keys: multiple);
gen_lock!(resolve_keys: multiple);
}
impl<S: Snapshot, L: LockManager> WriteCommand<S, L> for ResolveLockLite {
fn process_write(self, snapshot: S, context: WriteContext<'_, L>) -> Result<WriteResult> {
let mut txn = MvccTxn::new(
snapshot,
self.start_ts,
!self.ctx.get_not_fill_cache(),
context.concurrency_manager,
);
let rows = self.resolve_keys.len();
// ti-client guarantees the size of resolve_keys will not too large, so no necessary
// to control the write_size as ResolveLock.
let mut released_locks = ReleasedLocks::new(self.start_ts, self.commit_ts);
for key in self.resolve_keys {
released_locks.push(if!self.commit_ts.is_zero() {
commit(&mut txn, key, self.commit_ts)?
} else {
cleanup(&mut txn, key, TimeStamp::zero(), false)?
});
}
released_locks.wake_up(context.lock_mgr);
context.statistics.add(&txn.take_statistics());
let write_data = WriteData::from_modifies(txn.into_modifies());
Ok(WriteResult {
ctx: self.ctx,
to_be_write: write_data,
rows,
pr: ProcessResult::Res,
lock_info: None,
lock_guards: vec![],
response_policy: ResponsePolicy::OnApplied,
})
}
} | use crate::storage::mvcc::MvccTxn;
use crate::storage::txn::commands::{
Command, CommandExt, ReleasedLocks, ResponsePolicy, TypedCommand, WriteCommand, WriteContext, | random_line_split |
thread.rs | // Testing the the display of JoinHandle and Thread in cdb.
// cdb-only
// min-cdb-version: 10.0.18317.1001
// compile-flags:-g
// === CDB TESTS ==================================================================================
//
// cdb-command:g
//
// cdb-command:dx join_handle,d
// cdb-check:join_handle,d [Type: std::thread::JoinHandle<tuple$<> >]
// cdb-check: [...] __0 [Type: std::thread::JoinInner<tuple$<> >]
//
// cdb-command:dx t,d
// cdb-check:t,d : [...] [Type: std::thread::Thread *]
// cdb-check: [...] inner : {...} [Type: alloc::sync::Arc<std::thread::Inner>]
use std::thread;
#[allow(unused_variables)]
fn main()
{
let join_handle = thread::spawn(|| {
println!("Initialize a thread");
});
let t = join_handle.thread();
zzz(); // #break
}
fn | () {}
| zzz | identifier_name |
thread.rs | // Testing the the display of JoinHandle and Thread in cdb.
// cdb-only
// min-cdb-version: 10.0.18317.1001
// compile-flags:-g
// === CDB TESTS ==================================================================================
//
// cdb-command:g
//
// cdb-command:dx join_handle,d
// cdb-check:join_handle,d [Type: std::thread::JoinHandle<tuple$<> >]
// cdb-check: [...] __0 [Type: std::thread::JoinInner<tuple$<> >]
//
// cdb-command:dx t,d
// cdb-check:t,d : [...] [Type: std::thread::Thread *]
// cdb-check: [...] inner : {...} [Type: alloc::sync::Arc<std::thread::Inner>]
use std::thread;
#[allow(unused_variables)]
fn main()
{
let join_handle = thread::spawn(|| {
println!("Initialize a thread");
});
let t = join_handle.thread();
zzz(); // #break
}
fn zzz() | {} | identifier_body |
|
thread.rs | // Testing the the display of JoinHandle and Thread in cdb. | // min-cdb-version: 10.0.18317.1001
// compile-flags:-g
// === CDB TESTS ==================================================================================
//
// cdb-command:g
//
// cdb-command:dx join_handle,d
// cdb-check:join_handle,d [Type: std::thread::JoinHandle<tuple$<> >]
// cdb-check: [...] __0 [Type: std::thread::JoinInner<tuple$<> >]
//
// cdb-command:dx t,d
// cdb-check:t,d : [...] [Type: std::thread::Thread *]
// cdb-check: [...] inner : {...} [Type: alloc::sync::Arc<std::thread::Inner>]
use std::thread;
#[allow(unused_variables)]
fn main()
{
let join_handle = thread::spawn(|| {
println!("Initialize a thread");
});
let t = join_handle.thread();
zzz(); // #break
}
fn zzz() {} |
// cdb-only | random_line_split |
remove.rs | use libc::{c_ulong, c_ulonglong, c_void};
use super::super::error_type::ErrorType;
use super::super::instance::Instance;
use super::format_error;
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct RemoveInternal {
pub cookie: *mut c_void,
pub key: *const c_void,
pub nkey: c_ulong,
pub cas: c_ulonglong,
pub rc: ErrorType,
pub version: u16,
pub rflags: u16
}
impl RemoveInternal {
pub fn key(&self) -> Option<String> {
unsafe {
match self.rc {
ErrorType::Success => {
let bytes = ::std::slice::from_raw_parts(self.key as *mut u8, self.nkey as usize);
let text = ::std::str::from_utf8(bytes).unwrap();
Some(text.to_string())
},
_ => {
None
}
}
}
}
pub fn error(&self, instance: Instance) -> &'static str {
format_error(instance, &self.rc)
}
}
#[derive(Debug)]
pub struct Remove {
pub key: Option<String>,
pub cas: u64,
pub rc: ErrorType,
pub version: u16,
pub rflags: u16,
}
impl Remove {
pub fn new(internal: &RemoveInternal) -> Remove { | key: internal.key(),
cas: internal.cas,
rc: internal.rc,
version: internal.version,
rflags: internal.rflags
}
}
} | Remove { | random_line_split |
remove.rs | use libc::{c_ulong, c_ulonglong, c_void};
use super::super::error_type::ErrorType;
use super::super::instance::Instance;
use super::format_error;
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct RemoveInternal {
pub cookie: *mut c_void,
pub key: *const c_void,
pub nkey: c_ulong,
pub cas: c_ulonglong,
pub rc: ErrorType,
pub version: u16,
pub rflags: u16
}
impl RemoveInternal {
pub fn key(&self) -> Option<String> {
unsafe {
match self.rc {
ErrorType::Success => {
let bytes = ::std::slice::from_raw_parts(self.key as *mut u8, self.nkey as usize);
let text = ::std::str::from_utf8(bytes).unwrap();
Some(text.to_string())
},
_ => {
None
}
}
}
}
pub fn error(&self, instance: Instance) -> &'static str {
format_error(instance, &self.rc)
}
}
#[derive(Debug)]
pub struct | {
pub key: Option<String>,
pub cas: u64,
pub rc: ErrorType,
pub version: u16,
pub rflags: u16,
}
impl Remove {
pub fn new(internal: &RemoveInternal) -> Remove {
Remove {
key: internal.key(),
cas: internal.cas,
rc: internal.rc,
version: internal.version,
rflags: internal.rflags
}
}
}
| Remove | identifier_name |
remove.rs | use crate::database::models::ToU64;
use diesel::prelude::*;
use lalafell::commands::prelude::*;
use lalafell::error::*;
use serenity::model::id::GuildId;
pub struct RemoveCommand;
#[derive(Debug, StructOpt)]
pub struct Params {
#[structopt(help = "The ID of the auto reply to remove")]
id: i32
}
impl<'a> RemoveCommand {
#[allow(clippy::needless_pass_by_value)]
pub fn run(&self, guild: GuildId, params: Params) -> CommandResult<'a> {
let affected = crate::bot::with_connection(|c| {
use crate::database::schema::auto_replies::dsl; | if affected > 0 {
Ok(CommandSuccess::default())
} else {
Err("No auto replies were deleted.".into())
}
}
} | diesel::delete(
dsl::auto_replies.filter(dsl::id.eq(params.id).and(dsl::server_id.eq(guild.to_u64())))
)
.execute(c)
}).chain_err(|| "could not delete auto_replies")?; | random_line_split |
remove.rs | use crate::database::models::ToU64;
use diesel::prelude::*;
use lalafell::commands::prelude::*;
use lalafell::error::*;
use serenity::model::id::GuildId;
pub struct RemoveCommand;
#[derive(Debug, StructOpt)]
pub struct Params {
#[structopt(help = "The ID of the auto reply to remove")]
id: i32
}
impl<'a> RemoveCommand {
#[allow(clippy::needless_pass_by_value)]
pub fn run(&self, guild: GuildId, params: Params) -> CommandResult<'a> {
let affected = crate::bot::with_connection(|c| {
use crate::database::schema::auto_replies::dsl;
diesel::delete(
dsl::auto_replies.filter(dsl::id.eq(params.id).and(dsl::server_id.eq(guild.to_u64())))
)
.execute(c)
}).chain_err(|| "could not delete auto_replies")?;
if affected > 0 {
Ok(CommandSuccess::default())
} else |
}
}
| {
Err("No auto replies were deleted.".into())
} | conditional_block |
remove.rs | use crate::database::models::ToU64;
use diesel::prelude::*;
use lalafell::commands::prelude::*;
use lalafell::error::*;
use serenity::model::id::GuildId;
pub struct | ;
#[derive(Debug, StructOpt)]
pub struct Params {
#[structopt(help = "The ID of the auto reply to remove")]
id: i32
}
impl<'a> RemoveCommand {
#[allow(clippy::needless_pass_by_value)]
pub fn run(&self, guild: GuildId, params: Params) -> CommandResult<'a> {
let affected = crate::bot::with_connection(|c| {
use crate::database::schema::auto_replies::dsl;
diesel::delete(
dsl::auto_replies.filter(dsl::id.eq(params.id).and(dsl::server_id.eq(guild.to_u64())))
)
.execute(c)
}).chain_err(|| "could not delete auto_replies")?;
if affected > 0 {
Ok(CommandSuccess::default())
} else {
Err("No auto replies were deleted.".into())
}
}
}
| RemoveCommand | identifier_name |
privacy-ns.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(non_snake_case)]
// Check we do the correct privacy checks when we import a name and there is an
// item with that name in both the value and type namespaces.
// pretty-expanded FIXME #23616
#![allow(dead_code)]
#![allow(unused_imports)]
// public type, private value
pub mod foo1 {
pub trait Bar {
fn dummy(&self) { }
}
pub struct Baz;
fn Bar() { }
}
fn test_unused1() {
use foo1::*;
}
fn test_single1() {
use foo1::Bar;
let _x: Box<Bar>;
}
fn test_list1() {
use foo1::{Bar,Baz};
let _x: Box<Bar>;
}
fn test_glob1() {
use foo1::*;
let _x: Box<Bar>;
}
// private type, public value
pub mod foo2 {
trait Bar {
fn | (&self) { }
}
pub struct Baz;
pub fn Bar() { }
}
fn test_unused2() {
use foo2::*;
}
fn test_single2() {
use foo2::Bar;
Bar();
}
fn test_list2() {
use foo2::{Bar,Baz};
Bar();
}
fn test_glob2() {
use foo2::*;
Bar();
}
// public type, public value
pub mod foo3 {
pub trait Bar {
fn dummy(&self) { }
}
pub struct Baz;
pub fn Bar() { }
}
fn test_unused3() {
use foo3::*;
}
fn test_single3() {
use foo3::Bar;
Bar();
let _x: Box<Bar>;
}
fn test_list3() {
use foo3::{Bar,Baz};
Bar();
let _x: Box<Bar>;
}
fn test_glob3() {
use foo3::*;
Bar();
let _x: Box<Bar>;
}
fn main() {
}
| dummy | identifier_name |
privacy-ns.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(non_snake_case)]
// Check we do the correct privacy checks when we import a name and there is an
// item with that name in both the value and type namespaces.
// pretty-expanded FIXME #23616
#![allow(dead_code)]
#![allow(unused_imports)]
// public type, private value
pub mod foo1 {
pub trait Bar {
fn dummy(&self) { }
}
pub struct Baz;
fn Bar() { }
}
fn test_unused1() {
use foo1::*;
}
fn test_single1() {
use foo1::Bar;
let _x: Box<Bar>;
}
fn test_list1() {
use foo1::{Bar,Baz};
let _x: Box<Bar>;
}
fn test_glob1() {
use foo1::*;
let _x: Box<Bar>;
}
// private type, public value
pub mod foo2 {
trait Bar {
fn dummy(&self) { }
}
pub struct Baz;
pub fn Bar() { } |
fn test_unused2() {
use foo2::*;
}
fn test_single2() {
use foo2::Bar;
Bar();
}
fn test_list2() {
use foo2::{Bar,Baz};
Bar();
}
fn test_glob2() {
use foo2::*;
Bar();
}
// public type, public value
pub mod foo3 {
pub trait Bar {
fn dummy(&self) { }
}
pub struct Baz;
pub fn Bar() { }
}
fn test_unused3() {
use foo3::*;
}
fn test_single3() {
use foo3::Bar;
Bar();
let _x: Box<Bar>;
}
fn test_list3() {
use foo3::{Bar,Baz};
Bar();
let _x: Box<Bar>;
}
fn test_glob3() {
use foo3::*;
Bar();
let _x: Box<Bar>;
}
fn main() {
} | } | random_line_split |
privacy-ns.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(non_snake_case)]
// Check we do the correct privacy checks when we import a name and there is an
// item with that name in both the value and type namespaces.
// pretty-expanded FIXME #23616
#![allow(dead_code)]
#![allow(unused_imports)]
// public type, private value
pub mod foo1 {
pub trait Bar {
fn dummy(&self) { }
}
pub struct Baz;
fn Bar() { }
}
fn test_unused1() {
use foo1::*;
}
fn test_single1() {
use foo1::Bar;
let _x: Box<Bar>;
}
fn test_list1() {
use foo1::{Bar,Baz};
let _x: Box<Bar>;
}
fn test_glob1() {
use foo1::*;
let _x: Box<Bar>;
}
// private type, public value
pub mod foo2 {
trait Bar {
fn dummy(&self) { }
}
pub struct Baz;
pub fn Bar() { }
}
fn test_unused2() {
use foo2::*;
}
fn test_single2() {
use foo2::Bar;
Bar();
}
fn test_list2() {
use foo2::{Bar,Baz};
Bar();
}
fn test_glob2() {
use foo2::*;
Bar();
}
// public type, public value
pub mod foo3 {
pub trait Bar {
fn dummy(&self) { }
}
pub struct Baz;
pub fn Bar() { }
}
fn test_unused3() |
fn test_single3() {
use foo3::Bar;
Bar();
let _x: Box<Bar>;
}
fn test_list3() {
use foo3::{Bar,Baz};
Bar();
let _x: Box<Bar>;
}
fn test_glob3() {
use foo3::*;
Bar();
let _x: Box<Bar>;
}
fn main() {
}
| {
use foo3::*;
} | identifier_body |
v1.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The first version of the prelude of the standard library.
#![stable]
// Reexported core operators
#[stable] #[doc(no_inline)] pub use marker::{Copy, Send, Sized, Sync};
#[stable] #[doc(no_inline)] pub use ops::{Drop, Fn, FnMut, FnOnce};
// TEMPORARY
#[unstable] #[doc(no_inline)] pub use ops::FullRange;
// Reexported functions
#[stable] #[doc(no_inline)] pub use mem::drop;
// Reexported types and traits
#[stable] #[doc(no_inline)] pub use boxed::Box;
#[stable] #[doc(no_inline)] pub use char::CharExt;
#[stable] #[doc(no_inline)] pub use clone::Clone;
#[stable] #[doc(no_inline)] pub use cmp::{PartialEq, PartialOrd, Eq, Ord};
#[stable] #[doc(no_inline)] pub use iter::DoubleEndedIterator;
#[stable] #[doc(no_inline)] pub use iter::ExactSizeIterator;
#[stable] #[doc(no_inline)] pub use iter::{Iterator, IteratorExt, Extend};
#[stable] #[doc(no_inline)] pub use option::Option::{self, Some, None};
#[stable] #[doc(no_inline)] pub use ptr::{PtrExt, MutPtrExt};
#[stable] #[doc(no_inline)] pub use result::Result::{self, Ok, Err};
#[stable] #[doc(no_inline)] pub use slice::AsSlice;
#[stable] #[doc(no_inline)] pub use slice::{SliceExt, SliceConcatExt};
#[stable] #[doc(no_inline)] pub use str::{Str, StrExt};
#[stable] #[doc(no_inline)] pub use string::{String, ToString}; | // NB: remove when path reform lands
//#[doc(no_inline)] pub use path::{Path, GenericPath};
// NB: remove when I/O reform lands
//#[doc(no_inline)] pub use io::{Buffer, Writer, Reader, Seek, BufferPrelude};
// NB: remove when range syntax lands
#[doc(no_inline)] pub use iter::range; | #[stable] #[doc(no_inline)] pub use vec::Vec;
| random_line_split |
region.rs | */ | }
}
pub fn temporary_scope(&self, expr_id: ast::NodeId) -> Option<ast::NodeId> {
//! Returns the scope when temp created by expr_id will be cleaned up
// check for a designated rvalue scope
let rvalue_scopes = self.rvalue_scopes.borrow();
match rvalue_scopes.get().find(&expr_id) {
Some(&s) => {
debug!("temporary_scope({}) = {} [custom]", expr_id, s);
return Some(s);
}
None => { }
}
// else, locate the innermost terminating scope
let mut id = self.encl_scope(expr_id);
let terminating_scopes = self.terminating_scopes.borrow();
while!terminating_scopes.get().contains(&id) {
match self.opt_encl_scope(id) {
Some(p) => {
id = p;
}
None => {
debug!("temporary_scope({}) = None", expr_id);
return None;
}
}
}
debug!("temporary_scope({}) = {} [enclosing]", expr_id, id);
return Some(id);
}
pub fn encl_region(&self, id: ast::NodeId) -> ty::Region {
//! Returns the narrowest scope region that encloses `id`, if any.
ty::ReScope(self.encl_scope(id))
}
pub fn var_region(&self, id: ast::NodeId) -> ty::Region {
//! Returns the lifetime of the variable `id`.
ty::ReScope(self.var_scope(id))
}
pub fn scopes_intersect(&self, scope1: ast::NodeId, scope2: ast::NodeId)
-> bool {
self.is_subscope_of(scope1, scope2) ||
self.is_subscope_of(scope2, scope1)
}
pub fn is_subscope_of(&self,
subscope: ast::NodeId,
superscope: ast::NodeId)
-> bool {
/*!
* Returns true if `subscope` is equal to or is lexically
* nested inside `superscope` and false otherwise.
*/
let mut s = subscope;
while superscope!= s {
let scope_map = self.scope_map.borrow();
match scope_map.get().find(&s) {
None => {
debug!("is_subscope_of({}, {}, s={})=false",
subscope, superscope, s);
return false;
}
Some(&scope) => s = scope
}
}
debug!("is_subscope_of({}, {})=true",
subscope, superscope);
return true;
}
pub fn sub_free_region(&self, sub: FreeRegion, sup: FreeRegion) -> bool {
/*!
* Determines whether two free regions have a subregion relationship
* by walking the graph encoded in `free_region_map`. Note that
* it is possible that `sub!= sup` and `sub <= sup` and `sup <= sub`
* (that is, the user can give two different names to the same lifetime).
*/
if sub == sup {
return true;
}
// Do a little breadth-first-search here. The `queue` list
// doubles as a way to detect if we've seen a particular FR
// before. Note that we expect this graph to be an *extremely
// shallow* tree.
let mut queue = ~[sub];
let mut i = 0;
while i < queue.len() {
let free_region_map = self.free_region_map.borrow();
match free_region_map.get().find(&queue[i]) {
Some(parents) => {
for parent in parents.iter() {
if *parent == sup {
return true;
}
if!queue.iter().any(|x| x == parent) {
queue.push(*parent);
}
}
}
None => {}
}
i += 1;
}
return false;
}
pub fn is_subregion_of(&self,
sub_region: ty::Region,
super_region: ty::Region)
-> bool {
/*!
* Determines whether one region is a subregion of another. This is
* intended to run *after inference* and sadly the logic is somewhat
* duplicated with the code in infer.rs.
*/
debug!("is_subregion_of(sub_region={:?}, super_region={:?})",
sub_region, super_region);
sub_region == super_region || {
match (sub_region, super_region) {
(_, ty::ReStatic) => {
true
}
(ty::ReScope(sub_scope), ty::ReScope(super_scope)) => {
self.is_subscope_of(sub_scope, super_scope)
}
(ty::ReScope(sub_scope), ty::ReFree(ref fr)) => {
self.is_subscope_of(sub_scope, fr.scope_id)
}
(ty::ReFree(sub_fr), ty::ReFree(super_fr)) => {
self.sub_free_region(sub_fr, super_fr)
}
_ => {
false
}
}
}
}
pub fn nearest_common_ancestor(&self,
scope_a: ast::NodeId,
scope_b: ast::NodeId)
-> Option<ast::NodeId> {
/*!
* Finds the nearest common ancestor (if any) of two scopes. That
* is, finds the smallest scope which is greater than or equal to
* both `scope_a` and `scope_b`.
*/
if scope_a == scope_b { return Some(scope_a); }
let a_ancestors = ancestors_of(self, scope_a);
let b_ancestors = ancestors_of(self, scope_b);
let mut a_index = a_ancestors.len() - 1u;
let mut b_index = b_ancestors.len() - 1u;
// Here, ~[ab]_ancestors is a vector going from narrow to broad.
// The end of each vector will be the item where the scope is
// defined; if there are any common ancestors, then the tails of
// the vector will be the same. So basically we want to walk
// backwards from the tail of each vector and find the first point
// where they diverge. If one vector is a suffix of the other,
// then the corresponding scope is a superscope of the other.
if a_ancestors[a_index]!= b_ancestors[b_index] {
return None;
}
loop {
// Loop invariant: a_ancestors[a_index] == b_ancestors[b_index]
// for all indices between a_index and the end of the array
if a_index == 0u { return Some(scope_a); }
if b_index == 0u { return Some(scope_b); }
a_index -= 1u;
b_index -= 1u;
if a_ancestors[a_index]!= b_ancestors[b_index] {
return Some(a_ancestors[a_index + 1u]);
}
}
fn ancestors_of(this: &RegionMaps, scope: ast::NodeId)
-> ~[ast::NodeId]
{
// debug!("ancestors_of(scope={})", scope);
let mut result = ~[scope];
let mut scope = scope;
loop {
let scope_map = this.scope_map.borrow();
match scope_map.get().find(&scope) {
None => return result,
Some(&superscope) => {
result.push(superscope);
scope = superscope;
}
}
// debug!("ancestors_of_loop(scope={})", scope);
}
}
}
}
/// Records the current parent (if any) as the parent of `child_id`.
fn record_superlifetime(visitor: &mut RegionResolutionVisitor,
cx: Context,
child_id: ast::NodeId,
_sp: Span) {
for &parent_id in cx.parent.iter() {
visitor.region_maps.record_encl_scope(child_id, parent_id);
}
}
/// Records the lifetime of a local variable as `cx.var_parent`
fn record_var_lifetime(visitor: &mut RegionResolutionVisitor,
cx: Context,
var_id: ast::NodeId,
_sp: Span) {
match cx.var_parent {
Some(parent_id) => {
visitor.region_maps.record_var_scope(var_id, parent_id);
}
None => {
// this can happen in extern fn declarations like
//
// extern fn isalnum(c: c_int) -> c_int
}
}
}
fn resolve_block(visitor: &mut RegionResolutionVisitor,
blk: &ast::Block,
cx: Context) {
debug!("resolve_block(blk.id={})", blk.id);
// Record the parent of this block.
record_superlifetime(visitor, cx, blk.id, blk.span);
// We treat the tail expression in the block (if any) somewhat
// differently from the statements. The issue has to do with
// temporary lifetimes. If the user writes:
//
// {
// ... (&foo())...
// }
//
let subcx = Context {var_parent: Some(blk.id), parent: Some(blk.id)};
visit::walk_block(visitor, blk, subcx);
}
fn resolve_arm(visitor: &mut RegionResolutionVisitor,
arm: &ast::Arm,
cx: Context) {
visitor.region_maps.mark_as_terminating_scope(arm.body.id);
match arm.guard {
Some(expr) => {
visitor.region_maps.mark_as_terminating_scope(expr.id);
}
None => { }
}
visit::walk_arm(visitor, arm, cx);
}
fn resolve_pat(visitor: &mut RegionResolutionVisitor,
pat: &ast::Pat,
cx: Context) {
record_superlifetime(visitor, cx, pat.id, pat.span);
// If this is a binding (or maybe a binding, I'm too lazy to check
// the def map) then record the lifetime of that binding.
match pat.node {
ast::PatIdent(..) => {
record_var_lifetime(visitor, cx, pat.id, pat.span);
}
_ => { }
}
visit::walk_pat(visitor, pat, cx);
}
fn resolve_stmt(visitor: &mut RegionResolutionVisitor,
stmt: &ast::Stmt,
cx: Context) {
let stmt_id = stmt_id(stmt);
debug!("resolve_stmt(stmt.id={})", stmt_id);
visitor.region_maps.mark_as_terminating_scope(stmt_id);
record_superlifetime(visitor, cx, stmt_id, stmt.span);
let subcx = Context {parent: Some(stmt_id),..cx};
visit::walk_stmt(visitor, stmt, subcx);
}
fn resolve_expr(visitor: &mut RegionResolutionVisitor,
expr: &ast::Expr,
cx: Context) {
debug!("resolve_expr(expr.id={})", expr.id);
record_superlifetime(visitor, cx, expr.id, expr.span);
let mut new_cx = cx;
new_cx.parent = Some(expr.id);
match expr.node {
// Conditional or repeating scopes are always terminating
// scopes, meaning that temporaries cannot outlive them.
// This ensures fixed size stacks.
ast::ExprBinary(_, ast::BiAnd, _, r) |
ast::ExprBinary(_, ast::BiOr, _, r) => {
// For shortcircuiting operators, mark the RHS as a terminating
// scope since it only executes conditionally.
visitor.region_maps.mark_as_terminating_scope(r.id);
}
ast::ExprIf(_, then, Some(otherwise)) => {
visitor.region_maps.mark_as_terminating_scope(then.id);
visitor.region_maps.mark_as_terminating_scope(otherwise.id);
}
ast::ExprIf(_, then, None) => {
visitor.region_maps.mark_as_terminating_scope(then.id);
}
ast::ExprLoop(body, _) |
ast::ExprWhile(_, body) => {
visitor.region_maps.mark_as_terminating_scope(body.id);
}
ast::ExprMatch(..) => {
new_cx.var_parent = Some(expr.id);
}
ast::ExprAssignOp(..) | ast::ExprIndex(..) |
ast::ExprUnary(..) | ast::ExprCall(..) | ast::ExprMethodCall(..) => {
// FIXME(#6268) Nested method calls
//
// The lifetimes for a call or method call look as follows:
//
// call.id
// - arg0.id
// -...
// - argN.id
// - call.callee_id
//
// The idea is that call.callee_id represents *the time when
// the invoked function is actually running* and call.id
// represents *the time to prepare the arguments and make the
// call*. See the section "Borrows in Calls" borrowck/doc.rs
// for an extended explanantion of why this distinction is
// important.
//
// record_superlifetime(new_cx, expr.callee_id);
}
_ => {}
};
visit::walk_expr(visitor, expr, new_cx);
}
fn resolve_local(visitor: &mut RegionResolutionVisitor,
local: &ast::Local,
cx: Context) {
debug!("resolve_local(local.id={},local.init={})",
local.id,local.init.is_some());
let blk_id = match cx.var_parent {
Some(id) => id,
None => {
visitor.sess.span_bug(
local.span,
"Local without enclosing block");
}
};
// For convenience in trans, associate with the local-id the var
// scope that will be used for any bindings declared in this
// pattern.
visitor.region_maps.record_var_scope(local.id, blk_id);
// As an exception to the normal rules governing temporary
// lifetimes, initializers in a let have a temporary lifetime
// of the enclosing block. This means that e.g. a program
// like the following is legal:
//
// let ref x = HashMap::new();
//
// Because the hash map will be freed in the enclosing block.
//
// We express the rules more formally based on 3 grammars (defined
// fully in the helpers below that implement them):
//
// 1. `E&`, which matches expressions like `&<rvalue>` that
// own a pointer into the stack.
//
// 2. `P&`, which matches patterns like `ref x` or `(ref x, ref
// y)` that produce ref bindings into the value they are
// matched against or something (at least partially) owned by
// the value they are matched against. (By partially owned,
// I mean that creating a binding into a ref-counted or managed value
// would still count.)
//
// 3. `ET`, which matches both rvalues like `foo()` as well as lvalues
// based on rvalues like `foo().x[2].y`.
//
// A subexpression `<rvalue>` that appears in a let initializer
// `let pat [: ty] = expr` has an extended temporary lifetime if
// any of the following conditions are met:
//
// A. `pat` matches `P&` and `expr` matches `ET`
// (covers cases where `pat` creates ref bindings into an rvalue
// produced by `expr`)
// B. `ty` is a borrowed pointer and `expr` matches `ET`
// (covers cases where coercion creates a borrow)
// C. `expr` matches `E&`
// (covers cases `expr` borrows an rvalue that is then assigned
// to memory (at least partially) owned by the binding)
//
// Here are some examples hopefully giving an intution where each
// rule comes into play and why:
//
// Rule A. `let (ref x, ref y) = (foo().x, 44)`. The rvalue `(22, 44)`
// would have an extended lifetime, but not `foo()`.
//
// Rule B. `let x: &[...] = [foo().x]`. The rvalue `[foo().x]`
// would have an extended lifetime, but not `foo()`.
//
// Rule C. `let x = &foo().x`. The rvalue ``foo()` would have extended
// lifetime.
//
// In some cases, multiple rules may apply (though not to the same
// rvalue). For example:
//
// let ref x = [&a(), &b()];
//
// Here, the expression `[...]` has an extended lifetime due to rule
// A, but the inner rvalues `a()` and `b()` have an extended lifetime
// due to rule C.
//
// FIXME(#6308) -- Note that `[]` patterns work more smoothly post-DST.
match local.init {
Some(expr) => {
record_rvalue_scope_if_borrow_expr(visitor, expr, blk_id);
if is_binding_pat(local.pat) || is_borrowed_ty(local.ty) {
record_rvalue_scope(visitor, expr, blk_id);
}
}
None => { }
}
visit::walk_local(visitor, local, cx);
fn is_binding_pat(pat: &ast::Pat) -> bool {
/*!
* True if `pat` match the `P&` nonterminal:
*
* P& = ref X
* | StructName {..., P&,... }
* | VariantName(..., P&,...)
* | [..., P&,... ]
* | (..., P&,... )
* | ~P&
* | box P&
*/
match pat.node {
ast::PatIdent(ast::BindByRef(_), _, _) => true,
ast::PatStruct(_, ref field_pats, _) => {
field_pats.iter().any(|fp| is_binding_pat(fp.pat))
}
ast::PatVec(ref pats1, ref pats2, ref pats3) => {
pats1.iter().any(|&p |
let var_map = self.var_map.borrow();
match var_map.get().find(&var_id) {
Some(&r) => r,
None => { fail!("No enclosing scope for id {}", var_id); } | random_line_split |
region.rs |
let scope_map = this.scope_map.borrow();
match scope_map.get().find(&scope) {
None => return result,
Some(&superscope) => {
result.push(superscope);
scope = superscope;
}
}
// debug!("ancestors_of_loop(scope={})", scope);
}
}
}
}
/// Records the current parent (if any) as the parent of `child_id`.
fn record_superlifetime(visitor: &mut RegionResolutionVisitor,
cx: Context,
child_id: ast::NodeId,
_sp: Span) {
for &parent_id in cx.parent.iter() {
visitor.region_maps.record_encl_scope(child_id, parent_id);
}
}
/// Records the lifetime of a local variable as `cx.var_parent`
fn record_var_lifetime(visitor: &mut RegionResolutionVisitor,
cx: Context,
var_id: ast::NodeId,
_sp: Span) {
match cx.var_parent {
Some(parent_id) => {
visitor.region_maps.record_var_scope(var_id, parent_id);
}
None => {
// this can happen in extern fn declarations like
//
// extern fn isalnum(c: c_int) -> c_int
}
}
}
fn resolve_block(visitor: &mut RegionResolutionVisitor,
blk: &ast::Block,
cx: Context) {
debug!("resolve_block(blk.id={})", blk.id);
// Record the parent of this block.
record_superlifetime(visitor, cx, blk.id, blk.span);
// We treat the tail expression in the block (if any) somewhat
// differently from the statements. The issue has to do with
// temporary lifetimes. If the user writes:
//
// {
// ... (&foo())...
// }
//
let subcx = Context {var_parent: Some(blk.id), parent: Some(blk.id)};
visit::walk_block(visitor, blk, subcx);
}
fn resolve_arm(visitor: &mut RegionResolutionVisitor,
arm: &ast::Arm,
cx: Context) {
visitor.region_maps.mark_as_terminating_scope(arm.body.id);
match arm.guard {
Some(expr) => {
visitor.region_maps.mark_as_terminating_scope(expr.id);
}
None => { }
}
visit::walk_arm(visitor, arm, cx);
}
fn resolve_pat(visitor: &mut RegionResolutionVisitor,
pat: &ast::Pat,
cx: Context) {
record_superlifetime(visitor, cx, pat.id, pat.span);
// If this is a binding (or maybe a binding, I'm too lazy to check
// the def map) then record the lifetime of that binding.
match pat.node {
ast::PatIdent(..) => {
record_var_lifetime(visitor, cx, pat.id, pat.span);
}
_ => { }
}
visit::walk_pat(visitor, pat, cx);
}
fn resolve_stmt(visitor: &mut RegionResolutionVisitor,
stmt: &ast::Stmt,
cx: Context) {
let stmt_id = stmt_id(stmt);
debug!("resolve_stmt(stmt.id={})", stmt_id);
visitor.region_maps.mark_as_terminating_scope(stmt_id);
record_superlifetime(visitor, cx, stmt_id, stmt.span);
let subcx = Context {parent: Some(stmt_id),..cx};
visit::walk_stmt(visitor, stmt, subcx);
}
fn resolve_expr(visitor: &mut RegionResolutionVisitor,
expr: &ast::Expr,
cx: Context) {
debug!("resolve_expr(expr.id={})", expr.id);
record_superlifetime(visitor, cx, expr.id, expr.span);
let mut new_cx = cx;
new_cx.parent = Some(expr.id);
match expr.node {
// Conditional or repeating scopes are always terminating
// scopes, meaning that temporaries cannot outlive them.
// This ensures fixed size stacks.
ast::ExprBinary(_, ast::BiAnd, _, r) |
ast::ExprBinary(_, ast::BiOr, _, r) => {
// For shortcircuiting operators, mark the RHS as a terminating
// scope since it only executes conditionally.
visitor.region_maps.mark_as_terminating_scope(r.id);
}
ast::ExprIf(_, then, Some(otherwise)) => {
visitor.region_maps.mark_as_terminating_scope(then.id);
visitor.region_maps.mark_as_terminating_scope(otherwise.id);
}
ast::ExprIf(_, then, None) => {
visitor.region_maps.mark_as_terminating_scope(then.id);
}
ast::ExprLoop(body, _) |
ast::ExprWhile(_, body) => {
visitor.region_maps.mark_as_terminating_scope(body.id);
}
ast::ExprMatch(..) => {
new_cx.var_parent = Some(expr.id);
}
ast::ExprAssignOp(..) | ast::ExprIndex(..) |
ast::ExprUnary(..) | ast::ExprCall(..) | ast::ExprMethodCall(..) => {
// FIXME(#6268) Nested method calls
//
// The lifetimes for a call or method call look as follows:
//
// call.id
// - arg0.id
// -...
// - argN.id
// - call.callee_id
//
// The idea is that call.callee_id represents *the time when
// the invoked function is actually running* and call.id
// represents *the time to prepare the arguments and make the
// call*. See the section "Borrows in Calls" borrowck/doc.rs
// for an extended explanantion of why this distinction is
// important.
//
// record_superlifetime(new_cx, expr.callee_id);
}
_ => {}
};
visit::walk_expr(visitor, expr, new_cx);
}
fn resolve_local(visitor: &mut RegionResolutionVisitor,
local: &ast::Local,
cx: Context) {
debug!("resolve_local(local.id={},local.init={})",
local.id,local.init.is_some());
let blk_id = match cx.var_parent {
Some(id) => id,
None => {
visitor.sess.span_bug(
local.span,
"Local without enclosing block");
}
};
// For convenience in trans, associate with the local-id the var
// scope that will be used for any bindings declared in this
// pattern.
visitor.region_maps.record_var_scope(local.id, blk_id);
// As an exception to the normal rules governing temporary
// lifetimes, initializers in a let have a temporary lifetime
// of the enclosing block. This means that e.g. a program
// like the following is legal:
//
// let ref x = HashMap::new();
//
// Because the hash map will be freed in the enclosing block.
//
// We express the rules more formally based on 3 grammars (defined
// fully in the helpers below that implement them):
//
// 1. `E&`, which matches expressions like `&<rvalue>` that
// own a pointer into the stack.
//
// 2. `P&`, which matches patterns like `ref x` or `(ref x, ref
// y)` that produce ref bindings into the value they are
// matched against or something (at least partially) owned by
// the value they are matched against. (By partially owned,
// I mean that creating a binding into a ref-counted or managed value
// would still count.)
//
// 3. `ET`, which matches both rvalues like `foo()` as well as lvalues
// based on rvalues like `foo().x[2].y`.
//
// A subexpression `<rvalue>` that appears in a let initializer
// `let pat [: ty] = expr` has an extended temporary lifetime if
// any of the following conditions are met:
//
// A. `pat` matches `P&` and `expr` matches `ET`
// (covers cases where `pat` creates ref bindings into an rvalue
// produced by `expr`)
// B. `ty` is a borrowed pointer and `expr` matches `ET`
// (covers cases where coercion creates a borrow)
// C. `expr` matches `E&`
// (covers cases `expr` borrows an rvalue that is then assigned
// to memory (at least partially) owned by the binding)
//
// Here are some examples hopefully giving an intution where each
// rule comes into play and why:
//
// Rule A. `let (ref x, ref y) = (foo().x, 44)`. The rvalue `(22, 44)`
// would have an extended lifetime, but not `foo()`.
//
// Rule B. `let x: &[...] = [foo().x]`. The rvalue `[foo().x]`
// would have an extended lifetime, but not `foo()`.
//
// Rule C. `let x = &foo().x`. The rvalue ``foo()` would have extended
// lifetime.
//
// In some cases, multiple rules may apply (though not to the same
// rvalue). For example:
//
// let ref x = [&a(), &b()];
//
// Here, the expression `[...]` has an extended lifetime due to rule
// A, but the inner rvalues `a()` and `b()` have an extended lifetime
// due to rule C.
//
// FIXME(#6308) -- Note that `[]` patterns work more smoothly post-DST.
match local.init {
Some(expr) => {
record_rvalue_scope_if_borrow_expr(visitor, expr, blk_id);
if is_binding_pat(local.pat) || is_borrowed_ty(local.ty) {
record_rvalue_scope(visitor, expr, blk_id);
}
}
None => { }
}
visit::walk_local(visitor, local, cx);
fn is_binding_pat(pat: &ast::Pat) -> bool {
/*!
* True if `pat` match the `P&` nonterminal:
*
* P& = ref X
* | StructName {..., P&,... }
* | VariantName(..., P&,...)
* | [..., P&,... ]
* | (..., P&,... )
* | ~P&
* | box P&
*/
match pat.node {
ast::PatIdent(ast::BindByRef(_), _, _) => true,
ast::PatStruct(_, ref field_pats, _) => {
field_pats.iter().any(|fp| is_binding_pat(fp.pat))
}
ast::PatVec(ref pats1, ref pats2, ref pats3) => {
pats1.iter().any(|&p| is_binding_pat(p)) ||
pats2.iter().any(|&p| is_binding_pat(p)) ||
pats3.iter().any(|&p| is_binding_pat(p))
}
ast::PatEnum(_, Some(ref subpats)) |
ast::PatTup(ref subpats) => {
subpats.iter().any(|&p| is_binding_pat(p))
}
ast::PatUniq(subpat) => {
is_binding_pat(subpat)
}
_ => false,
}
}
fn is_borrowed_ty(ty: &ast::Ty) -> bool {
/*!
* True if `ty` is a borrowed pointer type
* like `&int` or `&[...]`.
*/
match ty.node {
ast::TyRptr(..) => true,
_ => false
}
}
fn record_rvalue_scope_if_borrow_expr(visitor: &mut RegionResolutionVisitor,
expr: &ast::Expr,
blk_id: ast::NodeId) {
/*!
* If `expr` matches the `E&` grammar, then records an extended
* rvalue scope as appropriate:
*
* E& = & ET
* | StructName {..., f: E&,... }
* | [..., E&,... ]
* | (..., E&,... )
* | {...; E&}
* | ~E&
* | E& as...
* | ( E& )
*/
match expr.node {
ast::ExprAddrOf(_, subexpr) => {
record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id);
record_rvalue_scope(visitor, subexpr, blk_id);
}
ast::ExprStruct(_, ref fields, _) => {
for field in fields.iter() {
record_rvalue_scope_if_borrow_expr(
visitor, field.expr, blk_id);
}
}
ast::ExprVstore(subexpr, _) => {
visitor.region_maps.record_rvalue_scope(subexpr.id, blk_id);
record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id);
}
ast::ExprVec(ref subexprs, _) |
ast::ExprTup(ref subexprs) => {
for &subexpr in subexprs.iter() {
record_rvalue_scope_if_borrow_expr(
visitor, subexpr, blk_id);
}
}
ast::ExprUnary(_, ast::UnUniq, subexpr) => {
record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id);
}
ast::ExprCast(subexpr, _) |
ast::ExprParen(subexpr) => {
record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id)
}
ast::ExprBlock(ref block) => {
match block.expr {
Some(subexpr) => {
record_rvalue_scope_if_borrow_expr(
visitor, subexpr, blk_id);
}
None => { }
}
}
_ => {
}
}
}
fn record_rvalue_scope<'a>(visitor: &mut RegionResolutionVisitor,
expr: &'a ast::Expr,
blk_id: ast::NodeId) {
/*!
* Applied to an expression `expr` if `expr` -- or something
* owned or partially owned by `expr` -- is going to be
* indirectly referenced by a variable in a let statement. In
* that case, the "temporary lifetime" or `expr` is extended
* to be the block enclosing the `let` statement.
*
* More formally, if `expr` matches the grammar `ET`, record
* the rvalue scope of the matching `<rvalue>` as `blk_id`:
*
* ET = *ET
* | ET[...]
* | ET.f
* | (ET)
* | <rvalue>
*
* Note: ET is intended to match "rvalues or
* lvalues based on rvalues".
*/
let mut expr = expr;
loop {
// Note: give all the expressions matching `ET` with the
// extended temporary lifetime, not just the innermost rvalue,
// because in trans if we must compile e.g. `*rvalue()`
// into a temporary, we request the temporary scope of the
// outer expression.
visitor.region_maps.record_rvalue_scope(expr.id, blk_id);
match expr.node {
ast::ExprAddrOf(_, ref subexpr) |
ast::ExprUnary(_, ast::UnDeref, ref subexpr) |
ast::ExprField(ref subexpr, _, _) |
ast::ExprIndex(_, ref subexpr, _) |
ast::ExprParen(ref subexpr) => {
let subexpr: &'a @Expr = subexpr; // FIXME(#11586)
expr = &**subexpr;
}
_ => {
return;
}
}
}
}
}
fn resolve_item(visitor: &mut RegionResolutionVisitor,
item: &ast::Item,
cx: Context) {
// Items create a new outer block scope as far as we're concerned.
let new_cx = Context {var_parent: None, parent: None,..cx};
visit::walk_item(visitor, item, new_cx);
}
fn resolve_fn(visitor: &mut RegionResolutionVisitor,
fk: &FnKind,
decl: &ast::FnDecl,
body: &ast::Block,
sp: Span,
id: ast::NodeId,
cx: Context) {
debug!("region::resolve_fn(id={}, \
span={:?}, \
body.id={}, \
cx.parent={})",
id,
visitor.sess.codemap.span_to_str(sp),
body.id,
cx.parent);
visitor.region_maps.mark_as_terminating_scope(body.id);
// The arguments and `self` are parented to the body of the fn.
let decl_cx = Context {parent: Some(body.id),
var_parent: Some(body.id)};
match *fk {
visit::FkMethod(_, _, method) => {
visitor.region_maps.record_var_scope(method.self_id, body.id);
}
_ => {}
}
visit::walk_fn_decl(visitor, decl, decl_cx);
// The body of the fn itself is either a root scope (top-level fn)
// or it continues with the inherited scope (closures).
let body_cx = match *fk {
visit::FkItemFn(..) | visit::FkMethod(..) => {
Context {parent: None, var_parent: None,..cx}
}
visit::FkFnBlock(..) => cx
};
visitor.visit_block(body, body_cx);
}
impl<'a> Visitor<Context> for RegionResolutionVisitor<'a> {
fn visit_block(&mut self, b: &Block, cx: Context) {
resolve_block(self, b, cx);
}
fn visit_item(&mut self, i: &Item, cx: Context) {
resolve_item(self, i, cx);
}
fn visit_fn(&mut self, fk: &FnKind, fd: &FnDecl,
b: &Block, s: Span, n: NodeId, cx: Context) {
resolve_fn(self, fk, fd, b, s, n, cx);
}
fn visit_arm(&mut self, a: &Arm, cx: Context) | {
resolve_arm(self, a, cx);
} | identifier_body |
|
region.rs | scope_a: ast::NodeId,
scope_b: ast::NodeId)
-> Option<ast::NodeId> {
/*!
* Finds the nearest common ancestor (if any) of two scopes. That
* is, finds the smallest scope which is greater than or equal to
* both `scope_a` and `scope_b`.
*/
if scope_a == scope_b { return Some(scope_a); }
let a_ancestors = ancestors_of(self, scope_a);
let b_ancestors = ancestors_of(self, scope_b);
let mut a_index = a_ancestors.len() - 1u;
let mut b_index = b_ancestors.len() - 1u;
// Here, ~[ab]_ancestors is a vector going from narrow to broad.
// The end of each vector will be the item where the scope is
// defined; if there are any common ancestors, then the tails of
// the vector will be the same. So basically we want to walk
// backwards from the tail of each vector and find the first point
// where they diverge. If one vector is a suffix of the other,
// then the corresponding scope is a superscope of the other.
if a_ancestors[a_index]!= b_ancestors[b_index] {
return None;
}
loop {
// Loop invariant: a_ancestors[a_index] == b_ancestors[b_index]
// for all indices between a_index and the end of the array
if a_index == 0u { return Some(scope_a); }
if b_index == 0u { return Some(scope_b); }
a_index -= 1u;
b_index -= 1u;
if a_ancestors[a_index]!= b_ancestors[b_index] {
return Some(a_ancestors[a_index + 1u]);
}
}
fn ancestors_of(this: &RegionMaps, scope: ast::NodeId)
-> ~[ast::NodeId]
{
// debug!("ancestors_of(scope={})", scope);
let mut result = ~[scope];
let mut scope = scope;
loop {
let scope_map = this.scope_map.borrow();
match scope_map.get().find(&scope) {
None => return result,
Some(&superscope) => {
result.push(superscope);
scope = superscope;
}
}
// debug!("ancestors_of_loop(scope={})", scope);
}
}
}
}
/// Records the current parent (if any) as the parent of `child_id`.
fn record_superlifetime(visitor: &mut RegionResolutionVisitor,
cx: Context,
child_id: ast::NodeId,
_sp: Span) {
for &parent_id in cx.parent.iter() {
visitor.region_maps.record_encl_scope(child_id, parent_id);
}
}
/// Records the lifetime of a local variable as `cx.var_parent`
fn record_var_lifetime(visitor: &mut RegionResolutionVisitor,
cx: Context,
var_id: ast::NodeId,
_sp: Span) {
match cx.var_parent {
Some(parent_id) => {
visitor.region_maps.record_var_scope(var_id, parent_id);
}
None => {
// this can happen in extern fn declarations like
//
// extern fn isalnum(c: c_int) -> c_int
}
}
}
fn resolve_block(visitor: &mut RegionResolutionVisitor,
blk: &ast::Block,
cx: Context) {
debug!("resolve_block(blk.id={})", blk.id);
// Record the parent of this block.
record_superlifetime(visitor, cx, blk.id, blk.span);
// We treat the tail expression in the block (if any) somewhat
// differently from the statements. The issue has to do with
// temporary lifetimes. If the user writes:
//
// {
// ... (&foo())...
// }
//
let subcx = Context {var_parent: Some(blk.id), parent: Some(blk.id)};
visit::walk_block(visitor, blk, subcx);
}
fn resolve_arm(visitor: &mut RegionResolutionVisitor,
arm: &ast::Arm,
cx: Context) {
visitor.region_maps.mark_as_terminating_scope(arm.body.id);
match arm.guard {
Some(expr) => {
visitor.region_maps.mark_as_terminating_scope(expr.id);
}
None => { }
}
visit::walk_arm(visitor, arm, cx);
}
fn resolve_pat(visitor: &mut RegionResolutionVisitor,
pat: &ast::Pat,
cx: Context) {
record_superlifetime(visitor, cx, pat.id, pat.span);
// If this is a binding (or maybe a binding, I'm too lazy to check
// the def map) then record the lifetime of that binding.
match pat.node {
ast::PatIdent(..) => {
record_var_lifetime(visitor, cx, pat.id, pat.span);
}
_ => { }
}
visit::walk_pat(visitor, pat, cx);
}
fn resolve_stmt(visitor: &mut RegionResolutionVisitor,
stmt: &ast::Stmt,
cx: Context) {
let stmt_id = stmt_id(stmt);
debug!("resolve_stmt(stmt.id={})", stmt_id);
visitor.region_maps.mark_as_terminating_scope(stmt_id);
record_superlifetime(visitor, cx, stmt_id, stmt.span);
let subcx = Context {parent: Some(stmt_id),..cx};
visit::walk_stmt(visitor, stmt, subcx);
}
fn resolve_expr(visitor: &mut RegionResolutionVisitor,
expr: &ast::Expr,
cx: Context) {
debug!("resolve_expr(expr.id={})", expr.id);
record_superlifetime(visitor, cx, expr.id, expr.span);
let mut new_cx = cx;
new_cx.parent = Some(expr.id);
match expr.node {
// Conditional or repeating scopes are always terminating
// scopes, meaning that temporaries cannot outlive them.
// This ensures fixed size stacks.
ast::ExprBinary(_, ast::BiAnd, _, r) |
ast::ExprBinary(_, ast::BiOr, _, r) => {
// For shortcircuiting operators, mark the RHS as a terminating
// scope since it only executes conditionally.
visitor.region_maps.mark_as_terminating_scope(r.id);
}
ast::ExprIf(_, then, Some(otherwise)) => {
visitor.region_maps.mark_as_terminating_scope(then.id);
visitor.region_maps.mark_as_terminating_scope(otherwise.id);
}
ast::ExprIf(_, then, None) => {
visitor.region_maps.mark_as_terminating_scope(then.id);
}
ast::ExprLoop(body, _) |
ast::ExprWhile(_, body) => {
visitor.region_maps.mark_as_terminating_scope(body.id);
}
ast::ExprMatch(..) => {
new_cx.var_parent = Some(expr.id);
}
ast::ExprAssignOp(..) | ast::ExprIndex(..) |
ast::ExprUnary(..) | ast::ExprCall(..) | ast::ExprMethodCall(..) => {
// FIXME(#6268) Nested method calls
//
// The lifetimes for a call or method call look as follows:
//
// call.id
// - arg0.id
// -...
// - argN.id
// - call.callee_id
//
// The idea is that call.callee_id represents *the time when
// the invoked function is actually running* and call.id
// represents *the time to prepare the arguments and make the
// call*. See the section "Borrows in Calls" borrowck/doc.rs
// for an extended explanantion of why this distinction is
// important.
//
// record_superlifetime(new_cx, expr.callee_id);
}
_ => {}
};
visit::walk_expr(visitor, expr, new_cx);
}
fn resolve_local(visitor: &mut RegionResolutionVisitor,
local: &ast::Local,
cx: Context) {
debug!("resolve_local(local.id={},local.init={})",
local.id,local.init.is_some());
let blk_id = match cx.var_parent {
Some(id) => id,
None => {
visitor.sess.span_bug(
local.span,
"Local without enclosing block");
}
};
// For convenience in trans, associate with the local-id the var
// scope that will be used for any bindings declared in this
// pattern.
visitor.region_maps.record_var_scope(local.id, blk_id);
// As an exception to the normal rules governing temporary
// lifetimes, initializers in a let have a temporary lifetime
// of the enclosing block. This means that e.g. a program
// like the following is legal:
//
// let ref x = HashMap::new();
//
// Because the hash map will be freed in the enclosing block.
//
// We express the rules more formally based on 3 grammars (defined
// fully in the helpers below that implement them):
//
// 1. `E&`, which matches expressions like `&<rvalue>` that
// own a pointer into the stack.
//
// 2. `P&`, which matches patterns like `ref x` or `(ref x, ref
// y)` that produce ref bindings into the value they are
// matched against or something (at least partially) owned by
// the value they are matched against. (By partially owned,
// I mean that creating a binding into a ref-counted or managed value
// would still count.)
//
// 3. `ET`, which matches both rvalues like `foo()` as well as lvalues
// based on rvalues like `foo().x[2].y`.
//
// A subexpression `<rvalue>` that appears in a let initializer
// `let pat [: ty] = expr` has an extended temporary lifetime if
// any of the following conditions are met:
//
// A. `pat` matches `P&` and `expr` matches `ET`
// (covers cases where `pat` creates ref bindings into an rvalue
// produced by `expr`)
// B. `ty` is a borrowed pointer and `expr` matches `ET`
// (covers cases where coercion creates a borrow)
// C. `expr` matches `E&`
// (covers cases `expr` borrows an rvalue that is then assigned
// to memory (at least partially) owned by the binding)
//
// Here are some examples hopefully giving an intution where each
// rule comes into play and why:
//
// Rule A. `let (ref x, ref y) = (foo().x, 44)`. The rvalue `(22, 44)`
// would have an extended lifetime, but not `foo()`.
//
// Rule B. `let x: &[...] = [foo().x]`. The rvalue `[foo().x]`
// would have an extended lifetime, but not `foo()`.
//
// Rule C. `let x = &foo().x`. The rvalue ``foo()` would have extended
// lifetime.
//
// In some cases, multiple rules may apply (though not to the same
// rvalue). For example:
//
// let ref x = [&a(), &b()];
//
// Here, the expression `[...]` has an extended lifetime due to rule
// A, but the inner rvalues `a()` and `b()` have an extended lifetime
// due to rule C.
//
// FIXME(#6308) -- Note that `[]` patterns work more smoothly post-DST.
match local.init {
Some(expr) => {
record_rvalue_scope_if_borrow_expr(visitor, expr, blk_id);
if is_binding_pat(local.pat) || is_borrowed_ty(local.ty) {
record_rvalue_scope(visitor, expr, blk_id);
}
}
None => { }
}
visit::walk_local(visitor, local, cx);
fn is_binding_pat(pat: &ast::Pat) -> bool {
/*!
* True if `pat` match the `P&` nonterminal:
*
* P& = ref X
* | StructName {..., P&,... }
* | VariantName(..., P&,...)
* | [..., P&,... ]
* | (..., P&,... )
* | ~P&
* | box P&
*/
match pat.node {
ast::PatIdent(ast::BindByRef(_), _, _) => true,
ast::PatStruct(_, ref field_pats, _) => {
field_pats.iter().any(|fp| is_binding_pat(fp.pat))
}
ast::PatVec(ref pats1, ref pats2, ref pats3) => {
pats1.iter().any(|&p| is_binding_pat(p)) ||
pats2.iter().any(|&p| is_binding_pat(p)) ||
pats3.iter().any(|&p| is_binding_pat(p))
}
ast::PatEnum(_, Some(ref subpats)) |
ast::PatTup(ref subpats) => {
subpats.iter().any(|&p| is_binding_pat(p))
}
ast::PatUniq(subpat) => {
is_binding_pat(subpat)
}
_ => false,
}
}
fn is_borrowed_ty(ty: &ast::Ty) -> bool {
/*!
* True if `ty` is a borrowed pointer type
* like `&int` or `&[...]`.
*/
match ty.node {
ast::TyRptr(..) => true,
_ => false
}
}
fn record_rvalue_scope_if_borrow_expr(visitor: &mut RegionResolutionVisitor,
expr: &ast::Expr,
blk_id: ast::NodeId) {
/*!
* If `expr` matches the `E&` grammar, then records an extended
* rvalue scope as appropriate:
*
* E& = & ET
* | StructName {..., f: E&,... }
* | [..., E&,... ]
* | (..., E&,... )
* | {...; E&}
* | ~E&
* | E& as...
* | ( E& )
*/
match expr.node {
ast::ExprAddrOf(_, subexpr) => {
record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id);
record_rvalue_scope(visitor, subexpr, blk_id);
}
ast::ExprStruct(_, ref fields, _) => {
for field in fields.iter() {
record_rvalue_scope_if_borrow_expr(
visitor, field.expr, blk_id);
}
}
ast::ExprVstore(subexpr, _) => {
visitor.region_maps.record_rvalue_scope(subexpr.id, blk_id);
record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id);
}
ast::ExprVec(ref subexprs, _) |
ast::ExprTup(ref subexprs) => {
for &subexpr in subexprs.iter() {
record_rvalue_scope_if_borrow_expr(
visitor, subexpr, blk_id);
}
}
ast::ExprUnary(_, ast::UnUniq, subexpr) => {
record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id);
}
ast::ExprCast(subexpr, _) |
ast::ExprParen(subexpr) => {
record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id)
}
ast::ExprBlock(ref block) => {
match block.expr {
Some(subexpr) => {
record_rvalue_scope_if_borrow_expr(
visitor, subexpr, blk_id);
}
None => { }
}
}
_ => {
}
}
}
fn record_rvalue_scope<'a>(visitor: &mut RegionResolutionVisitor,
expr: &'a ast::Expr,
blk_id: ast::NodeId) {
/*!
* Applied to an expression `expr` if `expr` -- or something
* owned or partially owned by `expr` -- is going to be
* indirectly referenced by a variable in a let statement. In
* that case, the "temporary lifetime" or `expr` is extended
* to be the block enclosing the `let` statement.
*
* More formally, if `expr` matches the grammar `ET`, record
* the rvalue scope of the matching `<rvalue>` as `blk_id`:
*
* ET = *ET
* | ET[...]
* | ET.f
* | (ET)
* | <rvalue>
*
* Note: ET is intended to match "rvalues or
* lvalues based on rvalues".
*/
let mut expr = expr;
loop {
// Note: give all the expressions matching `ET` with the
// extended temporary lifetime, not just the innermost rvalue,
// because in trans if we must compile e.g. `*rvalue()`
// into a temporary, we request the temporary scope of the
// outer expression.
visitor.region_maps.record_rvalue_scope(expr.id, blk_id);
match expr.node {
ast::ExprAddrOf(_, ref subexpr) |
ast::ExprUnary(_, ast::UnDeref, ref subexpr) |
ast::ExprField(ref subexpr, _, _) |
ast::ExprIndex(_, ref subexpr, _) |
ast::ExprParen(ref subexpr) => {
let subexpr: &'a @Expr = subexpr; // FIXME(#11586)
expr = &**subexpr;
}
_ => {
return;
}
}
}
}
}
fn resolve_item(visitor: &mut RegionResolutionVisitor,
item: &ast::Item,
cx: Context) {
// Items create a new outer block scope as far as we're concerned.
let new_cx = Context {var_parent: None, parent: None,..cx};
visit::walk_item(visitor, item, new_cx);
}
fn | resolve_fn | identifier_name |
|
main.rs | #![feature(plugin)]
#![plugin(rocket_codegen)]
#[macro_use]
extern crate lazy_static;
extern crate uuid;
extern crate rocket;
extern crate rocket_contrib;
use std::collections::HashMap;
use uuid::Uuid;
use rocket_contrib::UUID;
#[cfg(test)]
mod tests;
lazy_static! { | static ref PEOPLE: HashMap<Uuid, &'static str> = {
let mut m = HashMap::new();
let lacy_id = Uuid::parse_str("7f205202-7ba1-4c39-b2fc-3e630722bf9f").unwrap();
let bob_id = Uuid::parse_str("4da34121-bc7d-4fc1-aee6-bf8de0795333").unwrap();
let george_id = Uuid::parse_str("ad962969-4e3d-4de7-ac4a-2d86d6d10839").unwrap();
m.insert(lacy_id, "Lacy");
m.insert(bob_id, "Bob");
m.insert(george_id, "George");
m
};
}
#[get("/people/<id>")]
fn people(id: UUID) -> Result<String, String> {
// Because UUID implements the Deref trait, we use Deref coercion to
// convert rocket_contrib::UUID to uuid::Uuid.
Ok(PEOPLE.get(&id)
.map(|person| format!("We found: {}", person))
.ok_or(format!("Person not found for UUID: {}", id))?)
}
fn main() {
rocket::ignite()
.mount("/", routes![people])
.launch();
} | // A small people lookup table for the sake of this example. In a real
// application this could be a database lookup. Notice that we use the
// uuid::Uuid type here and not the rocket_contrib::UUID type. | random_line_split |
main.rs | #![feature(plugin)]
#![plugin(rocket_codegen)]
#[macro_use]
extern crate lazy_static;
extern crate uuid;
extern crate rocket;
extern crate rocket_contrib;
use std::collections::HashMap;
use uuid::Uuid;
use rocket_contrib::UUID;
#[cfg(test)]
mod tests;
lazy_static! {
// A small people lookup table for the sake of this example. In a real
// application this could be a database lookup. Notice that we use the
// uuid::Uuid type here and not the rocket_contrib::UUID type.
static ref PEOPLE: HashMap<Uuid, &'static str> = {
let mut m = HashMap::new();
let lacy_id = Uuid::parse_str("7f205202-7ba1-4c39-b2fc-3e630722bf9f").unwrap();
let bob_id = Uuid::parse_str("4da34121-bc7d-4fc1-aee6-bf8de0795333").unwrap();
let george_id = Uuid::parse_str("ad962969-4e3d-4de7-ac4a-2d86d6d10839").unwrap();
m.insert(lacy_id, "Lacy");
m.insert(bob_id, "Bob");
m.insert(george_id, "George");
m
};
}
#[get("/people/<id>")]
fn people(id: UUID) -> Result<String, String> {
// Because UUID implements the Deref trait, we use Deref coercion to
// convert rocket_contrib::UUID to uuid::Uuid.
Ok(PEOPLE.get(&id)
.map(|person| format!("We found: {}", person))
.ok_or(format!("Person not found for UUID: {}", id))?)
}
fn | () {
rocket::ignite()
.mount("/", routes![people])
.launch();
}
| main | identifier_name |
main.rs | #![feature(plugin)]
#![plugin(rocket_codegen)]
#[macro_use]
extern crate lazy_static;
extern crate uuid;
extern crate rocket;
extern crate rocket_contrib;
use std::collections::HashMap;
use uuid::Uuid;
use rocket_contrib::UUID;
#[cfg(test)]
mod tests;
lazy_static! {
// A small people lookup table for the sake of this example. In a real
// application this could be a database lookup. Notice that we use the
// uuid::Uuid type here and not the rocket_contrib::UUID type.
static ref PEOPLE: HashMap<Uuid, &'static str> = {
let mut m = HashMap::new();
let lacy_id = Uuid::parse_str("7f205202-7ba1-4c39-b2fc-3e630722bf9f").unwrap();
let bob_id = Uuid::parse_str("4da34121-bc7d-4fc1-aee6-bf8de0795333").unwrap();
let george_id = Uuid::parse_str("ad962969-4e3d-4de7-ac4a-2d86d6d10839").unwrap();
m.insert(lacy_id, "Lacy");
m.insert(bob_id, "Bob");
m.insert(george_id, "George");
m
};
}
#[get("/people/<id>")]
fn people(id: UUID) -> Result<String, String> {
// Because UUID implements the Deref trait, we use Deref coercion to
// convert rocket_contrib::UUID to uuid::Uuid.
Ok(PEOPLE.get(&id)
.map(|person| format!("We found: {}", person))
.ok_or(format!("Person not found for UUID: {}", id))?)
}
fn main() | {
rocket::ignite()
.mount("/", routes![people])
.launch();
} | identifier_body |
|
account.rs | extern crate meg;
use std::env;
use std::clone::Clone;
use turbo::util::{CliResult, Config};
use self::meg::ops::meg_account_create as Act;
use self::meg::ops::meg_account_show as Show;
#[derive(RustcDecodable, Clone)]
pub struct Options {
pub arg_email: String,
pub flag_show: bool,
}
pub const USAGE: &'static str = "
Usage:
meg account [options] [<email>]
Options:
-h, --help Print this message
--create Provide an email to create a new account
--show Provide an email to show the account
-v, --verbose Use verbose output
";
pub fn execute(options: Options, _: &Config) -> CliResult<Option<()>> {
let vec = env::args().collect::<Vec<_>>();
for x in vec.iter() {
if x == "--create" {
let mut acct: Act::Createoptions = Act::CreateAcc::new();
acct.email = options.arg_email.clone();
let x = acct.create();
} else if x == "--show" {
let mut acct: Show::Showoptions = Show::ShowAcc::new(); //Not reqd - to expand later if
acct.email = options.arg_email.clone(); //multiple accounts needs to be showed
let x = acct.show();
}
}
return Ok(None) | } | random_line_split |
|
account.rs | extern crate meg;
use std::env;
use std::clone::Clone;
use turbo::util::{CliResult, Config};
use self::meg::ops::meg_account_create as Act;
use self::meg::ops::meg_account_show as Show;
#[derive(RustcDecodable, Clone)]
pub struct | {
pub arg_email: String,
pub flag_show: bool,
}
pub const USAGE: &'static str = "
Usage:
meg account [options] [<email>]
Options:
-h, --help Print this message
--create Provide an email to create a new account
--show Provide an email to show the account
-v, --verbose Use verbose output
";
pub fn execute(options: Options, _: &Config) -> CliResult<Option<()>> {
let vec = env::args().collect::<Vec<_>>();
for x in vec.iter() {
if x == "--create" {
let mut acct: Act::Createoptions = Act::CreateAcc::new();
acct.email = options.arg_email.clone();
let x = acct.create();
} else if x == "--show" {
let mut acct: Show::Showoptions = Show::ShowAcc::new(); //Not reqd - to expand later if
acct.email = options.arg_email.clone(); //multiple accounts needs to be showed
let x = acct.show();
}
}
return Ok(None)
}
| Options | identifier_name |
account.rs | extern crate meg;
use std::env;
use std::clone::Clone;
use turbo::util::{CliResult, Config};
use self::meg::ops::meg_account_create as Act;
use self::meg::ops::meg_account_show as Show;
#[derive(RustcDecodable, Clone)]
pub struct Options {
pub arg_email: String,
pub flag_show: bool,
}
pub const USAGE: &'static str = "
Usage:
meg account [options] [<email>]
Options:
-h, --help Print this message
--create Provide an email to create a new account
--show Provide an email to show the account
-v, --verbose Use verbose output
";
pub fn execute(options: Options, _: &Config) -> CliResult<Option<()>> {
let vec = env::args().collect::<Vec<_>>();
for x in vec.iter() {
if x == "--create" {
let mut acct: Act::Createoptions = Act::CreateAcc::new();
acct.email = options.arg_email.clone();
let x = acct.create();
} else if x == "--show" |
}
return Ok(None)
}
| {
let mut acct: Show::Showoptions = Show::ShowAcc::new(); //Not reqd - to expand later if
acct.email = options.arg_email.clone(); //multiple accounts needs to be showed
let x = acct.show();
} | conditional_block |
non_terminal.rs | // Copyright 2016 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use back::compiler::*;
use back::compiler::value::*;
use back::name_factory::*;
pub struct NonTerminalCompiler;
impl NonTerminalCompiler
{
pub fn recognizer(id: Ident) -> NonTerminalRecognizerCompiler {
NonTerminalRecognizerCompiler {
path: parse_quote!(#id)
}
}
pub fn parser(id: Ident, this_idx: usize) -> NonTerminalParserCompiler {
NonTerminalParserCompiler {
path: parse_quote!(#id),
this_idx
}
}
pub fn external_recognizer(path: syn::Path) -> NonTerminalRecognizerCompiler {
NonTerminalRecognizerCompiler { path }
}
pub fn external_parser(path: syn::Path, this_idx: usize) -> NonTerminalParserCompiler {
NonTerminalParserCompiler { path, this_idx}
}
}
pub struct NonTerminalRecognizerCompiler
{
path: syn::Path
}
impl CompileExpr for NonTerminalRecognizerCompiler
{
fn compile_expr<'a>(&self, _context: &mut Context<'a>,
continuation: Continuation) -> syn::Expr
{
let recognizer_fn = recognizer_name(self.path.clone());
continuation
.map_success(|success, failure| parse_quote!(
{ | #success
}
else {
#failure
}
}
))
.unwrap_success()
}
}
pub struct NonTerminalParserCompiler
{
path: syn::Path,
this_idx: usize
}
impl CompileExpr for NonTerminalParserCompiler
{
fn compile_expr<'a>(&self, context: &mut Context<'a>,
continuation: Continuation) -> syn::Expr
{
let parser_fn = parser_name(self.path.clone());
let cardinality = context.expr_cardinality(self.this_idx);
let mut vars_names: Vec<_> = (0..cardinality)
.map(|_| context.next_free_var())
.collect();
// Due to the reverse compilation scheme, variables are given as `a3, a2,...`, however we need to match them in the good order.
// Note that we cannot use `rev()` since we depend on a global state.
vars_names.reverse();
let vars = tuple_pattern(vars_names);
continuation
.map_success(|success, failure| parse_quote!(
{
let stateful = #parser_fn(state);
if stateful.is_successful() {
let (stateless, #vars) = stateful.extract_data();
state = stateless;
#success
}
else {
state = stateful.failure();
#failure
}
}
))
.unwrap_success()
}
} | state = #recognizer_fn(state);
if state.is_successful() {
state.discard_data(); | random_line_split |
non_terminal.rs | // Copyright 2016 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use back::compiler::*;
use back::compiler::value::*;
use back::name_factory::*;
pub struct NonTerminalCompiler;
impl NonTerminalCompiler
{
pub fn recognizer(id: Ident) -> NonTerminalRecognizerCompiler {
NonTerminalRecognizerCompiler {
path: parse_quote!(#id)
}
}
pub fn parser(id: Ident, this_idx: usize) -> NonTerminalParserCompiler {
NonTerminalParserCompiler {
path: parse_quote!(#id),
this_idx
}
}
pub fn external_recognizer(path: syn::Path) -> NonTerminalRecognizerCompiler {
NonTerminalRecognizerCompiler { path }
}
pub fn external_parser(path: syn::Path, this_idx: usize) -> NonTerminalParserCompiler {
NonTerminalParserCompiler { path, this_idx}
}
}
pub struct NonTerminalRecognizerCompiler
{
path: syn::Path
}
impl CompileExpr for NonTerminalRecognizerCompiler
{
fn compile_expr<'a>(&self, _context: &mut Context<'a>,
continuation: Continuation) -> syn::Expr
{
let recognizer_fn = recognizer_name(self.path.clone());
continuation
.map_success(|success, failure| parse_quote!(
{
state = #recognizer_fn(state);
if state.is_successful() {
state.discard_data();
#success
}
else {
#failure
}
}
))
.unwrap_success()
}
}
pub struct NonTerminalParserCompiler
{
path: syn::Path,
this_idx: usize
}
impl CompileExpr for NonTerminalParserCompiler
{
fn compile_expr<'a>(&self, context: &mut Context<'a>,
continuation: Continuation) -> syn::Expr
| state = stateful.failure();
#failure
}
}
))
.unwrap_success()
}
}
| {
let parser_fn = parser_name(self.path.clone());
let cardinality = context.expr_cardinality(self.this_idx);
let mut vars_names: Vec<_> = (0..cardinality)
.map(|_| context.next_free_var())
.collect();
// Due to the reverse compilation scheme, variables are given as `a3, a2,...`, however we need to match them in the good order.
// Note that we cannot use `rev()` since we depend on a global state.
vars_names.reverse();
let vars = tuple_pattern(vars_names);
continuation
.map_success(|success, failure| parse_quote!(
{
let stateful = #parser_fn(state);
if stateful.is_successful() {
let (stateless, #vars) = stateful.extract_data();
state = stateless;
#success
}
else { | identifier_body |
non_terminal.rs | // Copyright 2016 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use back::compiler::*;
use back::compiler::value::*;
use back::name_factory::*;
pub struct NonTerminalCompiler;
impl NonTerminalCompiler
{
pub fn recognizer(id: Ident) -> NonTerminalRecognizerCompiler {
NonTerminalRecognizerCompiler {
path: parse_quote!(#id)
}
}
pub fn parser(id: Ident, this_idx: usize) -> NonTerminalParserCompiler {
NonTerminalParserCompiler {
path: parse_quote!(#id),
this_idx
}
}
pub fn external_recognizer(path: syn::Path) -> NonTerminalRecognizerCompiler {
NonTerminalRecognizerCompiler { path }
}
pub fn external_parser(path: syn::Path, this_idx: usize) -> NonTerminalParserCompiler {
NonTerminalParserCompiler { path, this_idx}
}
}
pub struct NonTerminalRecognizerCompiler
{
path: syn::Path
}
impl CompileExpr for NonTerminalRecognizerCompiler
{
fn | <'a>(&self, _context: &mut Context<'a>,
continuation: Continuation) -> syn::Expr
{
let recognizer_fn = recognizer_name(self.path.clone());
continuation
.map_success(|success, failure| parse_quote!(
{
state = #recognizer_fn(state);
if state.is_successful() {
state.discard_data();
#success
}
else {
#failure
}
}
))
.unwrap_success()
}
}
pub struct NonTerminalParserCompiler
{
path: syn::Path,
this_idx: usize
}
impl CompileExpr for NonTerminalParserCompiler
{
fn compile_expr<'a>(&self, context: &mut Context<'a>,
continuation: Continuation) -> syn::Expr
{
let parser_fn = parser_name(self.path.clone());
let cardinality = context.expr_cardinality(self.this_idx);
let mut vars_names: Vec<_> = (0..cardinality)
.map(|_| context.next_free_var())
.collect();
// Due to the reverse compilation scheme, variables are given as `a3, a2,...`, however we need to match them in the good order.
// Note that we cannot use `rev()` since we depend on a global state.
vars_names.reverse();
let vars = tuple_pattern(vars_names);
continuation
.map_success(|success, failure| parse_quote!(
{
let stateful = #parser_fn(state);
if stateful.is_successful() {
let (stateless, #vars) = stateful.extract_data();
state = stateless;
#success
}
else {
state = stateful.failure();
#failure
}
}
))
.unwrap_success()
}
}
| compile_expr | identifier_name |
issue-38147-1.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct Pass<'a> {
s: &'a mut String
}
impl<'a> Pass<'a> {
fn f(&mut self) {
self.s.push('x');
}
} | struct Foo<'a> {
s: &'a mut String
}
impl<'a> Foo<'a> {
fn f(&self) {
self.s.push('x'); //~ ERROR cannot borrow data mutably
}
}
fn main() {} | random_line_split |
|
issue-38147-1.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct Pass<'a> {
s: &'a mut String
}
impl<'a> Pass<'a> {
fn f(&mut self) {
self.s.push('x');
}
}
struct Foo<'a> {
s: &'a mut String
}
impl<'a> Foo<'a> {
fn f(&self) |
}
fn main() {}
| {
self.s.push('x'); //~ ERROR cannot borrow data mutably
} | identifier_body |
issue-38147-1.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct Pass<'a> {
s: &'a mut String
}
impl<'a> Pass<'a> {
fn f(&mut self) {
self.s.push('x');
}
}
struct | <'a> {
s: &'a mut String
}
impl<'a> Foo<'a> {
fn f(&self) {
self.s.push('x'); //~ ERROR cannot borrow data mutably
}
}
fn main() {}
| Foo | identifier_name |
submessage.rs | use std::io::{ self, Write };
use byteorder::{ LittleEndian, WriteBytesExt};
pub struct | (pub SubmessageType, pub Vec<u8>);
bitflags! {
flags SubmessageType : u8 {
// const PAD = 0x01, /* Pad */
const LITTLEENDIAN = 0x01, /* Xavier's Endianness hack? */
const ACKNACK = 0x06, /* AckNack */
const HEARTBEAT = 0x07, /* Heartbeat */
const GAP = 0x08, /* Gap */
const INFO_TS = 0x09, /* InfoTimestamp */
const INFO_SRC = 0x0c, /* InfoSource */
const INFO_REPLY_IP4 = 0x0d, /* InfoReplyIp4 */
const INFO_DST = 0x0e, /* InfoDestination */
const INFO_REPLY = 0x0f, /* InfoReply */
const NACK_FRAG = 0x12, /* NackFrag */
const HEARTBEAT_FRAG = 0x13, /* HeartbeatFrag */
const DATA = 0x15, /* Data */
const DATA_FRAG = 0x16, /* DataFrag */
}
}
impl Submessage {
pub fn serialize<W: Write>(&self, mut w: &mut W) -> io::Result<()> {
let mut flags = self.0;
// flags.toggle(LITTLEENDIAN);
flags = flags | LITTLEENDIAN;
let flags_arr = [flags.bits];
try!(w.write_all(&flags_arr[..]));
try!(w.write_u16::<LittleEndian>(self.1.len() as u16));
w.write_all(&self.1[..])
}
} | Submessage | identifier_name |
submessage.rs | use std::io::{ self, Write };
use byteorder::{ LittleEndian, WriteBytesExt};
pub struct Submessage(pub SubmessageType, pub Vec<u8>);
bitflags! {
flags SubmessageType : u8 {
// const PAD = 0x01, /* Pad */ | const HEARTBEAT = 0x07, /* Heartbeat */
const GAP = 0x08, /* Gap */
const INFO_TS = 0x09, /* InfoTimestamp */
const INFO_SRC = 0x0c, /* InfoSource */
const INFO_REPLY_IP4 = 0x0d, /* InfoReplyIp4 */
const INFO_DST = 0x0e, /* InfoDestination */
const INFO_REPLY = 0x0f, /* InfoReply */
const NACK_FRAG = 0x12, /* NackFrag */
const HEARTBEAT_FRAG = 0x13, /* HeartbeatFrag */
const DATA = 0x15, /* Data */
const DATA_FRAG = 0x16, /* DataFrag */
}
}
impl Submessage {
pub fn serialize<W: Write>(&self, mut w: &mut W) -> io::Result<()> {
let mut flags = self.0;
// flags.toggle(LITTLEENDIAN);
flags = flags | LITTLEENDIAN;
let flags_arr = [flags.bits];
try!(w.write_all(&flags_arr[..]));
try!(w.write_u16::<LittleEndian>(self.1.len() as u16));
w.write_all(&self.1[..])
}
} | const LITTLEENDIAN = 0x01, /* Xavier's Endianness hack? */
const ACKNACK = 0x06, /* AckNack */ | random_line_split |
into_matcher.rs | use super::Matcher;
use regex::{Regex, Captures};
impl From<Regex> for Matcher {
fn from(regex: Regex) -> Matcher {
let path = regex.as_str().to_string();
Matcher::new(path, regex)
}
}
impl<'a> From<&'a str> for Matcher {
fn from(s: &'a str) -> Matcher {
From::from(s.to_string())
}
}
lazy_static! {
static ref REGEX_VAR_SEQ: Regex = Regex::new(r":([,a-zA-Z0-9_-]*)").unwrap();
}
pub static FORMAT_PARAM: &'static str = "format";
// FIXME: Once const fn lands this could be defined in terms of the above
static FORMAT_VAR: &'static str = ":format";
static VAR_SEQ: &'static str = "[.,a-zA-Z0-9_-]*";
static VAR_SEQ_WITH_SLASH: &'static str = "[.,/a-zA-Z0-9_-]*";
// matches request params (e.g.?foo=true&bar=false)
static REGEX_PARAM_SEQ: &'static str = "(\\?[.a-zA-Z0-9%_=&-]*)?";
impl From<String> for Matcher {
fn | (s: String) -> Matcher {
let with_format = if s.contains(FORMAT_VAR) {
s
} else {
format!("{}(\\.{})?", s, FORMAT_VAR)
};
// First mark all double wildcards for replacement. We can't directly
// replace them since the replacement does contain the * symbol as well,
// which would get overwritten with the next replace call
let with_placeholder = with_format.replace("**", "___DOUBLE_WILDCARD___");
// Then replace the regular wildcard symbols (*) with the appropriate regex
let star_replaced = with_placeholder.replace("*", VAR_SEQ);
// Now replace the previously marked double wild cards (**)
let wildcarded = star_replaced.replace("___DOUBLE_WILDCARD___", VAR_SEQ_WITH_SLASH);
// Add a named capture for each :(variable) symbol
let named_captures = REGEX_VAR_SEQ.replace_all(&wildcarded, |captures: &Captures| {
// There should only ever be one match (after subgroup 0)
let c = captures.iter().skip(1).next().unwrap();
format!("(?P<{}>[.,a-zA-Z0-9%_-]*)", c.unwrap())
});
let line_regex = format!("^{}{}$", named_captures, REGEX_PARAM_SEQ);
let regex = Regex::new(&line_regex).unwrap();
Matcher::new(with_format, regex)
}
}
| from | identifier_name |
into_matcher.rs | use super::Matcher;
use regex::{Regex, Captures};
impl From<Regex> for Matcher {
fn from(regex: Regex) -> Matcher {
let path = regex.as_str().to_string();
Matcher::new(path, regex)
}
}
impl<'a> From<&'a str> for Matcher {
fn from(s: &'a str) -> Matcher {
From::from(s.to_string())
}
}
lazy_static! {
static ref REGEX_VAR_SEQ: Regex = Regex::new(r":([,a-zA-Z0-9_-]*)").unwrap();
}
pub static FORMAT_PARAM: &'static str = "format";
// FIXME: Once const fn lands this could be defined in terms of the above
static FORMAT_VAR: &'static str = ":format";
static VAR_SEQ: &'static str = "[.,a-zA-Z0-9_-]*";
static VAR_SEQ_WITH_SLASH: &'static str = "[.,/a-zA-Z0-9_-]*";
// matches request params (e.g.?foo=true&bar=false)
static REGEX_PARAM_SEQ: &'static str = "(\\?[.a-zA-Z0-9%_=&-]*)?";
impl From<String> for Matcher {
fn from(s: String) -> Matcher {
let with_format = if s.contains(FORMAT_VAR) | else {
format!("{}(\\.{})?", s, FORMAT_VAR)
};
// First mark all double wildcards for replacement. We can't directly
// replace them since the replacement does contain the * symbol as well,
// which would get overwritten with the next replace call
let with_placeholder = with_format.replace("**", "___DOUBLE_WILDCARD___");
// Then replace the regular wildcard symbols (*) with the appropriate regex
let star_replaced = with_placeholder.replace("*", VAR_SEQ);
// Now replace the previously marked double wild cards (**)
let wildcarded = star_replaced.replace("___DOUBLE_WILDCARD___", VAR_SEQ_WITH_SLASH);
// Add a named capture for each :(variable) symbol
let named_captures = REGEX_VAR_SEQ.replace_all(&wildcarded, |captures: &Captures| {
// There should only ever be one match (after subgroup 0)
let c = captures.iter().skip(1).next().unwrap();
format!("(?P<{}>[.,a-zA-Z0-9%_-]*)", c.unwrap())
});
let line_regex = format!("^{}{}$", named_captures, REGEX_PARAM_SEQ);
let regex = Regex::new(&line_regex).unwrap();
Matcher::new(with_format, regex)
}
}
| {
s
} | conditional_block |
into_matcher.rs | use super::Matcher;
use regex::{Regex, Captures};
impl From<Regex> for Matcher {
fn from(regex: Regex) -> Matcher |
}
impl<'a> From<&'a str> for Matcher {
fn from(s: &'a str) -> Matcher {
From::from(s.to_string())
}
}
lazy_static! {
static ref REGEX_VAR_SEQ: Regex = Regex::new(r":([,a-zA-Z0-9_-]*)").unwrap();
}
pub static FORMAT_PARAM: &'static str = "format";
// FIXME: Once const fn lands this could be defined in terms of the above
static FORMAT_VAR: &'static str = ":format";
static VAR_SEQ: &'static str = "[.,a-zA-Z0-9_-]*";
static VAR_SEQ_WITH_SLASH: &'static str = "[.,/a-zA-Z0-9_-]*";
// matches request params (e.g.?foo=true&bar=false)
static REGEX_PARAM_SEQ: &'static str = "(\\?[.a-zA-Z0-9%_=&-]*)?";
impl From<String> for Matcher {
fn from(s: String) -> Matcher {
let with_format = if s.contains(FORMAT_VAR) {
s
} else {
format!("{}(\\.{})?", s, FORMAT_VAR)
};
// First mark all double wildcards for replacement. We can't directly
// replace them since the replacement does contain the * symbol as well,
// which would get overwritten with the next replace call
let with_placeholder = with_format.replace("**", "___DOUBLE_WILDCARD___");
// Then replace the regular wildcard symbols (*) with the appropriate regex
let star_replaced = with_placeholder.replace("*", VAR_SEQ);
// Now replace the previously marked double wild cards (**)
let wildcarded = star_replaced.replace("___DOUBLE_WILDCARD___", VAR_SEQ_WITH_SLASH);
// Add a named capture for each :(variable) symbol
let named_captures = REGEX_VAR_SEQ.replace_all(&wildcarded, |captures: &Captures| {
// There should only ever be one match (after subgroup 0)
let c = captures.iter().skip(1).next().unwrap();
format!("(?P<{}>[.,a-zA-Z0-9%_-]*)", c.unwrap())
});
let line_regex = format!("^{}{}$", named_captures, REGEX_PARAM_SEQ);
let regex = Regex::new(&line_regex).unwrap();
Matcher::new(with_format, regex)
}
}
| {
let path = regex.as_str().to_string();
Matcher::new(path, regex)
} | identifier_body |
into_matcher.rs | use super::Matcher;
use regex::{Regex, Captures};
impl From<Regex> for Matcher {
fn from(regex: Regex) -> Matcher {
let path = regex.as_str().to_string();
Matcher::new(path, regex)
}
} | fn from(s: &'a str) -> Matcher {
From::from(s.to_string())
}
}
lazy_static! {
static ref REGEX_VAR_SEQ: Regex = Regex::new(r":([,a-zA-Z0-9_-]*)").unwrap();
}
pub static FORMAT_PARAM: &'static str = "format";
// FIXME: Once const fn lands this could be defined in terms of the above
static FORMAT_VAR: &'static str = ":format";
static VAR_SEQ: &'static str = "[.,a-zA-Z0-9_-]*";
static VAR_SEQ_WITH_SLASH: &'static str = "[.,/a-zA-Z0-9_-]*";
// matches request params (e.g.?foo=true&bar=false)
static REGEX_PARAM_SEQ: &'static str = "(\\?[.a-zA-Z0-9%_=&-]*)?";
impl From<String> for Matcher {
fn from(s: String) -> Matcher {
let with_format = if s.contains(FORMAT_VAR) {
s
} else {
format!("{}(\\.{})?", s, FORMAT_VAR)
};
// First mark all double wildcards for replacement. We can't directly
// replace them since the replacement does contain the * symbol as well,
// which would get overwritten with the next replace call
let with_placeholder = with_format.replace("**", "___DOUBLE_WILDCARD___");
// Then replace the regular wildcard symbols (*) with the appropriate regex
let star_replaced = with_placeholder.replace("*", VAR_SEQ);
// Now replace the previously marked double wild cards (**)
let wildcarded = star_replaced.replace("___DOUBLE_WILDCARD___", VAR_SEQ_WITH_SLASH);
// Add a named capture for each :(variable) symbol
let named_captures = REGEX_VAR_SEQ.replace_all(&wildcarded, |captures: &Captures| {
// There should only ever be one match (after subgroup 0)
let c = captures.iter().skip(1).next().unwrap();
format!("(?P<{}>[.,a-zA-Z0-9%_-]*)", c.unwrap())
});
let line_regex = format!("^{}{}$", named_captures, REGEX_PARAM_SEQ);
let regex = Regex::new(&line_regex).unwrap();
Matcher::new(with_format, regex)
}
} |
impl<'a> From<&'a str> for Matcher { | random_line_split |
is_integer.rs | use malachite_base::num::conversion::traits::IsInteger;
use Rational;
impl<'a> IsInteger for &'a Rational {
/// Determines whether a `Rational` is an integer.
///
/// $f(x) = x \in \Z$.
///
/// # Worst-case complexity
/// Constant time and additional memory.
///
/// # Examples
/// ```
/// extern crate malachite_base;
/// extern crate malachite_q;
///
/// use malachite_base::num::basic::traits::{One, Zero};
/// use malachite_base::num::conversion::traits::IsInteger;
/// use malachite_q::Rational;
/// use std::str::FromStr;
///
/// assert_eq!(Rational::ZERO.is_integer(), true);
/// assert_eq!(Rational::ONE.is_integer(), true);
/// assert_eq!(Rational::from(100).is_integer(), true);
/// assert_eq!(Rational::from_str("22/7").unwrap().is_integer(), false);
/// ```
#[inline]
fn is_integer(self) -> bool |
}
| {
self.denominator == 1u32
} | identifier_body |
is_integer.rs | use malachite_base::num::conversion::traits::IsInteger;
use Rational;
impl<'a> IsInteger for &'a Rational {
/// Determines whether a `Rational` is an integer.
///
/// $f(x) = x \in \Z$.
///
/// # Worst-case complexity
/// Constant time and additional memory.
///
/// # Examples
/// ```
/// extern crate malachite_base;
/// extern crate malachite_q;
///
/// use malachite_base::num::basic::traits::{One, Zero};
/// use malachite_base::num::conversion::traits::IsInteger;
/// use malachite_q::Rational;
/// use std::str::FromStr;
///
/// assert_eq!(Rational::ZERO.is_integer(), true);
/// assert_eq!(Rational::ONE.is_integer(), true);
/// assert_eq!(Rational::from(100).is_integer(), true);
/// assert_eq!(Rational::from_str("22/7").unwrap().is_integer(), false);
/// ```
#[inline]
fn | (self) -> bool {
self.denominator == 1u32
}
}
| is_integer | identifier_name |
is_integer.rs | use malachite_base::num::conversion::traits::IsInteger;
use Rational;
impl<'a> IsInteger for &'a Rational {
/// Determines whether a `Rational` is an integer.
///
/// $f(x) = x \in \Z$.
///
/// # Worst-case complexity
/// Constant time and additional memory. | /// extern crate malachite_q;
///
/// use malachite_base::num::basic::traits::{One, Zero};
/// use malachite_base::num::conversion::traits::IsInteger;
/// use malachite_q::Rational;
/// use std::str::FromStr;
///
/// assert_eq!(Rational::ZERO.is_integer(), true);
/// assert_eq!(Rational::ONE.is_integer(), true);
/// assert_eq!(Rational::from(100).is_integer(), true);
/// assert_eq!(Rational::from_str("22/7").unwrap().is_integer(), false);
/// ```
#[inline]
fn is_integer(self) -> bool {
self.denominator == 1u32
}
} | ///
/// # Examples
/// ```
/// extern crate malachite_base; | random_line_split |
mem_size_tbl.rs | // SPDX-License-Identifier: MIT
// Copyright [email protected]
// Copyright iced contributors
use super::super::super::iced_constants::IcedConstants;
use alloc::vec::Vec;
// GENERATOR-BEGIN: MemorySizes
// ⚠️This was generated by GENERATOR!🦹♂️
#[rustfmt::skip]
static MEM_SIZE_TBL_DATA: [u8; 141] = [
0x00,
0x01,
0x0D,
0x03,
0x0B,
0x0B,
0x0E,
0x0F,
0x10,
0x01,
0x0D,
0x03,
0x0B,
0x0E,
0x0F,
0x10, | 0x08,
0x0C,
0x0D,
0x03,
0x0B,
0x03,
0x0B,
0x0B,
0x09,
0x08,
0x08,
0x0D,
0x03,
0x0B,
0x0C,
0x0E,
0x0D,
0x04,
0x05,
0x07,
0x06,
0x00,
0x00,
0x00,
0x00,
0x0C,
0x10,
0x00,
0x0C,
0x11,
0x10,
0x0D,
0x0D,
0x03,
0x03,
0x03,
0x03,
0x03,
0x0B,
0x0B,
0x0B,
0x0B,
0x0B,
0x0B,
0x0B,
0x0B,
0x0E,
0x0E,
0x0E,
0x0E,
0x0E,
0x0E,
0x0E,
0x0E,
0x0E,
0x0E,
0x0E,
0x0E,
0x0E,
0x0F,
0x0F,
0x0F,
0x0F,
0x0F,
0x0F,
0x0F,
0x0F,
0x0F,
0x0F,
0x0F,
0x0F,
0x0F,
0x0F,
0x0F,
0x0F,
0x10,
0x10,
0x10,
0x10,
0x10,
0x10,
0x10,
0x10,
0x10,
0x10,
0x10,
0x10,
0x10,
0x02,
0x02,
0x02,
0x02,
0x02,
0x0A,
0x0A,
0x0A,
0x02,
0x0A,
0x02,
0x02,
0x0A,
0x0A,
0x0A,
0x02,
0x0A,
0x02,
0x02,
0x0A,
0x0A,
0x0A,
0x02,
0x0A,
0x02,
0x02,
0x02,
0x0A,
0x0A,
0x0A,
0x0A,
0x0A,
0x0A,
0x02,
0x02,
0x02,
];
// GENERATOR-END: MemorySizes
lazy_static! {
pub(super) static ref MEM_SIZE_TBL: Vec<&'static str> = {
let mut v = Vec::with_capacity(IcedConstants::MEMORY_SIZE_ENUM_COUNT);
for &mem_keywords in MEM_SIZE_TBL_DATA.iter() {
let keywords: &'static str = match mem_keywords {
// GENERATOR-BEGIN: Match
// ⚠️This was generated by GENERATOR!🦹♂️
0 => "",
1 => "byte ptr ",
2 => "dword bcst ",
3 => "dword ptr ",
4 => "fpuenv14 ptr ",
5 => "fpuenv28 ptr ",
6 => "fpustate108 ptr ",
7 => "fpustate94 ptr ",
8 => "fword ptr ",
9 => "oword ptr ",
10 => "qword bcst ",
11 => "qword ptr ",
12 => "tbyte ptr ",
13 => "word ptr ",
14 => "xmmword ptr ",
15 => "ymmword ptr ",
16 => "zmmword ptr ",
17 => "mem384 ptr ",
// GENERATOR-END: Match
_ => unreachable!(),
};
v.push(keywords);
}
v
};
} | 0x03, | random_line_split |
enable_stratagem_button.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{
components::{
font_awesome_outline,
stratagem::{Command, StratagemEnable},
},
extensions::{MergeAttrs, NodeExt, RequestExt},
generated::css_classes::C,
};
use iml_wire_types::{EndpointName, StratagemConfiguration};
use seed::{prelude::*, *};
pub async fn enable_stratagem<T: serde::de::DeserializeOwned +'static>(
model: StratagemEnable,
) -> Result<fetch::FetchObject<T>, fetch::FetchObject<T>> {
fetch::Request::api_call(StratagemConfiguration::endpoint_name())
.method(fetch::Method::Post)
.with_auth()
.send_json(&model)
.fetch_json(std::convert::identity)
.await
}
pub fn view(is_valid: bool, disabled: bool) -> Node<Command> {
let btn = button![
class![
C.bg_blue_500,
C.hover__bg_blue_700,
C.text_white,
C.font_bold,
C.p_2,
C.rounded,
C.w_full,
C.text_sm,
C.col_span_2,
],
"Enable Scan Interval",
font_awesome_outline(class![C.inline, C.h_4, C.w_4, C.ml_2], "clock")
];
if is_valid &&!disabled {
btn.with_listener(simple_ev(Ev::Click, Command::Enable))
} else |
}
| {
btn.merge_attrs(attrs! {At::Disabled => "disabled"})
.merge_attrs(class![C.opacity_50, C.cursor_not_allowed])
} | conditional_block |
enable_stratagem_button.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{
components::{
font_awesome_outline,
stratagem::{Command, StratagemEnable},
},
extensions::{MergeAttrs, NodeExt, RequestExt},
generated::css_classes::C,
};
use iml_wire_types::{EndpointName, StratagemConfiguration};
use seed::{prelude::*, *};
pub async fn enable_stratagem<T: serde::de::DeserializeOwned +'static>(
model: StratagemEnable,
) -> Result<fetch::FetchObject<T>, fetch::FetchObject<T>> |
pub fn view(is_valid: bool, disabled: bool) -> Node<Command> {
let btn = button![
class![
C.bg_blue_500,
C.hover__bg_blue_700,
C.text_white,
C.font_bold,
C.p_2,
C.rounded,
C.w_full,
C.text_sm,
C.col_span_2,
],
"Enable Scan Interval",
font_awesome_outline(class![C.inline, C.h_4, C.w_4, C.ml_2], "clock")
];
if is_valid &&!disabled {
btn.with_listener(simple_ev(Ev::Click, Command::Enable))
} else {
btn.merge_attrs(attrs! {At::Disabled => "disabled"})
.merge_attrs(class![C.opacity_50, C.cursor_not_allowed])
}
}
| {
fetch::Request::api_call(StratagemConfiguration::endpoint_name())
.method(fetch::Method::Post)
.with_auth()
.send_json(&model)
.fetch_json(std::convert::identity)
.await
} | identifier_body |
enable_stratagem_button.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{
components::{
font_awesome_outline,
stratagem::{Command, StratagemEnable},
},
extensions::{MergeAttrs, NodeExt, RequestExt},
generated::css_classes::C,
};
use iml_wire_types::{EndpointName, StratagemConfiguration};
use seed::{prelude::*, *};
pub async fn enable_stratagem<T: serde::de::DeserializeOwned +'static>(
model: StratagemEnable,
) -> Result<fetch::FetchObject<T>, fetch::FetchObject<T>> {
fetch::Request::api_call(StratagemConfiguration::endpoint_name())
.method(fetch::Method::Post)
.with_auth()
.send_json(&model)
.fetch_json(std::convert::identity)
.await
}
pub fn view(is_valid: bool, disabled: bool) -> Node<Command> {
let btn = button![
class![ | C.font_bold,
C.p_2,
C.rounded,
C.w_full,
C.text_sm,
C.col_span_2,
],
"Enable Scan Interval",
font_awesome_outline(class![C.inline, C.h_4, C.w_4, C.ml_2], "clock")
];
if is_valid &&!disabled {
btn.with_listener(simple_ev(Ev::Click, Command::Enable))
} else {
btn.merge_attrs(attrs! {At::Disabled => "disabled"})
.merge_attrs(class![C.opacity_50, C.cursor_not_allowed])
}
} | C.bg_blue_500,
C.hover__bg_blue_700,
C.text_white, | random_line_split |
enable_stratagem_button.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{
components::{
font_awesome_outline,
stratagem::{Command, StratagemEnable},
},
extensions::{MergeAttrs, NodeExt, RequestExt},
generated::css_classes::C,
};
use iml_wire_types::{EndpointName, StratagemConfiguration};
use seed::{prelude::*, *};
pub async fn enable_stratagem<T: serde::de::DeserializeOwned +'static>(
model: StratagemEnable,
) -> Result<fetch::FetchObject<T>, fetch::FetchObject<T>> {
fetch::Request::api_call(StratagemConfiguration::endpoint_name())
.method(fetch::Method::Post)
.with_auth()
.send_json(&model)
.fetch_json(std::convert::identity)
.await
}
pub fn | (is_valid: bool, disabled: bool) -> Node<Command> {
let btn = button![
class![
C.bg_blue_500,
C.hover__bg_blue_700,
C.text_white,
C.font_bold,
C.p_2,
C.rounded,
C.w_full,
C.text_sm,
C.col_span_2,
],
"Enable Scan Interval",
font_awesome_outline(class![C.inline, C.h_4, C.w_4, C.ml_2], "clock")
];
if is_valid &&!disabled {
btn.with_listener(simple_ev(Ev::Click, Command::Enable))
} else {
btn.merge_attrs(attrs! {At::Disabled => "disabled"})
.merge_attrs(class![C.opacity_50, C.cursor_not_allowed])
}
}
| view | identifier_name |
constants.rs | pub const MH_MAGIC_64: u32 = 0xfeedfacf;
pub const MH_CIGAM_64: u32 = 0xcffaedfe;
const LC_REQ_DYLD: u32 = 0x80000000;
#[repr(u32)]
#[derive(Eq,PartialEq)]
#[allow(non_camel_case_types)]
pub enum | {
/// After MacOS X 10.1 when a new load command is added that is required to be
/// understood by the dynamic linker for the image to execute properly the
/// LC_REQ_DYLD bit will be or'ed into the load command constant. If the dynamic
/// linker sees such a load command it it does not understand will issue a
/// "unknown load command required for execution" error and refuse to use the
/// image. Other load commands without this bit that are not understood will
/// simply be ignored.
LC_REQ_DYLD = LC_REQ_DYLD,
/// segment of this file to be mapped
LC_SEGMENT = 0x1,
/// link-edit stab symbol table info
LC_SYMTAB = 0x2,
/// link-edit gdb symbol table info (obsolete)
LC_SYMSEG = 0x3,
/// thread
LC_THREAD = 0x4,
/// unix thread (includes a stack)
LC_UNIXTHREAD = 0x5,
/// load a specified fixed VM shared library
LC_LOADFVMLIB = 0x6,
/// fixed VM shared library identification
LC_IDFVMLIB = 0x7,
/// object identification info (obsolete)
LC_IDENT = 0x8,
/// fixed VM file inclusion (internal use)
LC_FVMFILE = 0x9,
/// prepage command (internal use)
LC_PREPAGE = 0xa,
/// dynamic link-edit symbol table info
LC_DYSYMTAB = 0xb,
/// load a dynamically linked shared library
LC_LOAD_DYLIB = 0xc,
/// dynamically linked shared lib ident
LC_ID_DYLIB = 0xd,
/// load a dynamic linker
LC_LOAD_DYLINKER = 0xe,
/// dynamic linker identification
LC_ID_DYLINKER = 0xf,
/// modules prebound for a dynamically linked shared library
LC_PREBOUND_DYLIB = 0x10,
/// image routines
LC_ROUTINES = 0x11,
/// sub framework
LC_SUB_FRAMEWORK = 0x12,
/// sub umbrella
LC_SUB_UMBRELLA = 0x13,
/// sub client
LC_SUB_CLIENT = 0x14,
/// sub library
LC_SUB_LIBRARY = 0x15,
/// two-level namespace lookup hints
LC_TWOLEVEL_HINTS = 0x16,
/// prebind checksum
LC_PREBIND_CKSUM = 0x17,
/// load a dynamically linked shared library that is allowed to be missing
/// (all symbols are weak imported).
LC_LOAD_WEAK_DYLIB = (0x18 | LC_REQ_DYLD),
/// 64-bit segment of this file to be mapped
LC_SEGMENT_64 = 0x19,
/// 64-bit image routines
LC_ROUTINES_64 = 0x1a,
/// the uuid
LC_UUID = 0x1b,
/// runpath additions
LC_RPATH = (0x1c | LC_REQ_DYLD),
/// local of code signature
LC_CODE_SIGNATURE = 0x1d,
/// local of info to split segments
LC_SEGMENT_SPLIT_INFO = 0x1e,
/// load and re-export dylib
LC_REEXPORT_DYLIB = (0x1f | LC_REQ_DYLD),
/// delay load of dylib until first use
LC_LAZY_LOAD_DYLIB = 0x20,
/// encrypted segment information
LC_ENCRYPTION_INFO = 0x21,
/// compressed dyld information
LC_DYLD_INFO = 0x22,
/// compressed dyld information only
LC_DYLD_INFO_ONLY = (0x22|LC_REQ_DYLD),
/// load upward dylib
LC_LOAD_UPWARD_DYLIB = (0x23 | LC_REQ_DYLD),
/// build for MacOSX min OS version
LC_VERSION_MIN_MACOSX = 0x24,
/// build for iPhoneOS min OS version
LC_VERSION_MIN_IPHONEOS = 0x25,
/// compressed table of function start addresses
LC_FUNCTION_STARTS = 0x26,
/// string for dyld to treat like environment variable
LC_DYLD_ENVIRONMENT = 0x27,
/// replacement for LC_UNIXTHREAD
LC_MAIN = (0x28|LC_REQ_DYLD),
/// table of non-instructions in __text
LC_DATA_IN_CODE = 0x29,
/// source version used to build binary
LC_SOURCE_VERSION = 0x2A,
/// Code signing DRs copied from linked dylibs
LC_DYLIB_CODE_SIGN_DRS = 0x2B,
/// 64-bit encrypted segment information
LC_ENCRYPTION_INFO_64 = 0x2C,
/// linker options in MH_OBJECT files
LC_LINKER_OPTION = 0x2D,
/// optimization hints in MH_OBJECT files
LC_LINKER_OPTIMIZATION_HINT = 0x2E,
/// build for Watch min OS version
LC_VERSION_MIN_WATCHOS = 0x30,
}
| LcType | identifier_name |
constants.rs | pub const MH_MAGIC_64: u32 = 0xfeedfacf;
pub const MH_CIGAM_64: u32 = 0xcffaedfe;
const LC_REQ_DYLD: u32 = 0x80000000;
#[repr(u32)]
#[derive(Eq,PartialEq)]
#[allow(non_camel_case_types)]
pub enum LcType {
/// After MacOS X 10.1 when a new load command is added that is required to be
/// understood by the dynamic linker for the image to execute properly the
/// LC_REQ_DYLD bit will be or'ed into the load command constant. If the dynamic
/// linker sees such a load command it it does not understand will issue a
/// "unknown load command required for execution" error and refuse to use the
/// image. Other load commands without this bit that are not understood will
/// simply be ignored.
LC_REQ_DYLD = LC_REQ_DYLD,
/// segment of this file to be mapped
LC_SEGMENT = 0x1,
/// link-edit stab symbol table info
LC_SYMTAB = 0x2,
/// link-edit gdb symbol table info (obsolete)
LC_SYMSEG = 0x3,
/// thread
LC_THREAD = 0x4,
/// unix thread (includes a stack)
LC_UNIXTHREAD = 0x5,
/// load a specified fixed VM shared library
LC_LOADFVMLIB = 0x6,
/// fixed VM shared library identification
LC_IDFVMLIB = 0x7,
/// object identification info (obsolete)
LC_IDENT = 0x8,
/// fixed VM file inclusion (internal use)
LC_FVMFILE = 0x9,
/// prepage command (internal use)
LC_PREPAGE = 0xa,
/// dynamic link-edit symbol table info
LC_DYSYMTAB = 0xb,
/// load a dynamically linked shared library
LC_LOAD_DYLIB = 0xc,
/// dynamically linked shared lib ident
LC_ID_DYLIB = 0xd,
/// load a dynamic linker
LC_LOAD_DYLINKER = 0xe,
/// dynamic linker identification
LC_ID_DYLINKER = 0xf,
/// modules prebound for a dynamically linked shared library
LC_PREBOUND_DYLIB = 0x10,
/// image routines
LC_ROUTINES = 0x11,
/// sub framework
LC_SUB_FRAMEWORK = 0x12,
/// sub umbrella
LC_SUB_UMBRELLA = 0x13,
/// sub client
LC_SUB_CLIENT = 0x14,
/// sub library |
/// two-level namespace lookup hints
LC_TWOLEVEL_HINTS = 0x16,
/// prebind checksum
LC_PREBIND_CKSUM = 0x17,
/// load a dynamically linked shared library that is allowed to be missing
/// (all symbols are weak imported).
LC_LOAD_WEAK_DYLIB = (0x18 | LC_REQ_DYLD),
/// 64-bit segment of this file to be mapped
LC_SEGMENT_64 = 0x19,
/// 64-bit image routines
LC_ROUTINES_64 = 0x1a,
/// the uuid
LC_UUID = 0x1b,
/// runpath additions
LC_RPATH = (0x1c | LC_REQ_DYLD),
/// local of code signature
LC_CODE_SIGNATURE = 0x1d,
/// local of info to split segments
LC_SEGMENT_SPLIT_INFO = 0x1e,
/// load and re-export dylib
LC_REEXPORT_DYLIB = (0x1f | LC_REQ_DYLD),
/// delay load of dylib until first use
LC_LAZY_LOAD_DYLIB = 0x20,
/// encrypted segment information
LC_ENCRYPTION_INFO = 0x21,
/// compressed dyld information
LC_DYLD_INFO = 0x22,
/// compressed dyld information only
LC_DYLD_INFO_ONLY = (0x22|LC_REQ_DYLD),
/// load upward dylib
LC_LOAD_UPWARD_DYLIB = (0x23 | LC_REQ_DYLD),
/// build for MacOSX min OS version
LC_VERSION_MIN_MACOSX = 0x24,
/// build for iPhoneOS min OS version
LC_VERSION_MIN_IPHONEOS = 0x25,
/// compressed table of function start addresses
LC_FUNCTION_STARTS = 0x26,
/// string for dyld to treat like environment variable
LC_DYLD_ENVIRONMENT = 0x27,
/// replacement for LC_UNIXTHREAD
LC_MAIN = (0x28|LC_REQ_DYLD),
/// table of non-instructions in __text
LC_DATA_IN_CODE = 0x29,
/// source version used to build binary
LC_SOURCE_VERSION = 0x2A,
/// Code signing DRs copied from linked dylibs
LC_DYLIB_CODE_SIGN_DRS = 0x2B,
/// 64-bit encrypted segment information
LC_ENCRYPTION_INFO_64 = 0x2C,
/// linker options in MH_OBJECT files
LC_LINKER_OPTION = 0x2D,
/// optimization hints in MH_OBJECT files
LC_LINKER_OPTIMIZATION_HINT = 0x2E,
/// build for Watch min OS version
LC_VERSION_MIN_WATCHOS = 0x30,
} | LC_SUB_LIBRARY = 0x15, | random_line_split |
lib.rs | // Definitions
mod definitions {
//
// Each encoded block begins with the varint-encoded length of the decoded data,
// followed by a sequence of chunks. Chunks begin and end on byte boundaries.
// The
// first byte of each chunk is broken into its 2 least and 6 most significant
// bits
// called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk
// tag.
// Zero means a literal tag. All other values mean a copy tag.
//
// For literal tags:
// - If m < 60, the next 1 + m bytes are literal bytes.
// - Otherwise, let n be the little-endian unsigned integer denoted by the next
// m - 59 bytes. The next 1 + n bytes after that are literal bytes.
//
// For copy tags, length bytes are copied from offset bytes ago, in the style of
// Lempel-Ziv compression algorithms. In particular:
// - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
// The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
// of the offset. The next byte is bits 0-7 of the offset. | // - For l == 3, this tag is a legacy format that is no longer supported.
//
pub const TAG_LITERAL: u8 = 0x00;
pub const TAG_COPY_1: u8 = 0x01;
pub const TAG_COPY_2: u8 = 0x02;
pub const TAG_COPY_4: u8 = 0x03;
pub const CHECK_SUM_SIZE: u8 = 4;
pub const CHUNK_HEADER_SIZE: u8 = 4;
pub const MAGIC_BODY : [u8; 6] = *b"sNaPpY";
pub const MAGIC_CHUNK : [u8; 10] = [0xff,0x06,0x00,0x00,0x73,0x4e,0x61,0x50,0x70,0x59];
// https://github.com/google/snappy/blob/master/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
pub const MAX_UNCOMPRESSED_CHUNK_LEN : u32 = 65536;
pub const CHUNK_TYPE_COMPRESSED_DATA: u8 = 0x00;
pub const CHUNK_TYPE_UNCOMPRESSED_DATA: u8 = 0x01;
pub const CHUNK_TYPE_PADDING: u8 = 0xfe;
pub const CHUNK_TYPE_STREAM_IDENTIFIER: u8 = 0xff;
}
// Snappy Compressor
mod compress;
pub use self::compress::{Compressor, compress, max_compressed_len};
// Snappy Decompressor
mod decompress;
pub use self::decompress::{Decompressor, decompress, decompressed_len}; | // - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
// The length is 1 + m. The offset is the little-endian unsigned integer
// denoted by the next 2 bytes. | random_line_split |
main.rs | use std::io;
pub struct JiaeHomework {
index: usize,
value: i32,
}
fn stdinln_i32() -> i32 {
// 이 함수는 하나의 줄을 stdin으로 받아 단 하나의 integer를 리턴합니다.
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).expect("Failed to read stdin.");
buffer.trim().parse::<i32>().unwrap()
}
fn stdinln_vec_i32() -> Vec<i32> {
// 이 함수는 하나의 줄을 stdin으로 받아 여러개의 integer 을 vector로 리턴합니다.
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).expect("Failed to read line");
let ret: Vec<i32> = buffer.split(" ")
.map(|x| x.trim().parse().expect("Unexpected Integer Pattern"))
.collect();
ret
}
/*
이번 문제는 Method 스타일로 풀어봅시다.!
https://doc.rust-lang.org/rust-by-example/fn/methods.html
이번 문제는 Rust-Ownership 문제가 야기될 수 있습니다. 한번 읽어보세요.
https://rinthel.github.io/rust-lang-book-ko/ch04-02-references-and-borrowing.html
*/
impl JiaeHomework {
fn new() -> JiaeHomework {
JiaeHomework{index:0, value:0}
}
fn solve(&mut self, source: &Vec<i32>, nr_items: usize) {
// solve를 완성해주세요!
let mut sum :i32 = 0;
// get Sum of source vector.
for i in 0..nr_items{
sum += source[i];
}
let avg = sum/nr_items as i32;
// Rust 2018에서는 (source[0]-avg).abs() 를 해도 되나.
// 현재 구름은 rustc 2017년대 버젼을 쓰고있다. 따라서 이와같이 해결한다.
// i32::abs(...)
let mut near_gap = i32::abs(source[0]-avg);
for i in 1..nr_items {
let current_gap = i32::abs(source[i]-avg);
if current_gap < near_gap {
self.index = i;
near_gap = current_gap;
}
}
self.value = source[self.index];
// End of solve
}
fn print(&self) {
println!("{} {}", self.index + 1, self.value);
}
}
fn main(){
let nr_case = stdinln_i32() as usize;
let inpu | 를 완성해주세요!
let mut problem = JiaeHomework::new();
problem.solve(&inputs, nr_case);
problem.print();
} | ts = stdinln_vec_i32();
// JiaeHomework.solve(...) | conditional_block |
main.rs | use std::io;
pub struct JiaeHomework {
index: usize,
value: i32,
}
fn stdinln_i32() -> i32 {
// 이 함수는 하나의 줄을 stdin으로 받아 단 하나의 integer를 리턴합니다.
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).expect("Failed to read stdin.");
buffer.trim().parse::<i32>().unwrap()
}
fn stdinln_vec_i32() -> Vec<i32> {
// 이 함수는 하나의 줄을 stdin으로 받아 여러개의 integer 을 vector로 리턴합니다.
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).expect("Failed to read line");
let ret: Vec<i32> = buffer.split(" ")
.map(|x| x.trim().parse().expect("Unexpected Integer Pattern"))
.collect();
ret
}
/*
이번 문제는 Method 스타일로 풀어봅시다.!
https://doc.rust-lang.org/rust-by-example/fn/methods.html
이번 문제는 Rust-Ownership 문제가 야기될 수 있습니다. 한번 읽어보세요.
https://rinthel.github.io/rust-lang-book-ko/ch04-02-references-and-borrowing.html
*/
impl JiaeHomework {
fn new() -> JiaeHomework {
JiaeHomework{index:0, value:0}
}
fn solve(&mut self, source: &Vec<i32>, nr_items: usize) {
// solve를 완성해주세요!
let mut sum :i32 = 0;
// get Sum of source vector.
for i in 0..nr_items{
sum += source[i];
}
let avg = sum/nr_items as i32;
// Rust 2018에서는 (source[0]-avg).abs() 를 해도 되나.
// 현재 구름은 rustc 2017년대 버젼을 쓰고있다. 따라서 이와같이 해결한다.
// i32::abs(...)
let mut near_gap = i32::abs(source[0]-avg);
for i in 1..nr_items {
let current_gap = i32::abs(source[i]-avg);
if current_gap < near_gap {
self.index = i;
near_gap = current_gap;
}
}
self.value = source[self.index];
// End of solve
}
fn print(&self) {
println!("{} {}", self.index + 1, self.value);
}
}
fn main(){
let nr_case = stdinln_i32() as usize;
let inputs = stdinln_vec_i32();
// JiaeHomework.solve(...) 를 완성해주세요!
let mut problem = JiaeHomework::new();
problem.solve(&inputs, nr_case);
problem.print();
} | identifier_body |
||
main.rs | use std::io;
pub struct JiaeHomework {
index: usize,
value: i32,
}
fn stdinln_i32() -> i32 {
// 이 함수는 하나의 줄을 stdin으로 받아 단 하나의 integer를 리턴합니다.
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).expect("Failed to read stdin.");
buffer.trim().parse::<i32>().unwrap()
}
fn stdinln_vec_i32() -> Vec<i32> {
// 이 함수는 하나의 줄을 stdin으로 받아 여러개의 integer 을 vector로 리턴합니다.
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).expect("Failed to read line");
let ret: Vec<i32> = buffer.split(" ")
.map(|x| x.trim().parse().expect("Unexpected Integer Pattern"))
.collect();
ret
}
/*
이번 문제는 Method 스타일로 풀어봅시다.!
https://doc.rust-lang.org/rust-by-example/fn/methods.html
이번 문제는 Rust-Ownership 문제가 야기될 수 있습니다. 한번 읽어보세요.
https://rinthel.github.io/rust-lang-book-ko/ch04-02-references-and-borrowing.html
*/
impl JiaeHomework {
fn new() -> JiaeHomework {
JiaeHomework{index:0, value:0}
}
fn solve(&mut self, source: &Vec<i32>, nr_items: usize) {
// solve를 완성해주세요!
let mut sum :i32 = 0;
// get Sum of source vector.
for i in 0..nr_items{
sum += sour | ;
}
let avg = sum/nr_items as i32;
// Rust 2018에서는 (source[0]-avg).abs() 를 해도 되나.
// 현재 구름은 rustc 2017년대 버젼을 쓰고있다. 따라서 이와같이 해결한다.
// i32::abs(...)
let mut near_gap = i32::abs(source[0]-avg);
for i in 1..nr_items {
let current_gap = i32::abs(source[i]-avg);
if current_gap < near_gap {
self.index = i;
near_gap = current_gap;
}
}
self.value = source[self.index];
// End of solve
}
fn print(&self) {
println!("{} {}", self.index + 1, self.value);
}
}
fn main(){
let nr_case = stdinln_i32() as usize;
let inputs = stdinln_vec_i32();
// JiaeHomework.solve(...) 를 완성해주세요!
let mut problem = JiaeHomework::new();
problem.solve(&inputs, nr_case);
problem.print();
} | ce[i] | identifier_name |
main.rs | use std::io;
pub struct JiaeHomework {
index: usize,
value: i32,
}
fn stdinln_i32() -> i32 {
// 이 함수는 하나의 줄을 stdin으로 받아 단 하나의 integer를 리턴합니다.
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).expect("Failed to read stdin.");
buffer.trim().parse::<i32>().unwrap()
}
fn stdinln_vec_i32() -> Vec<i32> { | .collect();
ret
}
/*
이번 문제는 Method 스타일로 풀어봅시다.!
https://doc.rust-lang.org/rust-by-example/fn/methods.html
이번 문제는 Rust-Ownership 문제가 야기될 수 있습니다. 한번 읽어보세요.
https://rinthel.github.io/rust-lang-book-ko/ch04-02-references-and-borrowing.html
*/
impl JiaeHomework {
fn new() -> JiaeHomework {
JiaeHomework{index:0, value:0}
}
fn solve(&mut self, source: &Vec<i32>, nr_items: usize) {
// solve를 완성해주세요!
let mut sum :i32 = 0;
// get Sum of source vector.
for i in 0..nr_items{
sum += source[i];
}
let avg = sum/nr_items as i32;
// Rust 2018에서는 (source[0]-avg).abs() 를 해도 되나.
// 현재 구름은 rustc 2017년대 버젼을 쓰고있다. 따라서 이와같이 해결한다.
// i32::abs(...)
let mut near_gap = i32::abs(source[0]-avg);
for i in 1..nr_items {
let current_gap = i32::abs(source[i]-avg);
if current_gap < near_gap {
self.index = i;
near_gap = current_gap;
}
}
self.value = source[self.index];
// End of solve
}
fn print(&self) {
println!("{} {}", self.index + 1, self.value);
}
}
fn main(){
let nr_case = stdinln_i32() as usize;
let inputs = stdinln_vec_i32();
// JiaeHomework.solve(...) 를 완성해주세요!
let mut problem = JiaeHomework::new();
problem.solve(&inputs, nr_case);
problem.print();
} | // 이 함수는 하나의 줄을 stdin으로 받아 여러개의 integer 을 vector로 리턴합니다.
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).expect("Failed to read line");
let ret: Vec<i32> = buffer.split(" ")
.map(|x| x.trim().parse().expect("Unexpected Integer Pattern")) | random_line_split |
player.rs | use diesel::prelude::*;
use diesel::pg::PgConnection;
use video::Video;
use room::Room;
use std::{thread, time};
use schema;
use std::time::SystemTime;
use std::collections::HashMap;
use std::sync::Mutex;
use establish_connection;
lazy_static! {
static ref PLAYLIST_THREADS: Mutex<HashMap<i64, VideoStatus>> = Mutex::new(HashMap::new());
}
enum VideoStatus {
Play,
Skip,
}
/// Fetches the current video from the playlist and waits for the duration of the video
/// Afterwards it updates the database and marks the video as played.
pub fn play_current_video(conn: &PgConnection, room: &Room) -> bool {
use self::schema::videos::dsl::*;
let video = Video::belonging_to(room)
.filter(played.eq(false))
.order(id)
.first::<Video>(conn);
match video {
Ok(video) => {
let video_duration = time::Duration::from_secs(duration_to_seconds(&video.duration));
super::diesel::update(&video)
.set(started_on.eq(SystemTime::now()))
.execute(conn)
.expect("Unable to start playing the current video.");
println!(
"Start playing: [{}] With ID: [{}] and duration: [{}] in room: [{}].",
&video.title, &video.id, &video.duration, &room.name
);
PLAYLIST_THREADS
.lock()
.unwrap()
.insert(room.id, VideoStatus::Play);
let now = SystemTime::now();
let mut playing: bool = true;
// Continue playing this song while playing is true
// Playing will be set to false if either the timer has run out
// Or when someone skips the song by setting the PLAYLIST_THREADS[ROOM_NAME] to something other than "play"
while playing {
// Check if someone tried to skip the video
match PLAYLIST_THREADS.lock().unwrap().get(&room.id) {
Some(status) => {
playing = handle_video_event(status);
}
None => {
PLAYLIST_THREADS
.lock()
.unwrap()
.insert(room.id, VideoStatus::Play);
}
}
// Check if the video has ran out of time
match now.elapsed() {
Ok(elapsed) => {
if elapsed.as_secs() >= video_duration.as_secs() {
playing = false;
}
}
Err(e) => {
playing = false;
println!("SystemTime elapsed error: {}", e);
}
}
thread::sleep(time::Duration::from_millis(250));
}
println!("Done playing [{}] from room [{}]", &video.title, &room.name);
// Mark the video as played
super::diesel::update(&video)
.set(played.eq(true))
.execute(conn)
.expect("Unable to mark the current video as played.");
true
}
Err(_) => {
stop_playing(room);
false
}
}
}
fn handle_video_event(status: &VideoStatus) -> bool {
match *status {
VideoStatus::Play => true,
VideoStatus::Skip => false,
}
}
/// Start a thread to watch a certain playlist
pub fn play_video_thread(room: Room) {
thread::Builder::new()
.spawn(move || {
let c: PgConnection = establish_connection();
loop {
play_current_video(&c, &room);
if!PLAYLIST_THREADS.lock().unwrap().contains_key(&room.id) {
println!("Stop playin thread with id: {}", room.id);
break;
}
}
})
.unwrap();
}
// Loop through every room & start playing their playlists IF the playlist isn't empty.
// At the end of the loop, start the FFA playlist(room None)
pub fn init_playlist_listener() {
use self::schema::rooms::dsl::*;
use playlist::Playlist;
let conn: PgConnection = establish_connection();
let result = rooms.load::<Room>(&conn).expect("Error loading videos");
for room in result {
if Playlist::is_empty(&conn, &room) {
continue;
}
start_playing(room);
}
}
pub fn start_playing(room: Room) |
pub fn stop_playing(room: &Room) {
PLAYLIST_THREADS.lock().unwrap().remove(&room.id);
}
// Returns a duration string as seconds
// EG: "PT1H10M10S" -> 4210
pub fn duration_to_seconds(duration: &str) -> u64 {
let v: Vec<&str> = duration.split(|c: char|!c.is_numeric()).collect();
let mut index: u32 = 0;
let mut total: i32 = 0;
for i in (0..v.len()).rev() {
if!v[i].is_empty() {
total += v[i].parse::<i32>().unwrap() * (60i32.pow(index));
index += 1;
}
}
total as u64
}
pub fn skip_video(room: &i64) {
let mut rooms = PLAYLIST_THREADS.lock().unwrap();
println!("Skipping a song in room [{}]", room);
if let Some(mut_key) = rooms.get_mut(room) {
*mut_key = VideoStatus::Skip;
} else {
println!("Invalid room, could not skip song.");
}
}
| {
let mut hashmap = PLAYLIST_THREADS.lock().unwrap();
if !hashmap.contains_key(&room.id) {
hashmap.insert(room.id, VideoStatus::Play);
play_video_thread(room);
}
} | identifier_body |
player.rs | use diesel::prelude::*;
use diesel::pg::PgConnection;
use video::Video;
use room::Room;
use std::{thread, time};
use schema;
use std::time::SystemTime;
use std::collections::HashMap;
use std::sync::Mutex;
use establish_connection;
lazy_static! {
static ref PLAYLIST_THREADS: Mutex<HashMap<i64, VideoStatus>> = Mutex::new(HashMap::new());
}
enum | {
Play,
Skip,
}
/// Fetches the current video from the playlist and waits for the duration of the video
/// Afterwards it updates the database and marks the video as played.
pub fn play_current_video(conn: &PgConnection, room: &Room) -> bool {
use self::schema::videos::dsl::*;
let video = Video::belonging_to(room)
.filter(played.eq(false))
.order(id)
.first::<Video>(conn);
match video {
Ok(video) => {
let video_duration = time::Duration::from_secs(duration_to_seconds(&video.duration));
super::diesel::update(&video)
.set(started_on.eq(SystemTime::now()))
.execute(conn)
.expect("Unable to start playing the current video.");
println!(
"Start playing: [{}] With ID: [{}] and duration: [{}] in room: [{}].",
&video.title, &video.id, &video.duration, &room.name
);
PLAYLIST_THREADS
.lock()
.unwrap()
.insert(room.id, VideoStatus::Play);
let now = SystemTime::now();
let mut playing: bool = true;
// Continue playing this song while playing is true
// Playing will be set to false if either the timer has run out
// Or when someone skips the song by setting the PLAYLIST_THREADS[ROOM_NAME] to something other than "play"
while playing {
// Check if someone tried to skip the video
match PLAYLIST_THREADS.lock().unwrap().get(&room.id) {
Some(status) => {
playing = handle_video_event(status);
}
None => {
PLAYLIST_THREADS
.lock()
.unwrap()
.insert(room.id, VideoStatus::Play);
}
}
// Check if the video has ran out of time
match now.elapsed() {
Ok(elapsed) => {
if elapsed.as_secs() >= video_duration.as_secs() {
playing = false;
}
}
Err(e) => {
playing = false;
println!("SystemTime elapsed error: {}", e);
}
}
thread::sleep(time::Duration::from_millis(250));
}
println!("Done playing [{}] from room [{}]", &video.title, &room.name);
// Mark the video as played
super::diesel::update(&video)
.set(played.eq(true))
.execute(conn)
.expect("Unable to mark the current video as played.");
true
}
Err(_) => {
stop_playing(room);
false
}
}
}
fn handle_video_event(status: &VideoStatus) -> bool {
match *status {
VideoStatus::Play => true,
VideoStatus::Skip => false,
}
}
/// Start a thread to watch a certain playlist
pub fn play_video_thread(room: Room) {
thread::Builder::new()
.spawn(move || {
let c: PgConnection = establish_connection();
loop {
play_current_video(&c, &room);
if!PLAYLIST_THREADS.lock().unwrap().contains_key(&room.id) {
println!("Stop playin thread with id: {}", room.id);
break;
}
}
})
.unwrap();
}
// Loop through every room & start playing their playlists IF the playlist isn't empty.
// At the end of the loop, start the FFA playlist(room None)
pub fn init_playlist_listener() {
use self::schema::rooms::dsl::*;
use playlist::Playlist;
let conn: PgConnection = establish_connection();
let result = rooms.load::<Room>(&conn).expect("Error loading videos");
for room in result {
if Playlist::is_empty(&conn, &room) {
continue;
}
start_playing(room);
}
}
pub fn start_playing(room: Room) {
let mut hashmap = PLAYLIST_THREADS.lock().unwrap();
if!hashmap.contains_key(&room.id) {
hashmap.insert(room.id, VideoStatus::Play);
play_video_thread(room);
}
}
pub fn stop_playing(room: &Room) {
PLAYLIST_THREADS.lock().unwrap().remove(&room.id);
}
// Returns a duration string as seconds
// EG: "PT1H10M10S" -> 4210
pub fn duration_to_seconds(duration: &str) -> u64 {
let v: Vec<&str> = duration.split(|c: char|!c.is_numeric()).collect();
let mut index: u32 = 0;
let mut total: i32 = 0;
for i in (0..v.len()).rev() {
if!v[i].is_empty() {
total += v[i].parse::<i32>().unwrap() * (60i32.pow(index));
index += 1;
}
}
total as u64
}
pub fn skip_video(room: &i64) {
let mut rooms = PLAYLIST_THREADS.lock().unwrap();
println!("Skipping a song in room [{}]", room);
if let Some(mut_key) = rooms.get_mut(room) {
*mut_key = VideoStatus::Skip;
} else {
println!("Invalid room, could not skip song.");
}
}
| VideoStatus | identifier_name |
player.rs | use diesel::prelude::*;
use diesel::pg::PgConnection;
use video::Video;
use room::Room;
use std::{thread, time};
use schema;
use std::time::SystemTime;
use std::collections::HashMap;
use std::sync::Mutex;
use establish_connection;
|
enum VideoStatus {
Play,
Skip,
}
/// Fetches the current video from the playlist and waits for the duration of the video
/// Afterwards it updates the database and marks the video as played.
pub fn play_current_video(conn: &PgConnection, room: &Room) -> bool {
use self::schema::videos::dsl::*;
let video = Video::belonging_to(room)
.filter(played.eq(false))
.order(id)
.first::<Video>(conn);
match video {
Ok(video) => {
let video_duration = time::Duration::from_secs(duration_to_seconds(&video.duration));
super::diesel::update(&video)
.set(started_on.eq(SystemTime::now()))
.execute(conn)
.expect("Unable to start playing the current video.");
println!(
"Start playing: [{}] With ID: [{}] and duration: [{}] in room: [{}].",
&video.title, &video.id, &video.duration, &room.name
);
PLAYLIST_THREADS
.lock()
.unwrap()
.insert(room.id, VideoStatus::Play);
let now = SystemTime::now();
let mut playing: bool = true;
// Continue playing this song while playing is true
// Playing will be set to false if either the timer has run out
// Or when someone skips the song by setting the PLAYLIST_THREADS[ROOM_NAME] to something other than "play"
while playing {
// Check if someone tried to skip the video
match PLAYLIST_THREADS.lock().unwrap().get(&room.id) {
Some(status) => {
playing = handle_video_event(status);
}
None => {
PLAYLIST_THREADS
.lock()
.unwrap()
.insert(room.id, VideoStatus::Play);
}
}
// Check if the video has ran out of time
match now.elapsed() {
Ok(elapsed) => {
if elapsed.as_secs() >= video_duration.as_secs() {
playing = false;
}
}
Err(e) => {
playing = false;
println!("SystemTime elapsed error: {}", e);
}
}
thread::sleep(time::Duration::from_millis(250));
}
println!("Done playing [{}] from room [{}]", &video.title, &room.name);
// Mark the video as played
super::diesel::update(&video)
.set(played.eq(true))
.execute(conn)
.expect("Unable to mark the current video as played.");
true
}
Err(_) => {
stop_playing(room);
false
}
}
}
fn handle_video_event(status: &VideoStatus) -> bool {
match *status {
VideoStatus::Play => true,
VideoStatus::Skip => false,
}
}
/// Start a thread to watch a certain playlist
pub fn play_video_thread(room: Room) {
thread::Builder::new()
.spawn(move || {
let c: PgConnection = establish_connection();
loop {
play_current_video(&c, &room);
if!PLAYLIST_THREADS.lock().unwrap().contains_key(&room.id) {
println!("Stop playin thread with id: {}", room.id);
break;
}
}
})
.unwrap();
}
// Loop through every room & start playing their playlists IF the playlist isn't empty.
// At the end of the loop, start the FFA playlist(room None)
pub fn init_playlist_listener() {
use self::schema::rooms::dsl::*;
use playlist::Playlist;
let conn: PgConnection = establish_connection();
let result = rooms.load::<Room>(&conn).expect("Error loading videos");
for room in result {
if Playlist::is_empty(&conn, &room) {
continue;
}
start_playing(room);
}
}
pub fn start_playing(room: Room) {
let mut hashmap = PLAYLIST_THREADS.lock().unwrap();
if!hashmap.contains_key(&room.id) {
hashmap.insert(room.id, VideoStatus::Play);
play_video_thread(room);
}
}
pub fn stop_playing(room: &Room) {
PLAYLIST_THREADS.lock().unwrap().remove(&room.id);
}
// Returns a duration string as seconds
// EG: "PT1H10M10S" -> 4210
pub fn duration_to_seconds(duration: &str) -> u64 {
let v: Vec<&str> = duration.split(|c: char|!c.is_numeric()).collect();
let mut index: u32 = 0;
let mut total: i32 = 0;
for i in (0..v.len()).rev() {
if!v[i].is_empty() {
total += v[i].parse::<i32>().unwrap() * (60i32.pow(index));
index += 1;
}
}
total as u64
}
pub fn skip_video(room: &i64) {
let mut rooms = PLAYLIST_THREADS.lock().unwrap();
println!("Skipping a song in room [{}]", room);
if let Some(mut_key) = rooms.get_mut(room) {
*mut_key = VideoStatus::Skip;
} else {
println!("Invalid room, could not skip song.");
}
} | lazy_static! {
static ref PLAYLIST_THREADS: Mutex<HashMap<i64, VideoStatus>> = Mutex::new(HashMap::new());
} | random_line_split |
player.rs | use diesel::prelude::*;
use diesel::pg::PgConnection;
use video::Video;
use room::Room;
use std::{thread, time};
use schema;
use std::time::SystemTime;
use std::collections::HashMap;
use std::sync::Mutex;
use establish_connection;
lazy_static! {
static ref PLAYLIST_THREADS: Mutex<HashMap<i64, VideoStatus>> = Mutex::new(HashMap::new());
}
enum VideoStatus {
Play,
Skip,
}
/// Fetches the current video from the playlist and waits for the duration of the video
/// Afterwards it updates the database and marks the video as played.
pub fn play_current_video(conn: &PgConnection, room: &Room) -> bool {
use self::schema::videos::dsl::*;
let video = Video::belonging_to(room)
.filter(played.eq(false))
.order(id)
.first::<Video>(conn);
match video {
Ok(video) => {
let video_duration = time::Duration::from_secs(duration_to_seconds(&video.duration));
super::diesel::update(&video)
.set(started_on.eq(SystemTime::now()))
.execute(conn)
.expect("Unable to start playing the current video.");
println!(
"Start playing: [{}] With ID: [{}] and duration: [{}] in room: [{}].",
&video.title, &video.id, &video.duration, &room.name
);
PLAYLIST_THREADS
.lock()
.unwrap()
.insert(room.id, VideoStatus::Play);
let now = SystemTime::now();
let mut playing: bool = true;
// Continue playing this song while playing is true
// Playing will be set to false if either the timer has run out
// Or when someone skips the song by setting the PLAYLIST_THREADS[ROOM_NAME] to something other than "play"
while playing {
// Check if someone tried to skip the video
match PLAYLIST_THREADS.lock().unwrap().get(&room.id) {
Some(status) => |
None => {
PLAYLIST_THREADS
.lock()
.unwrap()
.insert(room.id, VideoStatus::Play);
}
}
// Check if the video has ran out of time
match now.elapsed() {
Ok(elapsed) => {
if elapsed.as_secs() >= video_duration.as_secs() {
playing = false;
}
}
Err(e) => {
playing = false;
println!("SystemTime elapsed error: {}", e);
}
}
thread::sleep(time::Duration::from_millis(250));
}
println!("Done playing [{}] from room [{}]", &video.title, &room.name);
// Mark the video as played
super::diesel::update(&video)
.set(played.eq(true))
.execute(conn)
.expect("Unable to mark the current video as played.");
true
}
Err(_) => {
stop_playing(room);
false
}
}
}
fn handle_video_event(status: &VideoStatus) -> bool {
match *status {
VideoStatus::Play => true,
VideoStatus::Skip => false,
}
}
/// Start a thread to watch a certain playlist
pub fn play_video_thread(room: Room) {
thread::Builder::new()
.spawn(move || {
let c: PgConnection = establish_connection();
loop {
play_current_video(&c, &room);
if!PLAYLIST_THREADS.lock().unwrap().contains_key(&room.id) {
println!("Stop playin thread with id: {}", room.id);
break;
}
}
})
.unwrap();
}
// Loop through every room & start playing their playlists IF the playlist isn't empty.
// At the end of the loop, start the FFA playlist(room None)
pub fn init_playlist_listener() {
use self::schema::rooms::dsl::*;
use playlist::Playlist;
let conn: PgConnection = establish_connection();
let result = rooms.load::<Room>(&conn).expect("Error loading videos");
for room in result {
if Playlist::is_empty(&conn, &room) {
continue;
}
start_playing(room);
}
}
pub fn start_playing(room: Room) {
let mut hashmap = PLAYLIST_THREADS.lock().unwrap();
if!hashmap.contains_key(&room.id) {
hashmap.insert(room.id, VideoStatus::Play);
play_video_thread(room);
}
}
pub fn stop_playing(room: &Room) {
PLAYLIST_THREADS.lock().unwrap().remove(&room.id);
}
// Returns a duration string as seconds
// EG: "PT1H10M10S" -> 4210
pub fn duration_to_seconds(duration: &str) -> u64 {
let v: Vec<&str> = duration.split(|c: char|!c.is_numeric()).collect();
let mut index: u32 = 0;
let mut total: i32 = 0;
for i in (0..v.len()).rev() {
if!v[i].is_empty() {
total += v[i].parse::<i32>().unwrap() * (60i32.pow(index));
index += 1;
}
}
total as u64
}
pub fn skip_video(room: &i64) {
let mut rooms = PLAYLIST_THREADS.lock().unwrap();
println!("Skipping a song in room [{}]", room);
if let Some(mut_key) = rooms.get_mut(room) {
*mut_key = VideoStatus::Skip;
} else {
println!("Invalid room, could not skip song.");
}
}
| {
playing = handle_video_event(status);
} | conditional_block |
issue_3979_traits.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[crate_id="issue_3979_traits#0.1"];
#[crate_type = "lib"];
pub trait Positioned {
fn SetX(&mut self, int);
fn X(&self) -> int;
}
pub trait Movable: Positioned {
fn | (&mut self, dx: int) {
let x = self.X() + dx;
self.SetX(x);
}
}
| translate | identifier_name |
issue_3979_traits.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[crate_id="issue_3979_traits#0.1"];
#[crate_type = "lib"];
pub trait Positioned {
fn SetX(&mut self, int);
fn X(&self) -> int;
}
pub trait Movable: Positioned {
fn translate(&mut self, dx: int) |
}
| {
let x = self.X() + dx;
self.SetX(x);
} | identifier_body |
issue_3979_traits.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[crate_id="issue_3979_traits#0.1"];
#[crate_type = "lib"];
pub trait Positioned {
fn SetX(&mut self, int);
fn X(&self) -> int;
}
pub trait Movable: Positioned {
fn translate(&mut self, dx: int) {
let x = self.X() + dx; | self.SetX(x);
}
} | random_line_split |
|
blob_records.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use crate::{
btree_set,
capacity::{Capacity, CHUNK_COPY_COUNT},
error::convert_to_error_message,
node_ops::{NodeDuties, NodeDuty},
Error, Result,
};
use log::{info, warn};
use sn_data_types::{Blob, BlobAddress, PublicKey};
use sn_messaging::{
client::{BlobDataExchange, BlobRead, BlobWrite, ClientSigned, CmdError, QueryResponse},
node::{NodeCmd, NodeMsg, NodeQuery, NodeSystemCmd},
Aggregation, EndUser, MessageId,
};
use sn_routing::Prefix;
use std::{
collections::BTreeSet,
fmt::{self, Display, Formatter},
};
use xor_name::XorName;
use super::{
adult_liveness::AdultLiveness, build_client_error_response, build_client_query_response,
};
/// Operations over the data type Blob.
pub(super) struct BlobRecords {
capacity: Capacity,
adult_liveness: AdultLiveness,
}
impl BlobRecords {
pub(super) fn new(capacity: Capacity) -> Self {
Self {
capacity,
adult_liveness: AdultLiveness::new(),
}
}
pub async fn get_data_of(&self, prefix: Prefix) -> BlobDataExchange {
// Prepare full_adult details
let full_adults = self.capacity.full_adults_matching(prefix).await;
BlobDataExchange { full_adults }
}
pub async fn update(&self, blob_data: BlobDataExchange) {
let BlobDataExchange { full_adults } = blob_data;
self.capacity.insert_full_adults(full_adults).await
}
/// Registered holders not present in provided list of members
/// will be removed from adult_storage_info and no longer tracked for liveness.
pub async fn retain_members_only(&mut self, members: BTreeSet<XorName>) -> Result<()> {
// full adults
self.capacity.retain_members_only(&members).await;
// stop tracking liveness of absent holders
self.adult_liveness.retain_members_only(members);
Ok(())
}
pub(super) async fn write(
&mut self,
write: BlobWrite,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
) -> Result<NodeDuty> {
use BlobWrite::*;
match write {
New(data) => self.store(data, msg_id, client_signed, origin).await,
DeletePrivate(address) => self.delete(address, msg_id, client_signed, origin).await,
}
}
/// Adds a given node to the list of full nodes.
pub async fn increase_full_node_count(&mut self, node_id: PublicKey) {
info!(
"No. of full Adults: {:?}",
self.capacity.full_adults_count().await
);
info!("Increasing full Adults count");
self.capacity
.insert_full_adults(btree_set!(XorName::from(node_id)))
.await;
}
/// Removes a given node from the list of full nodes.
#[allow(unused)] // TODO: Remove node from full list at 50%?
async fn decrease_full_adults_count_if_present(&mut self, node_name: XorName) {
info!(
"No. of Full Nodes: {:?}",
self.capacity.full_adults_count().await
);
info!("Checking if {:?} is present as full_node", node_name);
self.capacity
.remove_full_adults(btree_set!(node_name))
.await;
}
async fn send_chunks_to_adults(
&mut self,
data: Blob,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
) -> Result<NodeDuty> {
let target_holders = self.capacity.get_chunk_holder_adults(data.name()).await;
info!("Storing {} copies of the data", target_holders.len());
if CHUNK_COPY_COUNT > target_holders.len() {
return self
.send_error(
Error::NoAdults(self.capacity.our_prefix().await),
msg_id,
origin,
)
.await;
}
let blob_write = BlobWrite::New(data);
Ok(NodeDuty::SendToNodes {
targets: target_holders,
msg: NodeMsg::NodeCmd {
cmd: NodeCmd::Chunks {
cmd: blob_write,
client_signed,
origin,
},
id: msg_id,
},
aggregation: Aggregation::AtDestination,
})
}
async fn store(
&mut self,
data: Blob,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
) -> Result<NodeDuty> {
if let Err(error) = validate_data_owner(&data, &client_signed.public_key) {
return self.send_error(error, msg_id, origin).await;
}
self.send_chunks_to_adults(data, msg_id, client_signed, origin)
.await
}
pub async fn record_adult_read_liveness(
&mut self,
correlation_id: MessageId,
response: QueryResponse,
src: XorName,
) -> Result<NodeDuties> {
if!matches!(response, QueryResponse::GetBlob(_)) {
return Err(Error::Logic(format!(
"Got {:?}, but only `GetBlob` query responses are supposed to exist in this flow.",
response
))); | correlation_id,
&src,
response.is_success(),
) {
// If a full adult responds with error. Drop the response
if!response.is_success() && self.capacity.is_full(&src).await {
// We've already responded already with a success
// so do nothing
} else {
duties.push(NodeDuty::Send(build_client_query_response(
response,
correlation_id,
end_user,
)));
}
}
let mut unresponsive_adults = Vec::new();
for (name, count) in self.adult_liveness.find_unresponsive_adults() {
warn!(
"Adult {} has {} pending ops. It might be unresponsive",
name, count
);
unresponsive_adults.push(name);
}
if!unresponsive_adults.is_empty() {
duties.push(NodeDuty::ProposeOffline(unresponsive_adults));
}
Ok(duties)
}
async fn send_error(
&self,
error: Error,
msg_id: MessageId,
origin: EndUser,
) -> Result<NodeDuty> {
let error = convert_to_error_message(error);
Ok(NodeDuty::Send(build_client_error_response(
CmdError::Data(error),
msg_id,
origin,
)))
}
async fn delete(
&mut self,
address: BlobAddress,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
) -> Result<NodeDuty> {
let targets = self.capacity.get_chunk_holder_adults(address.name()).await;
let msg = NodeMsg::NodeCmd {
cmd: NodeCmd::Chunks {
cmd: BlobWrite::DeletePrivate(address),
client_signed,
origin,
},
id: msg_id,
};
Ok(NodeDuty::SendToNodes {
msg,
targets,
aggregation: Aggregation::AtDestination,
})
}
pub(super) async fn republish_chunk(&mut self, data: Blob) -> Result<NodeDuty> {
let owner = data.owner();
let target_holders = self.capacity.get_chunk_holder_adults(data.name()).await;
// deterministic msg id for aggregation
let msg_id = MessageId::from_content(&(*data.name(), owner, &target_holders))?;
info!(
"Republishing chunk {:?} to holders {:?} with MessageId {:?}",
data.address(),
&target_holders,
msg_id
);
Ok(NodeDuty::SendToNodes {
targets: target_holders,
msg: NodeMsg::NodeCmd {
cmd: NodeCmd::System(NodeSystemCmd::ReplicateChunk(data)),
id: msg_id,
},
aggregation: Aggregation::None,
})
}
pub(super) async fn read(
&mut self,
read: &BlobRead,
msg_id: MessageId,
origin: EndUser,
) -> Result<NodeDuty> {
match read {
BlobRead::Get(address) => self.get(*address, msg_id, origin).await,
}
}
async fn get(
&mut self,
address: BlobAddress,
msg_id: MessageId,
origin: EndUser,
) -> Result<NodeDuty> {
let targets = self.capacity.get_chunk_holder_adults(address.name()).await;
if targets.is_empty() {
return self
.send_error(
Error::NoAdults(self.capacity.our_prefix().await),
msg_id,
origin,
)
.await;
}
if self
.adult_liveness
.new_read(msg_id, address, origin, targets.clone())
{
let msg = NodeMsg::NodeQuery {
query: NodeQuery::Chunks {
query: BlobRead::Get(address),
origin,
},
id: msg_id,
};
Ok(NodeDuty::SendToNodes {
msg,
targets,
aggregation: Aggregation::None,
})
} else {
info!(
"Operation with MessageId {:?} is already in progress",
msg_id
);
Ok(NodeDuty::NoOp)
}
}
}
fn validate_data_owner(data: &Blob, requester: &PublicKey) -> Result<()> {
if data.is_private() {
data.owner()
.ok_or_else(|| Error::InvalidOwner(*requester))
.and_then(|data_owner| {
if data_owner!= requester {
Err(Error::InvalidOwner(*requester))
} else {
Ok(())
}
})
} else {
Ok(())
}
}
impl Display for BlobRecords {
fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
write!(formatter, "BlobRecords")
}
} | }
let mut duties = vec![];
if let Some((_address, end_user)) = self.adult_liveness.record_adult_read_liveness( | random_line_split |
blob_records.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use crate::{
btree_set,
capacity::{Capacity, CHUNK_COPY_COUNT},
error::convert_to_error_message,
node_ops::{NodeDuties, NodeDuty},
Error, Result,
};
use log::{info, warn};
use sn_data_types::{Blob, BlobAddress, PublicKey};
use sn_messaging::{
client::{BlobDataExchange, BlobRead, BlobWrite, ClientSigned, CmdError, QueryResponse},
node::{NodeCmd, NodeMsg, NodeQuery, NodeSystemCmd},
Aggregation, EndUser, MessageId,
};
use sn_routing::Prefix;
use std::{
collections::BTreeSet,
fmt::{self, Display, Formatter},
};
use xor_name::XorName;
use super::{
adult_liveness::AdultLiveness, build_client_error_response, build_client_query_response,
};
/// Operations over the data type Blob.
pub(super) struct BlobRecords {
capacity: Capacity,
adult_liveness: AdultLiveness,
}
impl BlobRecords {
pub(super) fn new(capacity: Capacity) -> Self {
Self {
capacity,
adult_liveness: AdultLiveness::new(),
}
}
pub async fn get_data_of(&self, prefix: Prefix) -> BlobDataExchange {
// Prepare full_adult details
let full_adults = self.capacity.full_adults_matching(prefix).await;
BlobDataExchange { full_adults }
}
pub async fn update(&self, blob_data: BlobDataExchange) {
let BlobDataExchange { full_adults } = blob_data;
self.capacity.insert_full_adults(full_adults).await
}
/// Registered holders not present in provided list of members
/// will be removed from adult_storage_info and no longer tracked for liveness.
pub async fn retain_members_only(&mut self, members: BTreeSet<XorName>) -> Result<()> {
// full adults
self.capacity.retain_members_only(&members).await;
// stop tracking liveness of absent holders
self.adult_liveness.retain_members_only(members);
Ok(())
}
pub(super) async fn write(
&mut self,
write: BlobWrite,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
) -> Result<NodeDuty> {
use BlobWrite::*;
match write {
New(data) => self.store(data, msg_id, client_signed, origin).await,
DeletePrivate(address) => self.delete(address, msg_id, client_signed, origin).await,
}
}
/// Adds a given node to the list of full nodes.
pub async fn | (&mut self, node_id: PublicKey) {
info!(
"No. of full Adults: {:?}",
self.capacity.full_adults_count().await
);
info!("Increasing full Adults count");
self.capacity
.insert_full_adults(btree_set!(XorName::from(node_id)))
.await;
}
/// Removes a given node from the list of full nodes.
#[allow(unused)] // TODO: Remove node from full list at 50%?
async fn decrease_full_adults_count_if_present(&mut self, node_name: XorName) {
info!(
"No. of Full Nodes: {:?}",
self.capacity.full_adults_count().await
);
info!("Checking if {:?} is present as full_node", node_name);
self.capacity
.remove_full_adults(btree_set!(node_name))
.await;
}
async fn send_chunks_to_adults(
&mut self,
data: Blob,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
) -> Result<NodeDuty> {
let target_holders = self.capacity.get_chunk_holder_adults(data.name()).await;
info!("Storing {} copies of the data", target_holders.len());
if CHUNK_COPY_COUNT > target_holders.len() {
return self
.send_error(
Error::NoAdults(self.capacity.our_prefix().await),
msg_id,
origin,
)
.await;
}
let blob_write = BlobWrite::New(data);
Ok(NodeDuty::SendToNodes {
targets: target_holders,
msg: NodeMsg::NodeCmd {
cmd: NodeCmd::Chunks {
cmd: blob_write,
client_signed,
origin,
},
id: msg_id,
},
aggregation: Aggregation::AtDestination,
})
}
async fn store(
&mut self,
data: Blob,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
) -> Result<NodeDuty> {
if let Err(error) = validate_data_owner(&data, &client_signed.public_key) {
return self.send_error(error, msg_id, origin).await;
}
self.send_chunks_to_adults(data, msg_id, client_signed, origin)
.await
}
pub async fn record_adult_read_liveness(
&mut self,
correlation_id: MessageId,
response: QueryResponse,
src: XorName,
) -> Result<NodeDuties> {
if!matches!(response, QueryResponse::GetBlob(_)) {
return Err(Error::Logic(format!(
"Got {:?}, but only `GetBlob` query responses are supposed to exist in this flow.",
response
)));
}
let mut duties = vec![];
if let Some((_address, end_user)) = self.adult_liveness.record_adult_read_liveness(
correlation_id,
&src,
response.is_success(),
) {
// If a full adult responds with error. Drop the response
if!response.is_success() && self.capacity.is_full(&src).await {
// We've already responded already with a success
// so do nothing
} else {
duties.push(NodeDuty::Send(build_client_query_response(
response,
correlation_id,
end_user,
)));
}
}
let mut unresponsive_adults = Vec::new();
for (name, count) in self.adult_liveness.find_unresponsive_adults() {
warn!(
"Adult {} has {} pending ops. It might be unresponsive",
name, count
);
unresponsive_adults.push(name);
}
if!unresponsive_adults.is_empty() {
duties.push(NodeDuty::ProposeOffline(unresponsive_adults));
}
Ok(duties)
}
async fn send_error(
&self,
error: Error,
msg_id: MessageId,
origin: EndUser,
) -> Result<NodeDuty> {
let error = convert_to_error_message(error);
Ok(NodeDuty::Send(build_client_error_response(
CmdError::Data(error),
msg_id,
origin,
)))
}
async fn delete(
&mut self,
address: BlobAddress,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
) -> Result<NodeDuty> {
let targets = self.capacity.get_chunk_holder_adults(address.name()).await;
let msg = NodeMsg::NodeCmd {
cmd: NodeCmd::Chunks {
cmd: BlobWrite::DeletePrivate(address),
client_signed,
origin,
},
id: msg_id,
};
Ok(NodeDuty::SendToNodes {
msg,
targets,
aggregation: Aggregation::AtDestination,
})
}
pub(super) async fn republish_chunk(&mut self, data: Blob) -> Result<NodeDuty> {
let owner = data.owner();
let target_holders = self.capacity.get_chunk_holder_adults(data.name()).await;
// deterministic msg id for aggregation
let msg_id = MessageId::from_content(&(*data.name(), owner, &target_holders))?;
info!(
"Republishing chunk {:?} to holders {:?} with MessageId {:?}",
data.address(),
&target_holders,
msg_id
);
Ok(NodeDuty::SendToNodes {
targets: target_holders,
msg: NodeMsg::NodeCmd {
cmd: NodeCmd::System(NodeSystemCmd::ReplicateChunk(data)),
id: msg_id,
},
aggregation: Aggregation::None,
})
}
pub(super) async fn read(
&mut self,
read: &BlobRead,
msg_id: MessageId,
origin: EndUser,
) -> Result<NodeDuty> {
match read {
BlobRead::Get(address) => self.get(*address, msg_id, origin).await,
}
}
async fn get(
&mut self,
address: BlobAddress,
msg_id: MessageId,
origin: EndUser,
) -> Result<NodeDuty> {
let targets = self.capacity.get_chunk_holder_adults(address.name()).await;
if targets.is_empty() {
return self
.send_error(
Error::NoAdults(self.capacity.our_prefix().await),
msg_id,
origin,
)
.await;
}
if self
.adult_liveness
.new_read(msg_id, address, origin, targets.clone())
{
let msg = NodeMsg::NodeQuery {
query: NodeQuery::Chunks {
query: BlobRead::Get(address),
origin,
},
id: msg_id,
};
Ok(NodeDuty::SendToNodes {
msg,
targets,
aggregation: Aggregation::None,
})
} else {
info!(
"Operation with MessageId {:?} is already in progress",
msg_id
);
Ok(NodeDuty::NoOp)
}
}
}
fn validate_data_owner(data: &Blob, requester: &PublicKey) -> Result<()> {
if data.is_private() {
data.owner()
.ok_or_else(|| Error::InvalidOwner(*requester))
.and_then(|data_owner| {
if data_owner!= requester {
Err(Error::InvalidOwner(*requester))
} else {
Ok(())
}
})
} else {
Ok(())
}
}
impl Display for BlobRecords {
fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
write!(formatter, "BlobRecords")
}
}
| increase_full_node_count | identifier_name |
blob_records.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use crate::{
btree_set,
capacity::{Capacity, CHUNK_COPY_COUNT},
error::convert_to_error_message,
node_ops::{NodeDuties, NodeDuty},
Error, Result,
};
use log::{info, warn};
use sn_data_types::{Blob, BlobAddress, PublicKey};
use sn_messaging::{
client::{BlobDataExchange, BlobRead, BlobWrite, ClientSigned, CmdError, QueryResponse},
node::{NodeCmd, NodeMsg, NodeQuery, NodeSystemCmd},
Aggregation, EndUser, MessageId,
};
use sn_routing::Prefix;
use std::{
collections::BTreeSet,
fmt::{self, Display, Formatter},
};
use xor_name::XorName;
use super::{
adult_liveness::AdultLiveness, build_client_error_response, build_client_query_response,
};
/// Operations over the data type Blob.
pub(super) struct BlobRecords {
capacity: Capacity,
adult_liveness: AdultLiveness,
}
impl BlobRecords {
pub(super) fn new(capacity: Capacity) -> Self {
Self {
capacity,
adult_liveness: AdultLiveness::new(),
}
}
pub async fn get_data_of(&self, prefix: Prefix) -> BlobDataExchange {
// Prepare full_adult details
let full_adults = self.capacity.full_adults_matching(prefix).await;
BlobDataExchange { full_adults }
}
pub async fn update(&self, blob_data: BlobDataExchange) {
let BlobDataExchange { full_adults } = blob_data;
self.capacity.insert_full_adults(full_adults).await
}
/// Registered holders not present in provided list of members
/// will be removed from adult_storage_info and no longer tracked for liveness.
pub async fn retain_members_only(&mut self, members: BTreeSet<XorName>) -> Result<()> {
// full adults
self.capacity.retain_members_only(&members).await;
// stop tracking liveness of absent holders
self.adult_liveness.retain_members_only(members);
Ok(())
}
pub(super) async fn write(
&mut self,
write: BlobWrite,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
) -> Result<NodeDuty> {
use BlobWrite::*;
match write {
New(data) => self.store(data, msg_id, client_signed, origin).await,
DeletePrivate(address) => self.delete(address, msg_id, client_signed, origin).await,
}
}
/// Adds a given node to the list of full nodes.
pub async fn increase_full_node_count(&mut self, node_id: PublicKey) {
info!(
"No. of full Adults: {:?}",
self.capacity.full_adults_count().await
);
info!("Increasing full Adults count");
self.capacity
.insert_full_adults(btree_set!(XorName::from(node_id)))
.await;
}
/// Removes a given node from the list of full nodes.
#[allow(unused)] // TODO: Remove node from full list at 50%?
async fn decrease_full_adults_count_if_present(&mut self, node_name: XorName) {
info!(
"No. of Full Nodes: {:?}",
self.capacity.full_adults_count().await
);
info!("Checking if {:?} is present as full_node", node_name);
self.capacity
.remove_full_adults(btree_set!(node_name))
.await;
}
async fn send_chunks_to_adults(
&mut self,
data: Blob,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
) -> Result<NodeDuty> {
let target_holders = self.capacity.get_chunk_holder_adults(data.name()).await;
info!("Storing {} copies of the data", target_holders.len());
if CHUNK_COPY_COUNT > target_holders.len() {
return self
.send_error(
Error::NoAdults(self.capacity.our_prefix().await),
msg_id,
origin,
)
.await;
}
let blob_write = BlobWrite::New(data);
Ok(NodeDuty::SendToNodes {
targets: target_holders,
msg: NodeMsg::NodeCmd {
cmd: NodeCmd::Chunks {
cmd: blob_write,
client_signed,
origin,
},
id: msg_id,
},
aggregation: Aggregation::AtDestination,
})
}
async fn store(
&mut self,
data: Blob,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
) -> Result<NodeDuty> |
pub async fn record_adult_read_liveness(
&mut self,
correlation_id: MessageId,
response: QueryResponse,
src: XorName,
) -> Result<NodeDuties> {
if!matches!(response, QueryResponse::GetBlob(_)) {
return Err(Error::Logic(format!(
"Got {:?}, but only `GetBlob` query responses are supposed to exist in this flow.",
response
)));
}
let mut duties = vec![];
if let Some((_address, end_user)) = self.adult_liveness.record_adult_read_liveness(
correlation_id,
&src,
response.is_success(),
) {
// If a full adult responds with error. Drop the response
if!response.is_success() && self.capacity.is_full(&src).await {
// We've already responded already with a success
// so do nothing
} else {
duties.push(NodeDuty::Send(build_client_query_response(
response,
correlation_id,
end_user,
)));
}
}
let mut unresponsive_adults = Vec::new();
for (name, count) in self.adult_liveness.find_unresponsive_adults() {
warn!(
"Adult {} has {} pending ops. It might be unresponsive",
name, count
);
unresponsive_adults.push(name);
}
if!unresponsive_adults.is_empty() {
duties.push(NodeDuty::ProposeOffline(unresponsive_adults));
}
Ok(duties)
}
async fn send_error(
&self,
error: Error,
msg_id: MessageId,
origin: EndUser,
) -> Result<NodeDuty> {
let error = convert_to_error_message(error);
Ok(NodeDuty::Send(build_client_error_response(
CmdError::Data(error),
msg_id,
origin,
)))
}
async fn delete(
&mut self,
address: BlobAddress,
msg_id: MessageId,
client_signed: ClientSigned,
origin: EndUser,
) -> Result<NodeDuty> {
let targets = self.capacity.get_chunk_holder_adults(address.name()).await;
let msg = NodeMsg::NodeCmd {
cmd: NodeCmd::Chunks {
cmd: BlobWrite::DeletePrivate(address),
client_signed,
origin,
},
id: msg_id,
};
Ok(NodeDuty::SendToNodes {
msg,
targets,
aggregation: Aggregation::AtDestination,
})
}
pub(super) async fn republish_chunk(&mut self, data: Blob) -> Result<NodeDuty> {
let owner = data.owner();
let target_holders = self.capacity.get_chunk_holder_adults(data.name()).await;
// deterministic msg id for aggregation
let msg_id = MessageId::from_content(&(*data.name(), owner, &target_holders))?;
info!(
"Republishing chunk {:?} to holders {:?} with MessageId {:?}",
data.address(),
&target_holders,
msg_id
);
Ok(NodeDuty::SendToNodes {
targets: target_holders,
msg: NodeMsg::NodeCmd {
cmd: NodeCmd::System(NodeSystemCmd::ReplicateChunk(data)),
id: msg_id,
},
aggregation: Aggregation::None,
})
}
pub(super) async fn read(
&mut self,
read: &BlobRead,
msg_id: MessageId,
origin: EndUser,
) -> Result<NodeDuty> {
match read {
BlobRead::Get(address) => self.get(*address, msg_id, origin).await,
}
}
async fn get(
&mut self,
address: BlobAddress,
msg_id: MessageId,
origin: EndUser,
) -> Result<NodeDuty> {
let targets = self.capacity.get_chunk_holder_adults(address.name()).await;
if targets.is_empty() {
return self
.send_error(
Error::NoAdults(self.capacity.our_prefix().await),
msg_id,
origin,
)
.await;
}
if self
.adult_liveness
.new_read(msg_id, address, origin, targets.clone())
{
let msg = NodeMsg::NodeQuery {
query: NodeQuery::Chunks {
query: BlobRead::Get(address),
origin,
},
id: msg_id,
};
Ok(NodeDuty::SendToNodes {
msg,
targets,
aggregation: Aggregation::None,
})
} else {
info!(
"Operation with MessageId {:?} is already in progress",
msg_id
);
Ok(NodeDuty::NoOp)
}
}
}
fn validate_data_owner(data: &Blob, requester: &PublicKey) -> Result<()> {
if data.is_private() {
data.owner()
.ok_or_else(|| Error::InvalidOwner(*requester))
.and_then(|data_owner| {
if data_owner!= requester {
Err(Error::InvalidOwner(*requester))
} else {
Ok(())
}
})
} else {
Ok(())
}
}
impl Display for BlobRecords {
fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
write!(formatter, "BlobRecords")
}
}
| {
if let Err(error) = validate_data_owner(&data, &client_signed.public_key) {
return self.send_error(error, msg_id, origin).await;
}
self.send_chunks_to_adults(data, msg_id, client_signed, origin)
.await
} | identifier_body |
fmt.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Handles nested padding in `Debug::fmt` output.
//!
//! `PadAdapter` is taken from rustc:
//! https://github.com/rust-lang/rust/commit/e3656bd81baa3c2cb5065da04f9debf378f99772
use std::fmt;
struct PadAdapter<'a, 'b: 'a> {
fmt: &'a mut fmt::Formatter<'b>,
on_newline: bool,
}
impl<'a, 'b: 'a> PadAdapter<'a, 'b> {
fn new(fmt: &'a mut fmt::Formatter<'b>) -> PadAdapter<'a, 'b> {
PadAdapter {
fmt,
on_newline: false,
}
}
}
impl<'a, 'b: 'a> fmt::Write for PadAdapter<'a, 'b> {
fn write_str(&mut self, mut s: &str) -> fmt::Result {
while!s.is_empty() {
if self.on_newline {
self.fmt.write_str(" ")?;
}
let split = match s.find('\n') {
Some(pos) => {
self.on_newline = true;
pos + 1
}
None => |
};
self.fmt.write_str(&s[..split])?;
s = &s[split..];
}
Ok(())
}
}
/// Write Debug output with alternative form considered.
pub fn write_debug(formatter: &mut fmt::Formatter, value: impl fmt::Debug) -> fmt::Result {
if formatter.alternate() {
let mut writer = PadAdapter::new(formatter);
fmt::write(&mut writer, format_args!("\n{:#?}", value))
} else {
write!(formatter, " {:?}", value)
}
}
| {
self.on_newline = false;
s.len()
} | conditional_block |
fmt.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Handles nested padding in `Debug::fmt` output.
//!
//! `PadAdapter` is taken from rustc:
//! https://github.com/rust-lang/rust/commit/e3656bd81baa3c2cb5065da04f9debf378f99772
use std::fmt;
struct PadAdapter<'a, 'b: 'a> {
fmt: &'a mut fmt::Formatter<'b>,
on_newline: bool,
}
impl<'a, 'b: 'a> PadAdapter<'a, 'b> { | fn new(fmt: &'a mut fmt::Formatter<'b>) -> PadAdapter<'a, 'b> {
PadAdapter {
fmt,
on_newline: false,
}
}
}
impl<'a, 'b: 'a> fmt::Write for PadAdapter<'a, 'b> {
fn write_str(&mut self, mut s: &str) -> fmt::Result {
while!s.is_empty() {
if self.on_newline {
self.fmt.write_str(" ")?;
}
let split = match s.find('\n') {
Some(pos) => {
self.on_newline = true;
pos + 1
}
None => {
self.on_newline = false;
s.len()
}
};
self.fmt.write_str(&s[..split])?;
s = &s[split..];
}
Ok(())
}
}
/// Write Debug output with alternative form considered.
pub fn write_debug(formatter: &mut fmt::Formatter, value: impl fmt::Debug) -> fmt::Result {
if formatter.alternate() {
let mut writer = PadAdapter::new(formatter);
fmt::write(&mut writer, format_args!("\n{:#?}", value))
} else {
write!(formatter, " {:?}", value)
}
} | random_line_split |
|
fmt.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Handles nested padding in `Debug::fmt` output.
//!
//! `PadAdapter` is taken from rustc:
//! https://github.com/rust-lang/rust/commit/e3656bd81baa3c2cb5065da04f9debf378f99772
use std::fmt;
struct PadAdapter<'a, 'b: 'a> {
fmt: &'a mut fmt::Formatter<'b>,
on_newline: bool,
}
impl<'a, 'b: 'a> PadAdapter<'a, 'b> {
fn new(fmt: &'a mut fmt::Formatter<'b>) -> PadAdapter<'a, 'b> {
PadAdapter {
fmt,
on_newline: false,
}
}
}
impl<'a, 'b: 'a> fmt::Write for PadAdapter<'a, 'b> {
fn write_str(&mut self, mut s: &str) -> fmt::Result | Ok(())
}
}
/// Write Debug output with alternative form considered.
pub fn write_debug(formatter: &mut fmt::Formatter, value: impl fmt::Debug) -> fmt::Result {
if formatter.alternate() {
let mut writer = PadAdapter::new(formatter);
fmt::write(&mut writer, format_args!("\n{:#?}", value))
} else {
write!(formatter, " {:?}", value)
}
}
| {
while !s.is_empty() {
if self.on_newline {
self.fmt.write_str(" ")?;
}
let split = match s.find('\n') {
Some(pos) => {
self.on_newline = true;
pos + 1
}
None => {
self.on_newline = false;
s.len()
}
};
self.fmt.write_str(&s[..split])?;
s = &s[split..];
}
| identifier_body |
fmt.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Handles nested padding in `Debug::fmt` output.
//!
//! `PadAdapter` is taken from rustc:
//! https://github.com/rust-lang/rust/commit/e3656bd81baa3c2cb5065da04f9debf378f99772
use std::fmt;
struct | <'a, 'b: 'a> {
fmt: &'a mut fmt::Formatter<'b>,
on_newline: bool,
}
impl<'a, 'b: 'a> PadAdapter<'a, 'b> {
fn new(fmt: &'a mut fmt::Formatter<'b>) -> PadAdapter<'a, 'b> {
PadAdapter {
fmt,
on_newline: false,
}
}
}
impl<'a, 'b: 'a> fmt::Write for PadAdapter<'a, 'b> {
fn write_str(&mut self, mut s: &str) -> fmt::Result {
while!s.is_empty() {
if self.on_newline {
self.fmt.write_str(" ")?;
}
let split = match s.find('\n') {
Some(pos) => {
self.on_newline = true;
pos + 1
}
None => {
self.on_newline = false;
s.len()
}
};
self.fmt.write_str(&s[..split])?;
s = &s[split..];
}
Ok(())
}
}
/// Write Debug output with alternative form considered.
pub fn write_debug(formatter: &mut fmt::Formatter, value: impl fmt::Debug) -> fmt::Result {
if formatter.alternate() {
let mut writer = PadAdapter::new(formatter);
fmt::write(&mut writer, format_args!("\n{:#?}", value))
} else {
write!(formatter, " {:?}", value)
}
}
| PadAdapter | identifier_name |
deque.rs | }
/// When stealing some data, this is an enumeration of the possible outcomes.
#[deriving(Eq)]
pub enum Stolen<T> {
/// The deque was empty at the time of stealing
Empty,
/// The stealer lost the race for stealing data, and a retry may return more
/// data.
Abort,
/// The stealer has successfully stolen some data.
Data(T),
}
/// The allocation pool for buffers used by work-stealing deques. Right now this
/// structure is used for reclamation of memory after it is no longer in use by
/// deques.
///
/// This data structure is protected by a mutex, but it is rarely used. Deques
/// will only use this structure when allocating a new buffer or deallocating a
/// previous one.
pub struct BufferPool<T> {
priv pool: Exclusive<~[~Buffer<T>]>,
}
/// An internal buffer used by the chase-lev deque. This structure is actually
/// implemented as a circular buffer, and is used as the intermediate storage of
/// the data in the deque.
///
/// This type is implemented with *T instead of ~[T] for two reasons:
///
/// 1. There is nothing safe about using this buffer. This easily allows the
/// same value to be read twice in to rust, and there is nothing to
/// prevent this. The usage by the deque must ensure that one of the
/// values is forgotten. Furthermore, we only ever want to manually run
/// destructors for values in this buffer (on drop) because the bounds
/// are defined by the deque it's owned by.
///
/// 2. We can certainly avoid bounds checks using *T instead of ~[T], although
/// LLVM is probably pretty good at doing this already.
struct Buffer<T> {
storage: *T,
log_size: int,
}
impl<T: Send> BufferPool<T> {
/// Allocates a new buffer pool which in turn can be used to allocate new
/// deques.
pub fn new() -> BufferPool<T> {
BufferPool { pool: Exclusive::new(~[]) }
}
/// Allocates a new work-stealing deque which will send/receiving memory to
/// and from this buffer pool.
pub fn deque(&mut self) -> (Worker<T>, Stealer<T>) {
let (a, b) = UnsafeArc::new2(Deque::new(self.clone()));
(Worker { deque: a }, Stealer { deque: b })
}
fn alloc(&mut self, bits: int) -> ~Buffer<T> {
unsafe {
self.pool.with(|pool| {
match pool.iter().position(|x| x.size() >= (1 << bits)) {
Some(i) => pool.remove(i),
None => ~Buffer::new(bits)
}
})
}
}
fn free(&mut self, buf: ~Buffer<T>) {
unsafe {
let mut buf = Some(buf);
self.pool.with(|pool| {
let buf = buf.take_unwrap();
match pool.iter().position(|v| v.size() > buf.size()) {
Some(i) => pool.insert(i, buf),
None => pool.push(buf),
}
})
}
}
}
impl<T: Send> Clone for BufferPool<T> {
fn clone(&self) -> BufferPool<T> { BufferPool { pool: self.pool.clone() } }
}
impl<T: Send> Worker<T> {
/// Pushes data onto the front of this work queue.
pub fn push(&mut self, t: T) {
unsafe { (*self.deque.get()).push(t) }
}
/// Pops data off the front of the work queue, returning `None` on an empty
/// queue.
pub fn pop(&mut self) -> Option<T> {
unsafe { (*self.deque.get()).pop() }
}
/// Gets access to the buffer pool that this worker is attached to. This can
/// be used to create more deques which share the same buffer pool as this
/// deque.
pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool<T> {
unsafe { &mut (*self.deque.get()).pool }
}
}
impl<T: Send> Stealer<T> {
/// Steals work off the end of the queue (opposite of the worker's end)
pub fn steal(&mut self) -> Stolen<T> {
unsafe { (*self.deque.get()).steal() }
}
/// Gets access to the buffer pool that this stealer is attached to. This
/// can be used to create more deques which share the same buffer pool as
/// this deque.
pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool<T> {
unsafe { &mut (*self.deque.get()).pool }
}
}
impl<T: Send> Clone for Stealer<T> {
fn clone(&self) -> Stealer<T> { Stealer { deque: self.deque.clone() } }
}
// Almost all of this code can be found directly in the paper so I'm not
// personally going to heavily comment what's going on here.
impl<T: Send> Deque<T> {
fn new(mut pool: BufferPool<T>) -> Deque<T> {
let buf = pool.alloc(MIN_BITS);
Deque {
bottom: AtomicInt::new(0),
top: AtomicInt::new(0),
array: AtomicPtr::new(unsafe { cast::transmute(buf) }),
pool: pool,
}
}
unsafe fn push(&mut self, data: T) {
let mut b = self.bottom.load(SeqCst);
let t = self.top.load(SeqCst);
let mut a = self.array.load(SeqCst);
let size = b - t;
if size >= (*a).size() - 1 {
// You won't find this code in the chase-lev deque paper. This is
// alluded to in a small footnote, however. We always free a buffer
// when growing in order to prevent leaks.
a = self.swap_buffer(b, a, (*a).resize(b, t, 1));
b = self.bottom.load(SeqCst);
}
(*a).put(b, data);
self.bottom.store(b + 1, SeqCst);
}
unsafe fn pop(&mut self) -> Option<T> {
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
let b = b - 1;
self.bottom.store(b, SeqCst);
let t = self.top.load(SeqCst);
let size = b - t;
if size < 0 {
self.bottom.store(t, SeqCst);
return None;
}
let data = (*a).get(b);
if size > 0 {
self.maybe_shrink(b, t);
return Some(data);
}
if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
self.bottom.store(t + 1, SeqCst);
return Some(data);
} else {
self.bottom.store(t + 1, SeqCst);
cast::forget(data); // someone else stole this value
return None;
}
}
unsafe fn steal(&mut self) -> Stolen<T> {
let t = self.top.load(SeqCst);
let old = self.array.load(SeqCst);
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
let size = b - t;
if size <= 0 { return Empty }
if size % (*a).size() == 0 {
if a == old && t == self.top.load(SeqCst) {
return Empty
}
return Abort
}
let data = (*a).get(t);
if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
Data(data)
} else {
cast::forget(data); // someone else stole this value
Abort
}
}
unsafe fn maybe_shrink(&mut self, b: int, t: int) {
let a = self.array.load(SeqCst);
if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
self.swap_buffer(b, a, (*a).resize(b, t, -1));
}
}
// Helper routine not mentioned in the paper which is used in growing and
// shrinking buffers to swap in a new buffer into place. As a bit of a
// recap, the whole point that we need a buffer pool rather than just
// calling malloc/free directly is that stealers can continue using buffers
// after this method has called 'free' on it. The continued usage is simply
// a read followed by a forget, but we must make sure that the memory can
// continue to be read after we flag this buffer for reclamation.
unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer<T>,
buf: Buffer<T>) -> *mut Buffer<T> {
let newbuf: *mut Buffer<T> = cast::transmute(~buf);
self.array.store(newbuf, SeqCst);
let ss = (*newbuf).size();
self.bottom.store(b + ss, SeqCst);
let t = self.top.load(SeqCst);
if self.top.compare_and_swap(t, t + ss, SeqCst)!= t {
self.bottom.store(b, SeqCst);
}
self.pool.free(cast::transmute(old));
return newbuf;
}
}
#[unsafe_destructor]
impl<T: Send> Drop for Deque<T> {
fn drop(&mut self) {
let t = self.top.load(SeqCst);
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
// Free whatever is leftover in the dequeue, and then move the buffer
// back into the pool.
for i in range(t, b) {
let _: T = unsafe { (*a).get(i) };
}
self.pool.free(unsafe { cast::transmute(a) });
}
}
impl<T: Send> Buffer<T> {
unsafe fn new(log_size: int) -> Buffer<T> {
let size = (1 << log_size) * mem::size_of::<T>();
let buffer = libc::malloc(size as libc::size_t);
assert!(!buffer.is_null());
Buffer {
storage: buffer as *T,
log_size: log_size,
}
}
fn size(&self) -> int { 1 << self.log_size }
// Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly
fn mask(&self) -> int { (1 << self.log_size) - 1 }
// This does not protect against loading duplicate values of the same cell,
// nor does this clear out the contents contained within. Hence, this is a
// very unsafe method which the caller needs to treat specially in case a
// race is lost.
unsafe fn get(&self, i: int) -> T {
ptr::read_ptr(self.storage.offset(i & self.mask()))
}
// Unsafe because this unsafely overwrites possibly uninitialized or
// initialized data.
unsafe fn put(&mut self, i: int, t: T) {
let ptr = self.storage.offset(i & self.mask());
ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1);
cast::forget(t);
}
// Again, unsafe because this has incredibly dubious ownership violations.
// It is assumed that this buffer is immediately dropped.
unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer<T> {
let mut buf = Buffer::new(self.log_size + delta);
for i in range(t, b) {
buf.put(i, self.get(i));
}
return buf;
}
}
#[unsafe_destructor]
impl<T: Send> Drop for Buffer<T> {
fn drop(&mut self) {
// It is assumed that all buffers are empty on drop.
unsafe { libc::free(self.storage as *libc::c_void) }
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::{Data, BufferPool, Abort, Empty, Worker, Stealer};
use cast;
use rt::thread::Thread;
use rand;
use rand::Rng;
use unstable::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
AtomicUint, INIT_ATOMIC_UINT};
use vec;
#[test]
fn smoke() {
let mut pool = BufferPool::new();
let (mut w, mut s) = pool.deque();
assert_eq!(w.pop(), None);
assert_eq!(s.steal(), Empty);
w.push(1);
assert_eq!(w.pop(), Some(1));
w.push(1);
assert_eq!(s.steal(), Data(1));
w.push(1);
assert_eq!(s.clone().steal(), Data(1));
}
#[test]
fn stealpush() {
static AMT: int = 100000;
let mut pool = BufferPool::<int>::new();
let (mut w, s) = pool.deque();
let t = do Thread::start {
let mut s = s;
let mut left = AMT;
while left > 0 {
match s.steal() {
Data(i) => {
assert_eq!(i, 1);
left -= 1;
}
Abort | Empty => {}
}
}
};
for _ in range(0, AMT) {
w.push(1);
}
t.join();
}
#[test]
fn stealpush_large() {
static AMT: int = 100000;
let mut pool = BufferPool::<(int, int)>::new();
let (mut w, s) = pool.deque();
let t = do Thread::start {
let mut s = s;
let mut left = AMT;
while left > 0 {
match s.steal() {
Data((1, 10)) => { left -= 1; }
Data(..) => fail!(),
Abort | Empty => {}
}
}
};
for _ in range(0, AMT) {
w.push((1, 10));
}
t.join();
}
fn stampede(mut w: Worker<~int>, s: Stealer<~int>,
nthreads: int, amt: uint) {
for _ in range(0, amt) {
w.push(~20);
}
let mut remaining = AtomicUint::new(amt);
let unsafe_remaining: *mut AtomicUint = &mut remaining;
let threads = range(0, nthreads).map(|_| {
let s = s.clone();
do Thread::start {
unsafe {
let mut s = s;
while (*unsafe_remaining).load(SeqCst) > 0 {
match s.steal() {
Data(~20) => {
(*unsafe_remaining).fetch_sub(1, SeqCst);
}
Data(..) => fail!(),
Abort | Empty => {}
}
}
}
}
}).to_owned_vec();
while remaining.load(SeqCst) > 0 {
match w.pop() {
Some(~20) => { remaining.fetch_sub(1, SeqCst); }
Some(..) => fail!(),
None => {}
}
}
for thread in threads.move_iter() {
thread.join();
}
}
#[test]
fn run_stampede() {
let mut pool = BufferPool::<~int>::new();
let (w, s) = pool.deque();
stampede(w, s, 8, 10000);
}
#[test]
fn many_stampede() {
static AMT: uint = 4;
let mut pool = BufferPool::<~int>::new();
let threads = range(0, AMT).map(|_| {
let (w, s) = pool.deque();
do Thread::start {
stampede(w, s, 4, 10000);
}
}).to_owned_vec();
for thread in threads.move_iter() {
thread.join();
}
}
#[test]
fn stress() {
static AMT: int = 100000;
static NTHREADS: int = 8;
static mut DONE: AtomicBool = INIT_ATOMIC_BOOL;
static mut HITS: AtomicUint = INIT_ATOMIC_UINT;
let mut pool = BufferPool::<int>::new();
let (mut w, s) = pool.deque();
let threads = range(0, NTHREADS).map(|_| {
let s = s.clone();
do Thread::start {
unsafe {
let mut s = s;
loop {
match s.steal() {
Data(2) => { HITS.fetch_add(1, SeqCst); }
Data(..) => fail!(),
_ if DONE.load(SeqCst) => break,
_ => {}
}
}
}
}
}).to_owned_vec();
let mut rng = rand::task_rng();
let mut expected = 0;
while expected < AMT {
if rng.gen_range(0, 3) == 2 {
match w.pop() {
None => {}
Some(2) => unsafe { HITS.fetch_add(1, SeqCst); },
Some(_) => fail!(),
}
} else {
expected += 1;
w.push(2);
}
}
unsafe {
while HITS.load(SeqCst) < AMT as uint {
match w.pop() {
None => {}
Some(2) => { HITS.fetch_add(1, SeqCst); },
Some(_) => fail!(),
}
}
DONE.store(true, SeqCst);
}
for thread in threads.move_iter() {
thread.join();
}
assert_eq!(unsafe { HITS.load(SeqCst) }, expected as uint);
}
#[test]
#[ignore(cfg(windows))] // apparently windows scheduling is weird?
fn no_starvation() | {
static AMT: int = 10000;
static NTHREADS: int = 4;
static mut DONE: AtomicBool = INIT_ATOMIC_BOOL;
let mut pool = BufferPool::<(int, uint)>::new();
let (mut w, s) = pool.deque();
let (threads, hits) = vec::unzip(range(0, NTHREADS).map(|_| {
let s = s.clone();
let box = ~AtomicUint::new(0);
let thread_box = unsafe {
*cast::transmute::<&~AtomicUint, **mut AtomicUint>(&box)
};
(do Thread::start {
unsafe {
let mut s = s;
loop {
match s.steal() {
Data((1, 2)) => {
(*thread_box).fetch_add(1, SeqCst); | identifier_body |
|
deque.rs |
// XXX: all atomic operations in this module use a SeqCst ordering. That is
// probably overkill
use cast;
use clone::Clone;
use iter::range;
use kinds::Send;
use libc;
use mem;
use ops::Drop;
use option::{Option, Some, None};
use ptr;
use unstable::atomics::{AtomicInt, AtomicPtr, SeqCst};
use unstable::sync::{UnsafeArc, Exclusive};
// Once the queue is less than 1/K full, then it will be downsized. Note that
// the deque requires that this number be less than 2.
static K: int = 4;
// Minimum number of bits that a buffer size should be. No buffer will resize to
// under this value, and all deques will initially contain a buffer of this
// size.
//
// The size in question is 1 << MIN_BITS
static MIN_BITS: int = 7;
struct Deque<T> {
bottom: AtomicInt,
top: AtomicInt,
array: AtomicPtr<Buffer<T>>,
pool: BufferPool<T>,
}
/// Worker half of the work-stealing deque. This worker has exclusive access to
/// one side of the deque, and uses `push` and `pop` method to manipulate it.
///
/// There may only be one worker per deque.
pub struct Worker<T> {
priv deque: UnsafeArc<Deque<T>>,
}
/// The stealing half of the work-stealing deque. Stealers have access to the
/// opposite end of the deque from the worker, and they only have access to the
/// `steal` method.
pub struct Stealer<T> {
priv deque: UnsafeArc<Deque<T>>,
}
/// When stealing some data, this is an enumeration of the possible outcomes.
#[deriving(Eq)]
pub enum Stolen<T> {
/// The deque was empty at the time of stealing
Empty,
/// The stealer lost the race for stealing data, and a retry may return more
/// data.
Abort,
/// The stealer has successfully stolen some data.
Data(T),
}
/// The allocation pool for buffers used by work-stealing deques. Right now this
/// structure is used for reclamation of memory after it is no longer in use by
/// deques.
///
/// This data structure is protected by a mutex, but it is rarely used. Deques
/// will only use this structure when allocating a new buffer or deallocating a
/// previous one.
pub struct BufferPool<T> {
priv pool: Exclusive<~[~Buffer<T>]>,
}
/// An internal buffer used by the chase-lev deque. This structure is actually
/// implemented as a circular buffer, and is used as the intermediate storage of
/// the data in the deque.
///
/// This type is implemented with *T instead of ~[T] for two reasons:
///
/// 1. There is nothing safe about using this buffer. This easily allows the
/// same value to be read twice in to rust, and there is nothing to
/// prevent this. The usage by the deque must ensure that one of the
/// values is forgotten. Furthermore, we only ever want to manually run
/// destructors for values in this buffer (on drop) because the bounds
/// are defined by the deque it's owned by.
///
/// 2. We can certainly avoid bounds checks using *T instead of ~[T], although
/// LLVM is probably pretty good at doing this already.
struct Buffer<T> {
storage: *T,
log_size: int,
}
impl<T: Send> BufferPool<T> {
/// Allocates a new buffer pool which in turn can be used to allocate new
/// deques.
pub fn new() -> BufferPool<T> {
BufferPool { pool: Exclusive::new(~[]) }
}
/// Allocates a new work-stealing deque which will send/receiving memory to
/// and from this buffer pool.
pub fn deque(&mut self) -> (Worker<T>, Stealer<T>) {
let (a, b) = UnsafeArc::new2(Deque::new(self.clone()));
(Worker { deque: a }, Stealer { deque: b })
}
fn alloc(&mut self, bits: int) -> ~Buffer<T> {
unsafe {
self.pool.with(|pool| {
match pool.iter().position(|x| x.size() >= (1 << bits)) {
Some(i) => pool.remove(i),
None => ~Buffer::new(bits)
}
})
}
}
fn free(&mut self, buf: ~Buffer<T>) {
unsafe {
let mut buf = Some(buf);
self.pool.with(|pool| {
let buf = buf.take_unwrap();
match pool.iter().position(|v| v.size() > buf.size()) {
Some(i) => pool.insert(i, buf),
None => pool.push(buf),
}
})
}
}
}
impl<T: Send> Clone for BufferPool<T> {
fn clone(&self) -> BufferPool<T> { BufferPool { pool: self.pool.clone() } }
}
impl<T: Send> Worker<T> {
/// Pushes data onto the front of this work queue.
pub fn push(&mut self, t: T) {
unsafe { (*self.deque.get()).push(t) }
}
/// Pops data off the front of the work queue, returning `None` on an empty
/// queue.
pub fn pop(&mut self) -> Option<T> {
unsafe { (*self.deque.get()).pop() }
}
/// Gets access to the buffer pool that this worker is attached to. This can
/// be used to create more deques which share the same buffer pool as this
/// deque.
pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool<T> {
unsafe { &mut (*self.deque.get()).pool }
}
}
impl<T: Send> Stealer<T> {
/// Steals work off the end of the queue (opposite of the worker's end)
pub fn steal(&mut self) -> Stolen<T> {
unsafe { (*self.deque.get()).steal() }
}
/// Gets access to the buffer pool that this stealer is attached to. This
/// can be used to create more deques which share the same buffer pool as
/// this deque.
pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool<T> {
unsafe { &mut (*self.deque.get()).pool }
}
}
impl<T: Send> Clone for Stealer<T> {
fn clone(&self) -> Stealer<T> { Stealer { deque: self.deque.clone() } }
}
// Almost all of this code can be found directly in the paper so I'm not
// personally going to heavily comment what's going on here.
impl<T: Send> Deque<T> {
fn new(mut pool: BufferPool<T>) -> Deque<T> {
let buf = pool.alloc(MIN_BITS);
Deque {
bottom: AtomicInt::new(0),
top: AtomicInt::new(0),
array: AtomicPtr::new(unsafe { cast::transmute(buf) }),
pool: pool,
}
}
unsafe fn push(&mut self, data: T) {
let mut b = self.bottom.load(SeqCst);
let t = self.top.load(SeqCst);
let mut a = self.array.load(SeqCst);
let size = b - t;
if size >= (*a).size() - 1 {
// You won't find this code in the chase-lev deque paper. This is
// alluded to in a small footnote, however. We always free a buffer
// when growing in order to prevent leaks.
a = self.swap_buffer(b, a, (*a).resize(b, t, 1));
b = self.bottom.load(SeqCst);
}
(*a).put(b, data);
self.bottom.store(b + 1, SeqCst);
}
unsafe fn pop(&mut self) -> Option<T> {
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
let b = b - 1;
self.bottom.store(b, SeqCst);
let t = self.top.load(SeqCst);
let size = b - t;
if size < 0 {
self.bottom.store(t, SeqCst);
return None;
}
let data = (*a).get(b);
if size > 0 {
self.maybe_shrink(b, t);
return Some(data);
}
if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
self.bottom.store(t + 1, SeqCst);
return Some(data);
} else {
self.bottom.store(t + 1, SeqCst);
cast::forget(data); // someone else stole this value
return None;
}
}
unsafe fn steal(&mut self) -> Stolen<T> {
let t = self.top.load(SeqCst);
let old = self.array.load(SeqCst);
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
let size = b - t;
if size <= 0 { return Empty }
if size % (*a).size() == 0 {
if a == old && t == self.top.load(SeqCst) {
return Empty
}
return Abort
}
let data = (*a).get(t);
if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
Data(data)
} else {
cast::forget(data); // someone else stole this value
Abort
}
}
unsafe fn maybe_shrink(&mut self, b: int, t: int) {
let a = self.array.load(SeqCst);
if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
self.swap_buffer(b, a, (*a).resize(b, t, -1));
}
}
// Helper routine not mentioned in the paper which is used in growing and
// shrinking buffers to swap in a new buffer into place. As a bit of a
// recap, the whole point that we need a buffer pool rather than just
// calling malloc/free directly is that stealers can continue using buffers
// after this method has called 'free' on it. The continued usage is simply
// a read followed by a forget, but we must make sure that the memory can
// continue to be read after we flag this buffer for reclamation.
unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer<T>,
buf: Buffer<T>) -> *mut Buffer<T> {
let newbuf: *mut Buffer<T> = cast::transmute(~buf);
self.array.store(newbuf, SeqCst);
let ss = (*newbuf).size();
self.bottom.store(b + ss, SeqCst);
let t = self.top.load(SeqCst);
if self.top.compare_and_swap(t, t + ss, SeqCst)!= t {
self.bottom.store(b, SeqCst);
}
self.pool.free(cast::transmute(old));
return newbuf;
}
}
#[unsafe_destructor]
impl<T: Send> Drop for Deque<T> {
fn drop(&mut self) {
let t = self.top.load(SeqCst);
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
// Free whatever is leftover in the dequeue, and then move the buffer
// back into the pool.
for i in range(t, b) {
let _: T = unsafe { (*a).get(i) };
}
self.pool.free(unsafe { cast::transmute(a) });
}
}
impl<T: Send> Buffer<T> {
unsafe fn new(log_size: int) -> Buffer<T> {
let size = (1 << log_size) * mem::size_of::<T>();
let buffer = libc::malloc(size as libc::size_t);
assert!(!buffer.is_null());
Buffer {
storage: buffer as *T,
log_size: log_size,
}
}
fn size(&self) -> int { 1 << self.log_size }
// Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly
fn mask(&self) -> int { (1 << self.log_size) - 1 }
// This does not protect against loading duplicate values of the same cell,
// nor does this clear out the contents contained within. Hence, this is a
// very unsafe method which the caller needs to treat specially in case a
// race is lost.
unsafe fn get(&self, i: int) -> T {
ptr::read_ptr(self.storage.offset(i & self.mask()))
}
// Unsafe because this unsafely overwrites possibly uninitialized or
// initialized data.
unsafe fn put(&mut self, i: int, t: T) {
let ptr = self.storage.offset(i & self.mask());
ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1);
cast::forget(t);
}
// Again, unsafe because this has incredibly dubious ownership violations.
// It is assumed that this buffer is immediately dropped.
unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer<T> {
let mut buf = Buffer::new(self.log_size + delta);
for i in range(t, b) {
buf.put(i, self.get(i));
}
return buf;
}
}
#[unsafe_destructor]
impl<T: Send> Drop for Buffer<T> {
fn drop(&mut self) {
// It is assumed that all buffers are empty on drop.
unsafe { libc::free(self.storage as *libc::c_void) }
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::{Data, BufferPool, Abort, Empty, Worker, Stealer};
use cast;
use rt::thread::Thread;
use rand;
use rand::Rng;
use unstable::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
AtomicUint, INIT_ATOMIC_UINT};
use vec;
#[test]
fn smoke() {
let mut pool = BufferPool::new();
let (mut w, mut s) = pool.deque();
assert_eq!(w.pop(), None);
assert_eq!(s.steal(), Empty);
w.push(1);
assert_eq!(w.pop(), Some(1));
w.push(1);
assert_eq!(s.steal(), Data(1));
w.push(1);
assert_eq!(s.clone().steal(), Data(1));
}
#[test]
fn stealpush() {
static AMT: int = 100000;
let mut pool = BufferPool::<int>::new();
let (mut w, s) = pool.deque();
let t = do Thread::start {
let mut s = s;
let mut left = AMT;
while left > 0 {
match s.steal() {
Data(i) => {
assert_eq!(i, 1);
left -= 1;
}
Abort | Empty => {}
}
}
};
for _ in range(0, AMT) {
w.push(1);
}
t.join();
}
#[test]
fn stealpush_large() {
static AMT: int = 100000;
let mut pool = BufferPool::<(int, int)>::new();
let (mut w, s) = pool.deque();
let t = do Thread::start {
let mut s = s;
let mut left = AMT;
while left > 0 {
match s.steal() {
Data((1, 10)) => { left -= 1; }
Data(..) => fail!(),
Abort | Empty => {}
}
}
};
for _ in range(0, AMT) {
w.push((1, 10));
}
t.join();
}
fn stampede(mut w: Worker<~int>, s: Stealer<~int>,
nthreads: int, amt: uint) {
for _ in range(0, amt) {
w.push(~20);
}
let mut remaining = AtomicUint::new(amt);
let unsafe_remaining: *mut AtomicUint = &mut remaining;
let threads = range(0, nthreads).map(|_| {
let s = s.clone();
do Thread::start {
unsafe {
let mut s = s;
while (*unsafe_remaining).load(SeqCst) > 0 {
match s.steal() {
Data(~20) => {
(*unsafe_remaining).fetch_sub(1, SeqCst);
}
Data(..) => fail!(),
Abort | Empty => {}
}
}
}
}
}).to_owned_vec();
while remaining.load(SeqCst) > 0 {
match w.pop() {
Some(~20) => { remaining.fetch_sub(1, SeqCst); }
Some(..) => fail!(),
None => {}
}
}
for thread in threads.move_iter() {
thread.join();
}
}
#[test]
fn run_stampede() {
let mut pool = BufferPool::<~int>::new();
let (w, s) = pool.deque();
stampede(w, s, 8, 10000);
}
#[test]
fn many_stampede() {
static AMT: uint = 4;
let mut pool = BufferPool::<~int>::new();
let threads = range(0, AMT).map(|_| {
let (w, s) = pool.deque();
do Thread::start {
stampede(w, s, 4, 10000);
}
}).to_owned_vec();
for thread in threads.move_iter() {
thread.join();
}
}
#[test]
fn stress() {
static AMT: int = 100000;
static NTHREADS: int = 8;
static mut DONE: AtomicBool = INIT_ATOMIC_BOOL;
static mut HITS: AtomicUint = INIT_ATOMIC_UINT;
let mut pool = BufferPool::<int>::new();
let (mut w, s) = pool.deque();
let threads = range(0, NTHREADS).map(|_| {
let s = s.clone();
do Thread::start {
unsafe {
let mut s = s;
loop {
match s.steal() {
Data(2) => { HITS.fetch_add(1, SeqCst); }
Data(..) => fail!(),
_ if DONE.load(SeqCst) => break,
_ => {}
}
}
}
}
}).to_owned_vec();
let mut rng = rand::task_rng();
let mut expected = 0;
while expected < AMT {
if rng.gen_range(0, 3) == 2 {
match w.pop() {
None => {}
Some(2) => unsafe { HITS.fetch_add(1, SeqCst); },
Some(_) => fail!(),
}
} else {
expected += 1;
w.push(2);
}
}
unsafe {
while HITS.load(SeqCst) < AMT as uint {
match w.pop() {
None => {}
Some(2) => { HITS.fetch_add(1, SeqCst); },
Some(_) => fail!(),
}
}
DONE.store(true, SeqCst);
}
for thread in threads.move_iter() {
thread.join();
}
assert_eq!(unsafe { HITS.load(SeqCst) }, expected as uint);
}
#[test]
#[ignore( | // NB: the "buffer pool" strategy is not done for speed, but rather for
// correctness. For more info, see the comment on `swap_buffer` | random_line_split |
|
deque.rs | is
// probably overkill
use cast;
use clone::Clone;
use iter::range;
use kinds::Send;
use libc;
use mem;
use ops::Drop;
use option::{Option, Some, None};
use ptr;
use unstable::atomics::{AtomicInt, AtomicPtr, SeqCst};
use unstable::sync::{UnsafeArc, Exclusive};
// Once the queue is less than 1/K full, then it will be downsized. Note that
// the deque requires that this number be less than 2.
static K: int = 4;
// Minimum number of bits that a buffer size should be. No buffer will resize to
// under this value, and all deques will initially contain a buffer of this
// size.
//
// The size in question is 1 << MIN_BITS
static MIN_BITS: int = 7;
struct Deque<T> {
bottom: AtomicInt,
top: AtomicInt,
array: AtomicPtr<Buffer<T>>,
pool: BufferPool<T>,
}
/// Worker half of the work-stealing deque. This worker has exclusive access to
/// one side of the deque, and uses `push` and `pop` method to manipulate it.
///
/// There may only be one worker per deque.
pub struct Worker<T> {
priv deque: UnsafeArc<Deque<T>>,
}
/// The stealing half of the work-stealing deque. Stealers have access to the
/// opposite end of the deque from the worker, and they only have access to the
/// `steal` method.
pub struct Stealer<T> {
priv deque: UnsafeArc<Deque<T>>,
}
/// When stealing some data, this is an enumeration of the possible outcomes.
#[deriving(Eq)]
pub enum Stolen<T> {
/// The deque was empty at the time of stealing
Empty,
/// The stealer lost the race for stealing data, and a retry may return more
/// data.
Abort,
/// The stealer has successfully stolen some data.
Data(T),
}
/// The allocation pool for buffers used by work-stealing deques. Right now this
/// structure is used for reclamation of memory after it is no longer in use by
/// deques.
///
/// This data structure is protected by a mutex, but it is rarely used. Deques
/// will only use this structure when allocating a new buffer or deallocating a
/// previous one.
pub struct BufferPool<T> {
priv pool: Exclusive<~[~Buffer<T>]>,
}
/// An internal buffer used by the chase-lev deque. This structure is actually
/// implemented as a circular buffer, and is used as the intermediate storage of
/// the data in the deque.
///
/// This type is implemented with *T instead of ~[T] for two reasons:
///
/// 1. There is nothing safe about using this buffer. This easily allows the
/// same value to be read twice in to rust, and there is nothing to
/// prevent this. The usage by the deque must ensure that one of the
/// values is forgotten. Furthermore, we only ever want to manually run
/// destructors for values in this buffer (on drop) because the bounds
/// are defined by the deque it's owned by.
///
/// 2. We can certainly avoid bounds checks using *T instead of ~[T], although
/// LLVM is probably pretty good at doing this already.
struct Buffer<T> {
storage: *T,
log_size: int,
}
impl<T: Send> BufferPool<T> {
/// Allocates a new buffer pool which in turn can be used to allocate new
/// deques.
pub fn new() -> BufferPool<T> {
BufferPool { pool: Exclusive::new(~[]) }
}
/// Allocates a new work-stealing deque which will send/receiving memory to
/// and from this buffer pool.
pub fn deque(&mut self) -> (Worker<T>, Stealer<T>) {
let (a, b) = UnsafeArc::new2(Deque::new(self.clone()));
(Worker { deque: a }, Stealer { deque: b })
}
fn alloc(&mut self, bits: int) -> ~Buffer<T> {
unsafe {
self.pool.with(|pool| {
match pool.iter().position(|x| x.size() >= (1 << bits)) {
Some(i) => pool.remove(i),
None => ~Buffer::new(bits)
}
})
}
}
fn free(&mut self, buf: ~Buffer<T>) {
unsafe {
let mut buf = Some(buf);
self.pool.with(|pool| {
let buf = buf.take_unwrap();
match pool.iter().position(|v| v.size() > buf.size()) {
Some(i) => pool.insert(i, buf),
None => pool.push(buf),
}
})
}
}
}
impl<T: Send> Clone for BufferPool<T> {
fn clone(&self) -> BufferPool<T> { BufferPool { pool: self.pool.clone() } }
}
impl<T: Send> Worker<T> {
/// Pushes data onto the front of this work queue.
pub fn push(&mut self, t: T) {
unsafe { (*self.deque.get()).push(t) }
}
/// Pops data off the front of the work queue, returning `None` on an empty
/// queue.
pub fn pop(&mut self) -> Option<T> {
unsafe { (*self.deque.get()).pop() }
}
/// Gets access to the buffer pool that this worker is attached to. This can
/// be used to create more deques which share the same buffer pool as this
/// deque.
pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool<T> {
unsafe { &mut (*self.deque.get()).pool }
}
}
impl<T: Send> Stealer<T> {
/// Steals work off the end of the queue (opposite of the worker's end)
pub fn steal(&mut self) -> Stolen<T> {
unsafe { (*self.deque.get()).steal() }
}
/// Gets access to the buffer pool that this stealer is attached to. This
/// can be used to create more deques which share the same buffer pool as
/// this deque.
pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool<T> {
unsafe { &mut (*self.deque.get()).pool }
}
}
impl<T: Send> Clone for Stealer<T> {
fn clone(&self) -> Stealer<T> { Stealer { deque: self.deque.clone() } }
}
// Almost all of this code can be found directly in the paper so I'm not
// personally going to heavily comment what's going on here.
impl<T: Send> Deque<T> {
fn new(mut pool: BufferPool<T>) -> Deque<T> {
let buf = pool.alloc(MIN_BITS);
Deque {
bottom: AtomicInt::new(0),
top: AtomicInt::new(0),
array: AtomicPtr::new(unsafe { cast::transmute(buf) }),
pool: pool,
}
}
unsafe fn push(&mut self, data: T) {
let mut b = self.bottom.load(SeqCst);
let t = self.top.load(SeqCst);
let mut a = self.array.load(SeqCst);
let size = b - t;
if size >= (*a).size() - 1 {
// You won't find this code in the chase-lev deque paper. This is
// alluded to in a small footnote, however. We always free a buffer
// when growing in order to prevent leaks.
a = self.swap_buffer(b, a, (*a).resize(b, t, 1));
b = self.bottom.load(SeqCst);
}
(*a).put(b, data);
self.bottom.store(b + 1, SeqCst);
}
unsafe fn pop(&mut self) -> Option<T> {
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
let b = b - 1;
self.bottom.store(b, SeqCst);
let t = self.top.load(SeqCst);
let size = b - t;
if size < 0 {
self.bottom.store(t, SeqCst);
return None;
}
let data = (*a).get(b);
if size > 0 {
self.maybe_shrink(b, t);
return Some(data);
}
if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
self.bottom.store(t + 1, SeqCst);
return Some(data);
} else {
self.bottom.store(t + 1, SeqCst);
cast::forget(data); // someone else stole this value
return None;
}
}
unsafe fn steal(&mut self) -> Stolen<T> {
let t = self.top.load(SeqCst);
let old = self.array.load(SeqCst);
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
let size = b - t;
if size <= 0 { return Empty }
if size % (*a).size() == 0 {
if a == old && t == self.top.load(SeqCst) {
return Empty
}
return Abort
}
let data = (*a).get(t);
if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
Data(data)
} else {
cast::forget(data); // someone else stole this value
Abort
}
}
unsafe fn maybe_shrink(&mut self, b: int, t: int) {
let a = self.array.load(SeqCst);
if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
self.swap_buffer(b, a, (*a).resize(b, t, -1));
}
}
// Helper routine not mentioned in the paper which is used in growing and
// shrinking buffers to swap in a new buffer into place. As a bit of a
// recap, the whole point that we need a buffer pool rather than just
// calling malloc/free directly is that stealers can continue using buffers
// after this method has called 'free' on it. The continued usage is simply
// a read followed by a forget, but we must make sure that the memory can
// continue to be read after we flag this buffer for reclamation.
unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer<T>,
buf: Buffer<T>) -> *mut Buffer<T> {
let newbuf: *mut Buffer<T> = cast::transmute(~buf);
self.array.store(newbuf, SeqCst);
let ss = (*newbuf).size();
self.bottom.store(b + ss, SeqCst);
let t = self.top.load(SeqCst);
if self.top.compare_and_swap(t, t + ss, SeqCst)!= t {
self.bottom.store(b, SeqCst);
}
self.pool.free(cast::transmute(old));
return newbuf;
}
}
#[unsafe_destructor]
impl<T: Send> Drop for Deque<T> {
fn drop(&mut self) {
let t = self.top.load(SeqCst);
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
// Free whatever is leftover in the dequeue, and then move the buffer
// back into the pool.
for i in range(t, b) {
let _: T = unsafe { (*a).get(i) };
}
self.pool.free(unsafe { cast::transmute(a) });
}
}
impl<T: Send> Buffer<T> {
unsafe fn new(log_size: int) -> Buffer<T> {
let size = (1 << log_size) * mem::size_of::<T>();
let buffer = libc::malloc(size as libc::size_t);
assert!(!buffer.is_null());
Buffer {
storage: buffer as *T,
log_size: log_size,
}
}
fn size(&self) -> int { 1 << self.log_size }
// Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly
fn mask(&self) -> int { (1 << self.log_size) - 1 }
// This does not protect against loading duplicate values of the same cell,
// nor does this clear out the contents contained within. Hence, this is a
// very unsafe method which the caller needs to treat specially in case a
// race is lost.
unsafe fn get(&self, i: int) -> T {
ptr::read_ptr(self.storage.offset(i & self.mask()))
}
// Unsafe because this unsafely overwrites possibly uninitialized or
// initialized data.
unsafe fn put(&mut self, i: int, t: T) {
let ptr = self.storage.offset(i & self.mask());
ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1);
cast::forget(t);
}
// Again, unsafe because this has incredibly dubious ownership violations.
// It is assumed that this buffer is immediately dropped.
unsafe fn | (&self, b: int, t: int, delta: int) -> Buffer<T> {
let mut buf = Buffer::new(self.log_size + delta);
for i in range(t, b) {
buf.put(i, self.get(i));
}
return buf;
}
}
#[unsafe_destructor]
impl<T: Send> Drop for Buffer<T> {
fn drop(&mut self) {
// It is assumed that all buffers are empty on drop.
unsafe { libc::free(self.storage as *libc::c_void) }
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::{Data, BufferPool, Abort, Empty, Worker, Stealer};
use cast;
use rt::thread::Thread;
use rand;
use rand::Rng;
use unstable::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
AtomicUint, INIT_ATOMIC_UINT};
use vec;
#[test]
fn smoke() {
let mut pool = BufferPool::new();
let (mut w, mut s) = pool.deque();
assert_eq!(w.pop(), None);
assert_eq!(s.steal(), Empty);
w.push(1);
assert_eq!(w.pop(), Some(1));
w.push(1);
assert_eq!(s.steal(), Data(1));
w.push(1);
assert_eq!(s.clone().steal(), Data(1));
}
#[test]
fn stealpush() {
static AMT: int = 100000;
let mut pool = BufferPool::<int>::new();
let (mut w, s) = pool.deque();
let t = do Thread::start {
let mut s = s;
let mut left = AMT;
while left > 0 {
match s.steal() {
Data(i) => {
assert_eq!(i, 1);
left -= 1;
}
Abort | Empty => {}
}
}
};
for _ in range(0, AMT) {
w.push(1);
}
t.join();
}
#[test]
fn stealpush_large() {
static AMT: int = 100000;
let mut pool = BufferPool::<(int, int)>::new();
let (mut w, s) = pool.deque();
let t = do Thread::start {
let mut s = s;
let mut left = AMT;
while left > 0 {
match s.steal() {
Data((1, 10)) => { left -= 1; }
Data(..) => fail!(),
Abort | Empty => {}
}
}
};
for _ in range(0, AMT) {
w.push((1, 10));
}
t.join();
}
fn stampede(mut w: Worker<~int>, s: Stealer<~int>,
nthreads: int, amt: uint) {
for _ in range(0, amt) {
w.push(~20);
}
let mut remaining = AtomicUint::new(amt);
let unsafe_remaining: *mut AtomicUint = &mut remaining;
let threads = range(0, nthreads).map(|_| {
let s = s.clone();
do Thread::start {
unsafe {
let mut s = s;
while (*unsafe_remaining).load(SeqCst) > 0 {
match s.steal() {
Data(~20) => {
(*unsafe_remaining).fetch_sub(1, SeqCst);
}
Data(..) => fail!(),
Abort | Empty => {}
}
}
}
}
}).to_owned_vec();
while remaining.load(SeqCst) > 0 {
match w.pop() {
Some(~20) => { remaining.fetch_sub(1, SeqCst); }
Some(..) => fail!(),
None => {}
}
}
for thread in threads.move_iter() {
thread.join();
}
}
#[test]
fn run_stampede() {
let mut pool = BufferPool::<~int>::new();
let (w, s) = pool.deque();
stampede(w, s, 8, 10000);
}
#[test]
fn many_stampede() {
static AMT: uint = 4;
let mut pool = BufferPool::<~int>::new();
let threads = range(0, AMT).map(|_| {
let (w, s) = pool.deque();
do Thread::start {
stampede(w, s, 4, 10000);
}
}).to_owned_vec();
for thread in threads.move_iter() {
thread.join();
}
}
#[test]
fn stress() {
static AMT: int = 100000;
static NTHREADS: int = 8;
static mut DONE: AtomicBool = INIT_ATOMIC_BOOL;
static mut HITS: AtomicUint = INIT_ATOMIC_UINT;
let mut pool = BufferPool::<int>::new();
let (mut w, s) = pool.deque();
let threads = range(0, NTHREADS).map(|_| {
let s = s.clone();
do Thread::start {
unsafe {
let mut s = s;
loop {
match s.steal() {
Data(2) => { HITS.fetch_add(1, SeqCst); }
Data(..) => fail!(),
_ if DONE.load(SeqCst) => break,
_ => {}
}
}
}
}
}).to_owned_vec();
let mut rng = rand::task_rng();
let mut expected = 0;
while expected < AMT {
if rng.gen_range(0, 3) == 2 {
match w.pop() {
None => {}
Some(2) => unsafe { HITS.fetch_add(1, SeqCst); },
Some(_) => fail!(),
}
} else {
expected += 1;
w.push(2);
}
}
unsafe {
while HITS.load(SeqCst) < AMT as uint {
match w.pop() {
None => {}
Some(2) => { HITS.fetch_add(1, SeqCst); },
Some(_) => fail!(),
}
}
DONE.store(true, SeqCst);
}
for thread in threads.move_iter() {
thread.join();
}
assert_eq!(unsafe { HITS.load(SeqCst) }, expected as uint);
}
#[test]
#[ignore(cfg(windows))] // apparently windows scheduling is weird?
fn no_starvation() {
static AMT: int = 10000;
static NTHREADS: int = 4;
| resize | identifier_name |
deque.rs | is
// probably overkill
use cast;
use clone::Clone;
use iter::range;
use kinds::Send;
use libc;
use mem;
use ops::Drop;
use option::{Option, Some, None};
use ptr;
use unstable::atomics::{AtomicInt, AtomicPtr, SeqCst};
use unstable::sync::{UnsafeArc, Exclusive};
// Once the queue is less than 1/K full, then it will be downsized. Note that
// the deque requires that this number be less than 2.
static K: int = 4;
// Minimum number of bits that a buffer size should be. No buffer will resize to
// under this value, and all deques will initially contain a buffer of this
// size.
//
// The size in question is 1 << MIN_BITS
static MIN_BITS: int = 7;
struct Deque<T> {
bottom: AtomicInt,
top: AtomicInt,
array: AtomicPtr<Buffer<T>>,
pool: BufferPool<T>,
}
/// Worker half of the work-stealing deque. This worker has exclusive access to
/// one side of the deque, and uses `push` and `pop` method to manipulate it.
///
/// There may only be one worker per deque.
pub struct Worker<T> {
priv deque: UnsafeArc<Deque<T>>,
}
/// The stealing half of the work-stealing deque. Stealers have access to the
/// opposite end of the deque from the worker, and they only have access to the
/// `steal` method.
pub struct Stealer<T> {
priv deque: UnsafeArc<Deque<T>>,
}
/// When stealing some data, this is an enumeration of the possible outcomes.
#[deriving(Eq)]
pub enum Stolen<T> {
/// The deque was empty at the time of stealing
Empty,
/// The stealer lost the race for stealing data, and a retry may return more
/// data.
Abort,
/// The stealer has successfully stolen some data.
Data(T),
}
/// The allocation pool for buffers used by work-stealing deques. Right now this
/// structure is used for reclamation of memory after it is no longer in use by
/// deques.
///
/// This data structure is protected by a mutex, but it is rarely used. Deques
/// will only use this structure when allocating a new buffer or deallocating a
/// previous one.
pub struct BufferPool<T> {
priv pool: Exclusive<~[~Buffer<T>]>,
}
/// An internal buffer used by the chase-lev deque. This structure is actually
/// implemented as a circular buffer, and is used as the intermediate storage of
/// the data in the deque.
///
/// This type is implemented with *T instead of ~[T] for two reasons:
///
/// 1. There is nothing safe about using this buffer. This easily allows the
/// same value to be read twice in to rust, and there is nothing to
/// prevent this. The usage by the deque must ensure that one of the
/// values is forgotten. Furthermore, we only ever want to manually run
/// destructors for values in this buffer (on drop) because the bounds
/// are defined by the deque it's owned by.
///
/// 2. We can certainly avoid bounds checks using *T instead of ~[T], although
/// LLVM is probably pretty good at doing this already.
struct Buffer<T> {
storage: *T,
log_size: int,
}
impl<T: Send> BufferPool<T> {
/// Allocates a new buffer pool which in turn can be used to allocate new
/// deques.
pub fn new() -> BufferPool<T> {
BufferPool { pool: Exclusive::new(~[]) }
}
/// Allocates a new work-stealing deque which will send/receiving memory to
/// and from this buffer pool.
pub fn deque(&mut self) -> (Worker<T>, Stealer<T>) {
let (a, b) = UnsafeArc::new2(Deque::new(self.clone()));
(Worker { deque: a }, Stealer { deque: b })
}
fn alloc(&mut self, bits: int) -> ~Buffer<T> {
unsafe {
self.pool.with(|pool| {
match pool.iter().position(|x| x.size() >= (1 << bits)) {
Some(i) => pool.remove(i),
None => ~Buffer::new(bits)
}
})
}
}
fn free(&mut self, buf: ~Buffer<T>) {
unsafe {
let mut buf = Some(buf);
self.pool.with(|pool| {
let buf = buf.take_unwrap();
match pool.iter().position(|v| v.size() > buf.size()) {
Some(i) => pool.insert(i, buf),
None => pool.push(buf),
}
})
}
}
}
impl<T: Send> Clone for BufferPool<T> {
fn clone(&self) -> BufferPool<T> { BufferPool { pool: self.pool.clone() } }
}
impl<T: Send> Worker<T> {
/// Pushes data onto the front of this work queue.
pub fn push(&mut self, t: T) {
unsafe { (*self.deque.get()).push(t) }
}
/// Pops data off the front of the work queue, returning `None` on an empty
/// queue.
pub fn pop(&mut self) -> Option<T> {
unsafe { (*self.deque.get()).pop() }
}
/// Gets access to the buffer pool that this worker is attached to. This can
/// be used to create more deques which share the same buffer pool as this
/// deque.
pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool<T> {
unsafe { &mut (*self.deque.get()).pool }
}
}
impl<T: Send> Stealer<T> {
/// Steals work off the end of the queue (opposite of the worker's end)
pub fn steal(&mut self) -> Stolen<T> {
unsafe { (*self.deque.get()).steal() }
}
/// Gets access to the buffer pool that this stealer is attached to. This
/// can be used to create more deques which share the same buffer pool as
/// this deque.
pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool<T> {
unsafe { &mut (*self.deque.get()).pool }
}
}
impl<T: Send> Clone for Stealer<T> {
fn clone(&self) -> Stealer<T> { Stealer { deque: self.deque.clone() } }
}
// Almost all of this code can be found directly in the paper so I'm not
// personally going to heavily comment what's going on here.
impl<T: Send> Deque<T> {
fn new(mut pool: BufferPool<T>) -> Deque<T> {
let buf = pool.alloc(MIN_BITS);
Deque {
bottom: AtomicInt::new(0),
top: AtomicInt::new(0),
array: AtomicPtr::new(unsafe { cast::transmute(buf) }),
pool: pool,
}
}
unsafe fn push(&mut self, data: T) {
let mut b = self.bottom.load(SeqCst);
let t = self.top.load(SeqCst);
let mut a = self.array.load(SeqCst);
let size = b - t;
if size >= (*a).size() - 1 {
// You won't find this code in the chase-lev deque paper. This is
// alluded to in a small footnote, however. We always free a buffer
// when growing in order to prevent leaks.
a = self.swap_buffer(b, a, (*a).resize(b, t, 1));
b = self.bottom.load(SeqCst);
}
(*a).put(b, data);
self.bottom.store(b + 1, SeqCst);
}
unsafe fn pop(&mut self) -> Option<T> {
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
let b = b - 1;
self.bottom.store(b, SeqCst);
let t = self.top.load(SeqCst);
let size = b - t;
if size < 0 {
self.bottom.store(t, SeqCst);
return None;
}
let data = (*a).get(b);
if size > 0 {
self.maybe_shrink(b, t);
return Some(data);
}
if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
self.bottom.store(t + 1, SeqCst);
return Some(data);
} else {
self.bottom.store(t + 1, SeqCst);
cast::forget(data); // someone else stole this value
return None;
}
}
unsafe fn steal(&mut self) -> Stolen<T> {
let t = self.top.load(SeqCst);
let old = self.array.load(SeqCst);
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
let size = b - t;
if size <= 0 { return Empty }
if size % (*a).size() == 0 {
if a == old && t == self.top.load(SeqCst) {
return Empty
}
return Abort
}
let data = (*a).get(t);
if self.top.compare_and_swap(t, t + 1, SeqCst) == t | else {
cast::forget(data); // someone else stole this value
Abort
}
}
unsafe fn maybe_shrink(&mut self, b: int, t: int) {
let a = self.array.load(SeqCst);
if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
self.swap_buffer(b, a, (*a).resize(b, t, -1));
}
}
// Helper routine not mentioned in the paper which is used in growing and
// shrinking buffers to swap in a new buffer into place. As a bit of a
// recap, the whole point that we need a buffer pool rather than just
// calling malloc/free directly is that stealers can continue using buffers
// after this method has called 'free' on it. The continued usage is simply
// a read followed by a forget, but we must make sure that the memory can
// continue to be read after we flag this buffer for reclamation.
unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer<T>,
buf: Buffer<T>) -> *mut Buffer<T> {
let newbuf: *mut Buffer<T> = cast::transmute(~buf);
self.array.store(newbuf, SeqCst);
let ss = (*newbuf).size();
self.bottom.store(b + ss, SeqCst);
let t = self.top.load(SeqCst);
if self.top.compare_and_swap(t, t + ss, SeqCst)!= t {
self.bottom.store(b, SeqCst);
}
self.pool.free(cast::transmute(old));
return newbuf;
}
}
#[unsafe_destructor]
impl<T: Send> Drop for Deque<T> {
fn drop(&mut self) {
let t = self.top.load(SeqCst);
let b = self.bottom.load(SeqCst);
let a = self.array.load(SeqCst);
// Free whatever is leftover in the dequeue, and then move the buffer
// back into the pool.
for i in range(t, b) {
let _: T = unsafe { (*a).get(i) };
}
self.pool.free(unsafe { cast::transmute(a) });
}
}
impl<T: Send> Buffer<T> {
unsafe fn new(log_size: int) -> Buffer<T> {
let size = (1 << log_size) * mem::size_of::<T>();
let buffer = libc::malloc(size as libc::size_t);
assert!(!buffer.is_null());
Buffer {
storage: buffer as *T,
log_size: log_size,
}
}
fn size(&self) -> int { 1 << self.log_size }
// Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly
fn mask(&self) -> int { (1 << self.log_size) - 1 }
// This does not protect against loading duplicate values of the same cell,
// nor does this clear out the contents contained within. Hence, this is a
// very unsafe method which the caller needs to treat specially in case a
// race is lost.
unsafe fn get(&self, i: int) -> T {
ptr::read_ptr(self.storage.offset(i & self.mask()))
}
// Unsafe because this unsafely overwrites possibly uninitialized or
// initialized data.
unsafe fn put(&mut self, i: int, t: T) {
let ptr = self.storage.offset(i & self.mask());
ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1);
cast::forget(t);
}
// Again, unsafe because this has incredibly dubious ownership violations.
// It is assumed that this buffer is immediately dropped.
unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer<T> {
let mut buf = Buffer::new(self.log_size + delta);
for i in range(t, b) {
buf.put(i, self.get(i));
}
return buf;
}
}
#[unsafe_destructor]
impl<T: Send> Drop for Buffer<T> {
fn drop(&mut self) {
// It is assumed that all buffers are empty on drop.
unsafe { libc::free(self.storage as *libc::c_void) }
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::{Data, BufferPool, Abort, Empty, Worker, Stealer};
use cast;
use rt::thread::Thread;
use rand;
use rand::Rng;
use unstable::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
AtomicUint, INIT_ATOMIC_UINT};
use vec;
#[test]
fn smoke() {
let mut pool = BufferPool::new();
let (mut w, mut s) = pool.deque();
assert_eq!(w.pop(), None);
assert_eq!(s.steal(), Empty);
w.push(1);
assert_eq!(w.pop(), Some(1));
w.push(1);
assert_eq!(s.steal(), Data(1));
w.push(1);
assert_eq!(s.clone().steal(), Data(1));
}
#[test]
fn stealpush() {
static AMT: int = 100000;
let mut pool = BufferPool::<int>::new();
let (mut w, s) = pool.deque();
let t = do Thread::start {
let mut s = s;
let mut left = AMT;
while left > 0 {
match s.steal() {
Data(i) => {
assert_eq!(i, 1);
left -= 1;
}
Abort | Empty => {}
}
}
};
for _ in range(0, AMT) {
w.push(1);
}
t.join();
}
#[test]
fn stealpush_large() {
static AMT: int = 100000;
let mut pool = BufferPool::<(int, int)>::new();
let (mut w, s) = pool.deque();
let t = do Thread::start {
let mut s = s;
let mut left = AMT;
while left > 0 {
match s.steal() {
Data((1, 10)) => { left -= 1; }
Data(..) => fail!(),
Abort | Empty => {}
}
}
};
for _ in range(0, AMT) {
w.push((1, 10));
}
t.join();
}
fn stampede(mut w: Worker<~int>, s: Stealer<~int>,
nthreads: int, amt: uint) {
for _ in range(0, amt) {
w.push(~20);
}
let mut remaining = AtomicUint::new(amt);
let unsafe_remaining: *mut AtomicUint = &mut remaining;
let threads = range(0, nthreads).map(|_| {
let s = s.clone();
do Thread::start {
unsafe {
let mut s = s;
while (*unsafe_remaining).load(SeqCst) > 0 {
match s.steal() {
Data(~20) => {
(*unsafe_remaining).fetch_sub(1, SeqCst);
}
Data(..) => fail!(),
Abort | Empty => {}
}
}
}
}
}).to_owned_vec();
while remaining.load(SeqCst) > 0 {
match w.pop() {
Some(~20) => { remaining.fetch_sub(1, SeqCst); }
Some(..) => fail!(),
None => {}
}
}
for thread in threads.move_iter() {
thread.join();
}
}
#[test]
fn run_stampede() {
let mut pool = BufferPool::<~int>::new();
let (w, s) = pool.deque();
stampede(w, s, 8, 10000);
}
#[test]
fn many_stampede() {
static AMT: uint = 4;
let mut pool = BufferPool::<~int>::new();
let threads = range(0, AMT).map(|_| {
let (w, s) = pool.deque();
do Thread::start {
stampede(w, s, 4, 10000);
}
}).to_owned_vec();
for thread in threads.move_iter() {
thread.join();
}
}
#[test]
fn stress() {
static AMT: int = 100000;
static NTHREADS: int = 8;
static mut DONE: AtomicBool = INIT_ATOMIC_BOOL;
static mut HITS: AtomicUint = INIT_ATOMIC_UINT;
let mut pool = BufferPool::<int>::new();
let (mut w, s) = pool.deque();
let threads = range(0, NTHREADS).map(|_| {
let s = s.clone();
do Thread::start {
unsafe {
let mut s = s;
loop {
match s.steal() {
Data(2) => { HITS.fetch_add(1, SeqCst); }
Data(..) => fail!(),
_ if DONE.load(SeqCst) => break,
_ => {}
}
}
}
}
}).to_owned_vec();
let mut rng = rand::task_rng();
let mut expected = 0;
while expected < AMT {
if rng.gen_range(0, 3) == 2 {
match w.pop() {
None => {}
Some(2) => unsafe { HITS.fetch_add(1, SeqCst); },
Some(_) => fail!(),
}
} else {
expected += 1;
w.push(2);
}
}
unsafe {
while HITS.load(SeqCst) < AMT as uint {
match w.pop() {
None => {}
Some(2) => { HITS.fetch_add(1, SeqCst); },
Some(_) => fail!(),
}
}
DONE.store(true, SeqCst);
}
for thread in threads.move_iter() {
thread.join();
}
assert_eq!(unsafe { HITS.load(SeqCst) }, expected as uint);
}
#[test]
#[ignore(cfg(windows))] // apparently windows scheduling is weird?
fn no_starvation() {
static AMT: int = 10000;
static NTHREADS: int = 4;
| {
Data(data)
} | conditional_block |
show_off.rs |
use data_type::data_type::Data;
pub struct ShowOff;
impl ShowOff {
pub fn | (host: &str, data: Data) -> String {
"".to_string()
// html!{
// link rel="stylesheet" type="text/css"
// href={"http://" (host) "/style/style.css"} /
//
// div.gallery {
// @for image in &data.data {
// a.super-item href={"http://" (host) "/" (&image.id)} id={(&image.id)}{
// img.item.lazy data-original={"http://" (host) "/images/min" (&image.src.split_at(9).1)} {}
//
// }
// }
// }
// div{
// "© 2017 Romain ASSIE"
// a href="https://github.com/warnp/vision_rs" {
// svg height="32" viewBox="0 0 16 16" width="32" {
// path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0.67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2.27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0.21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"{
// }
// }
// }
// }
// (PreEscaped("<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js\"></script>"))
// (PreEscaped("<script src=\"http://".to_string() + host+"/js/jquery.lazyload.min.js\"></script>"))
// (PreEscaped("<script src=\"http://".to_string() + host+"/js/customloading.js\"></script>"))
//
// }
}
}
| get_page | identifier_name |
show_off.rs |
use data_type::data_type::Data;
pub struct ShowOff;
impl ShowOff {
pub fn get_page(host: &str, data: Data) -> String | // }
// }
// }
// (PreEscaped("<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js\"></script>"))
// (PreEscaped("<script src=\"http://".to_string() + host+"/js/jquery.lazyload.min.js\"></script>"))
// (PreEscaped("<script src=\"http://".to_string() + host+"/js/customloading.js\"></script>"))
//
// }
}
}
| {
"".to_string()
// html!{
// link rel="stylesheet" type="text/css"
// href={"http://" (host) "/style/style.css"} /
//
// div.gallery {
// @for image in &data.data {
// a.super-item href={"http://" (host) "/" (&image.id)} id={(&image.id)}{
// img.item.lazy data-original={"http://" (host) "/images/min" (&image.src.split_at(9).1)} {}
//
// }
// }
// }
// div{
// "© 2017 Romain ASSIE"
// a href="https://github.com/warnp/vision_rs" {
// svg height="32" viewBox="0 0 16 16" width="32" {
// path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"{
// } | identifier_body |
show_off.rs | use data_type::data_type::Data;
pub struct ShowOff;
impl ShowOff {
pub fn get_page(host: &str, data: Data) -> String {
"".to_string()
// html!{ | // href={"http://" (host) "/style/style.css"} /
//
// div.gallery {
// @for image in &data.data {
// a.super-item href={"http://" (host) "/" (&image.id)} id={(&image.id)}{
// img.item.lazy data-original={"http://" (host) "/images/min" (&image.src.split_at(9).1)} {}
//
// }
// }
// }
// div{
// "© 2017 Romain ASSIE"
// a href="https://github.com/warnp/vision_rs" {
// svg height="32" viewBox="0 0 16 16" width="32" {
// path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0.67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2.27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0.21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"{
// }
// }
// }
// }
// (PreEscaped("<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js\"></script>"))
// (PreEscaped("<script src=\"http://".to_string() + host+"/js/jquery.lazyload.min.js\"></script>"))
// (PreEscaped("<script src=\"http://".to_string() + host+"/js/customloading.js\"></script>"))
//
// }
}
} | // link rel="stylesheet" type="text/css" | random_line_split |
empty.rs | use consumer::*;
use parallel_stream::*;
use stream::*;
/// A [`Stream`](./trait.Stream.html) that do not emits any element.
///
/// # Examples
///
/// ```
/// use asyncplify::*;
///
/// let mut count = 0;
///
/// Empty
/// .inspect(|_| count += 1)
/// .subscribe();
///
/// assert!(count == 0, "count = {}", count);
/// ```
#[must_use = "stream adaptors are lazy and do nothing unless consumed"]
pub struct Empty;
impl Stream for Empty {
type Item = ();
fn consume<C: Consumer<()>>(self, _: C) {} |
/// A [`ParallelStream`](./trait.ParallelStream.html) that do not emits any element.
///
/// # Examples
///
/// ```
/// use asyncplify::*;
/// use std::sync::atomic::{AtomicBool, Ordering};
/// use std::sync::Arc;
///
/// let found = Arc::new(AtomicBool::new(false));
///
/// ParallelEmpty
/// .inspect(|_| found.store(true, Ordering::Release))
/// .subscribe();
///
/// assert!(!found.load(Ordering::Acquire), "found = {}", found.load(Ordering::Acquire));
/// ```
#[must_use = "stream adaptors are lazy and do nothing unless consumed"]
pub struct ParallelEmpty;
impl ParallelStream for ParallelEmpty {
type Item = ();
fn consume<C: ParallelConsumer<()>>(self, _: C) {}
} | } | random_line_split |
empty.rs | use consumer::*;
use parallel_stream::*;
use stream::*;
/// A [`Stream`](./trait.Stream.html) that do not emits any element.
///
/// # Examples
///
/// ```
/// use asyncplify::*;
///
/// let mut count = 0;
///
/// Empty
/// .inspect(|_| count += 1)
/// .subscribe();
///
/// assert!(count == 0, "count = {}", count);
/// ```
#[must_use = "stream adaptors are lazy and do nothing unless consumed"]
pub struct Empty;
impl Stream for Empty {
type Item = ();
fn consume<C: Consumer<()>>(self, _: C) {}
}
/// A [`ParallelStream`](./trait.ParallelStream.html) that do not emits any element.
///
/// # Examples
///
/// ```
/// use asyncplify::*;
/// use std::sync::atomic::{AtomicBool, Ordering};
/// use std::sync::Arc;
///
/// let found = Arc::new(AtomicBool::new(false));
///
/// ParallelEmpty
/// .inspect(|_| found.store(true, Ordering::Release))
/// .subscribe();
///
/// assert!(!found.load(Ordering::Acquire), "found = {}", found.load(Ordering::Acquire));
/// ```
#[must_use = "stream adaptors are lazy and do nothing unless consumed"]
pub struct | ;
impl ParallelStream for ParallelEmpty {
type Item = ();
fn consume<C: ParallelConsumer<()>>(self, _: C) {}
}
| ParallelEmpty | identifier_name |
empty.rs | use consumer::*;
use parallel_stream::*;
use stream::*;
/// A [`Stream`](./trait.Stream.html) that do not emits any element.
///
/// # Examples
///
/// ```
/// use asyncplify::*;
///
/// let mut count = 0;
///
/// Empty
/// .inspect(|_| count += 1)
/// .subscribe();
///
/// assert!(count == 0, "count = {}", count);
/// ```
#[must_use = "stream adaptors are lazy and do nothing unless consumed"]
pub struct Empty;
impl Stream for Empty {
type Item = ();
fn consume<C: Consumer<()>>(self, _: C) {}
}
/// A [`ParallelStream`](./trait.ParallelStream.html) that do not emits any element.
///
/// # Examples
///
/// ```
/// use asyncplify::*;
/// use std::sync::atomic::{AtomicBool, Ordering};
/// use std::sync::Arc;
///
/// let found = Arc::new(AtomicBool::new(false));
///
/// ParallelEmpty
/// .inspect(|_| found.store(true, Ordering::Release))
/// .subscribe();
///
/// assert!(!found.load(Ordering::Acquire), "found = {}", found.load(Ordering::Acquire));
/// ```
#[must_use = "stream adaptors are lazy and do nothing unless consumed"]
pub struct ParallelEmpty;
impl ParallelStream for ParallelEmpty {
type Item = ();
fn consume<C: ParallelConsumer<()>>(self, _: C) |
}
| {} | identifier_body |
reexported_static_methods.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use sub_foo::Foo;
pub use self::Bar as Baz;
pub use sub_foo::Boz;
pub use sub_foo::Bort;
pub trait Bar {
fn bar() -> Self;
}
impl Bar for int {
fn bar() -> int { 84 }
}
pub mod sub_foo {
pub trait Foo {
fn foo() -> Self;
}
impl Foo for int {
fn foo() -> int { 42 }
}
pub struct Boz {
unused_str: String
}
impl Boz {
pub fn boz(i: int) -> bool {
i > 0
}
}
pub enum Bort {
Bort1,
Bort2
}
| impl Bort {
pub fn bort() -> String {
"bort()".to_string()
}
}
} | random_line_split |
|
reexported_static_methods.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use sub_foo::Foo;
pub use self::Bar as Baz;
pub use sub_foo::Boz;
pub use sub_foo::Bort;
pub trait Bar {
fn bar() -> Self;
}
impl Bar for int {
fn bar() -> int { 84 }
}
pub mod sub_foo {
pub trait Foo {
fn foo() -> Self;
}
impl Foo for int {
fn foo() -> int { 42 }
}
pub struct | {
unused_str: String
}
impl Boz {
pub fn boz(i: int) -> bool {
i > 0
}
}
pub enum Bort {
Bort1,
Bort2
}
impl Bort {
pub fn bort() -> String {
"bort()".to_string()
}
}
}
| Boz | identifier_name |
reexported_static_methods.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use sub_foo::Foo;
pub use self::Bar as Baz;
pub use sub_foo::Boz;
pub use sub_foo::Bort;
pub trait Bar {
fn bar() -> Self;
}
impl Bar for int {
fn bar() -> int |
}
pub mod sub_foo {
pub trait Foo {
fn foo() -> Self;
}
impl Foo for int {
fn foo() -> int { 42 }
}
pub struct Boz {
unused_str: String
}
impl Boz {
pub fn boz(i: int) -> bool {
i > 0
}
}
pub enum Bort {
Bort1,
Bort2
}
impl Bort {
pub fn bort() -> String {
"bort()".to_string()
}
}
}
| { 84 } | identifier_body |
example2.rs | #[feature(link_args)];
extern crate cairo;
#[link_args = "-L /Users/Jens/.homebrew/lib"]
extern {}
fn | () {
use cairo;
use cairo::surface;
use cairo::surface::Surface;
let (width, height) = (500.0, 500.0);
let mut s = Surface::create_image(surface::format::ARGB32, width as i32, height as i32);
let mut cairo = cairo::Cairo::create(&mut s);
cairo.save();
cairo.set_source_rgb(0.3, 0.3, 1.0);
cairo.paint();
cairo.restore();
cairo.move_to(0.0, 0.0);
cairo.line_to(2.0 * width / 6.0, 2.0 * height / 6.0);
cairo.line_to(3.0 * width / 6.0, 1.0 * height / 6.0);
cairo.line_to(4.0 * width / 6.0, 2.0 * height / 6.0);
cairo.line_to(6.0 * width / 6.0, 0.0 * height / 6.0);
cairo.close_path();
cairo.save();
cairo.set_line_width(6.0);
cairo.stroke_preserve();
cairo.set_source_rgb(0.3, 0.3, 0.3);
cairo.fill();
cairo.restore();
cairo.save();
cairo.set_line_width(6.0);
cairo.arc(1.0 * width / 6.0, 3.0 * height / 6.0, 0.5 * width / 6.0, 0.0 * height / 6.0, 2.0 * std::f64::consts::PI);
cairo.stroke_preserve();
cairo.set_source_rgb(1.0, 1.0, 0.0);
cairo.fill();
cairo.restore();
s.write_to_png("example2.png");
s.finish();
} | main | identifier_name |
example2.rs | #[feature(link_args)];
extern crate cairo;
#[link_args = "-L /Users/Jens/.homebrew/lib"]
extern {}
fn main() | cairo.close_path();
cairo.save();
cairo.set_line_width(6.0);
cairo.stroke_preserve();
cairo.set_source_rgb(0.3, 0.3, 0.3);
cairo.fill();
cairo.restore();
cairo.save();
cairo.set_line_width(6.0);
cairo.arc(1.0 * width / 6.0, 3.0 * height / 6.0, 0.5 * width / 6.0, 0.0 * height / 6.0, 2.0 * std::f64::consts::PI);
cairo.stroke_preserve();
cairo.set_source_rgb(1.0, 1.0, 0.0);
cairo.fill();
cairo.restore();
s.write_to_png("example2.png");
s.finish();
} | {
use cairo;
use cairo::surface;
use cairo::surface::Surface;
let (width, height) = (500.0, 500.0);
let mut s = Surface::create_image(surface::format::ARGB32, width as i32, height as i32);
let mut cairo = cairo::Cairo::create(&mut s);
cairo.save();
cairo.set_source_rgb(0.3, 0.3, 1.0);
cairo.paint();
cairo.restore();
cairo.move_to(0.0, 0.0);
cairo.line_to(2.0 * width / 6.0, 2.0 * height / 6.0);
cairo.line_to(3.0 * width / 6.0, 1.0 * height / 6.0);
cairo.line_to(4.0 * width / 6.0, 2.0 * height / 6.0);
cairo.line_to(6.0 * width / 6.0, 0.0 * height / 6.0); | identifier_body |
example2.rs | #[feature(link_args)];
extern crate cairo;
#[link_args = "-L /Users/Jens/.homebrew/lib"]
extern {}
fn main() {
use cairo;
use cairo::surface;
use cairo::surface::Surface;
let (width, height) = (500.0, 500.0); |
let mut cairo = cairo::Cairo::create(&mut s);
cairo.save();
cairo.set_source_rgb(0.3, 0.3, 1.0);
cairo.paint();
cairo.restore();
cairo.move_to(0.0, 0.0);
cairo.line_to(2.0 * width / 6.0, 2.0 * height / 6.0);
cairo.line_to(3.0 * width / 6.0, 1.0 * height / 6.0);
cairo.line_to(4.0 * width / 6.0, 2.0 * height / 6.0);
cairo.line_to(6.0 * width / 6.0, 0.0 * height / 6.0);
cairo.close_path();
cairo.save();
cairo.set_line_width(6.0);
cairo.stroke_preserve();
cairo.set_source_rgb(0.3, 0.3, 0.3);
cairo.fill();
cairo.restore();
cairo.save();
cairo.set_line_width(6.0);
cairo.arc(1.0 * width / 6.0, 3.0 * height / 6.0, 0.5 * width / 6.0, 0.0 * height / 6.0, 2.0 * std::f64::consts::PI);
cairo.stroke_preserve();
cairo.set_source_rgb(1.0, 1.0, 0.0);
cairo.fill();
cairo.restore();
s.write_to_png("example2.png");
s.finish();
} | let mut s = Surface::create_image(surface::format::ARGB32, width as i32, height as i32); | random_line_split |
util.rs | use std::io::{Cursor, Read};
use std::io;
use psoserial::Serial;
use psomsg::util::*;
static COLOR_RED: &'static str = "\x1B[31m";
static COLOR_RESET: &'static str = "\x1B[0m";
/// Creates a 3-column view of the buffer with index, bytes, and ASCII
/// representation.
pub fn hex_view(buf: &[u8]) -> String {
let rows = buf.len() / 16 + { if buf.len() % 16 > 0 {1} else {0} };
let mut output = String::new();
for row in 0..rows {
// write the index
output.push_str(&format!("{:08X} | ", row * 16));
// write the next 16 bytes
let leftover;
let end = {
if buf.len() > row * 16 + 16 {
leftover = 0;
row * 16 + 16
} else {
leftover = row * 16 + 16 - buf.len();
buf.len()
}
};
for b in &buf[(row * 16)..end] {
output.push_str(&format!("{:02X} ", b))
}
for _ in 0..leftover {
output.push_str(" ");
}
output.push_str("| ");
// write the ascii representation
for b in &buf[(row * 16)..end] {
if *b > 31u8 && *b < 127u8 {
output.push(*b as char);
} else {
output.push('.');
}
}
// new line
output.push('\n');
}
output
}
pub fn hex_view_serial<S: Serial>(s: &S) -> String |
/// Shows the serialized hex view of the first argument, with different bytes
/// in ANSI escaped red.
pub fn hex_view_diff<S: Serial>(s: &S, buf: &[u8]) -> String {
let mut cursor = Cursor::new(Vec::new());
s.serialize(&mut cursor).unwrap();
let array = cursor.into_inner();
let rows = array.len() / 16 + { if array.len() % 16 > 0 {1} else {0} };
let mut output = String::new();
for row in 0..rows {
// First row is Serial version
// write the index
output.push_str(&format!("{:08X} | ", row * 16));
// write the next 16 bytes
let leftover;
let end = {
if array.len() > row * 16 + 16 {
leftover = 0;
row * 16 + 16
} else {
leftover = row * 16 + 16 - array.len();
array.len()
}
};
for i in (row * 16)..end {
if (buf.len() > i && buf[i]!= array[i]) || buf.len() <= i {
output.push_str(&format!("{}{:02X}{} ", COLOR_RED, array[i], COLOR_RESET));
} else {
output.push_str(&format!("{:02X} ", array[i]));
}
}
// for b in &buf[(row * 16)..end] {
// output.push_str(&format!("{:02X} ", b))
// }
for _ in 0..leftover {
output.push_str(" ");
}
output.push_str("| ");
// write the ascii representation
for b in &array[(row * 16)..end] {
if *b > 31u8 && *b < 127u8 {
output.push(*b as char);
} else {
output.push('.');
}
}
output.push('\n');
}
output
}
/// Reads a raw BB message buffer
pub fn read_bb_msg(src: &mut Read) -> io::Result<Vec<u8>> {
let mut hdr = vec![0u8; 8];
debug!("reading header...");
try!(read_exact(src, &mut hdr));
let size = hdr[0] as usize + ((hdr[1] as usize) << 8);
debug!("msg size: {}", size);
let padding = if size % 8 == 0 { 0 } else { 8 - (size % 8) };
let mut body = vec![0u8; size + padding - 8];
debug!("reading body...");
try!(read_exact(src, &mut body));
hdr.append(&mut body);
Ok(hdr)
}
| {
let mut cursor = Cursor::new(Vec::new());
s.serialize(&mut cursor).unwrap();
let array = cursor.into_inner();
hex_view(&array)
} | identifier_body |
util.rs | use std::io::{Cursor, Read};
use std::io;
use psoserial::Serial;
use psomsg::util::*;
static COLOR_RED: &'static str = "\x1B[31m";
static COLOR_RESET: &'static str = "\x1B[0m";
/// Creates a 3-column view of the buffer with index, bytes, and ASCII
/// representation.
pub fn hex_view(buf: &[u8]) -> String {
let rows = buf.len() / 16 + { if buf.len() % 16 > 0 {1} else {0} };
let mut output = String::new();
for row in 0..rows {
// write the index
output.push_str(&format!("{:08X} | ", row * 16));
// write the next 16 bytes
let leftover;
let end = {
if buf.len() > row * 16 + 16 {
leftover = 0;
row * 16 + 16
} else {
leftover = row * 16 + 16 - buf.len();
buf.len()
}
};
for b in &buf[(row * 16)..end] {
output.push_str(&format!("{:02X} ", b))
}
for _ in 0..leftover {
output.push_str(" ");
}
output.push_str("| ");
// write the ascii representation
for b in &buf[(row * 16)..end] {
if *b > 31u8 && *b < 127u8 {
output.push(*b as char);
} else {
output.push('.');
}
}
// new line
output.push('\n');
}
output
}
pub fn hex_view_serial<S: Serial>(s: &S) -> String {
let mut cursor = Cursor::new(Vec::new());
s.serialize(&mut cursor).unwrap();
let array = cursor.into_inner();
hex_view(&array)
}
/// Shows the serialized hex view of the first argument, with different bytes
/// in ANSI escaped red.
pub fn hex_view_diff<S: Serial>(s: &S, buf: &[u8]) -> String {
let mut cursor = Cursor::new(Vec::new());
s.serialize(&mut cursor).unwrap();
let array = cursor.into_inner();
let rows = array.len() / 16 + { if array.len() % 16 > 0 {1} else {0} };
let mut output = String::new();
for row in 0..rows {
// First row is Serial version
// write the index
output.push_str(&format!("{:08X} | ", row * 16));
// write the next 16 bytes
let leftover;
let end = {
if array.len() > row * 16 + 16 | else {
leftover = row * 16 + 16 - array.len();
array.len()
}
};
for i in (row * 16)..end {
if (buf.len() > i && buf[i]!= array[i]) || buf.len() <= i {
output.push_str(&format!("{}{:02X}{} ", COLOR_RED, array[i], COLOR_RESET));
} else {
output.push_str(&format!("{:02X} ", array[i]));
}
}
// for b in &buf[(row * 16)..end] {
// output.push_str(&format!("{:02X} ", b))
// }
for _ in 0..leftover {
output.push_str(" ");
}
output.push_str("| ");
// write the ascii representation
for b in &array[(row * 16)..end] {
if *b > 31u8 && *b < 127u8 {
output.push(*b as char);
} else {
output.push('.');
}
}
output.push('\n');
}
output
}
/// Reads a raw BB message buffer
pub fn read_bb_msg(src: &mut Read) -> io::Result<Vec<u8>> {
let mut hdr = vec![0u8; 8];
debug!("reading header...");
try!(read_exact(src, &mut hdr));
let size = hdr[0] as usize + ((hdr[1] as usize) << 8);
debug!("msg size: {}", size);
let padding = if size % 8 == 0 { 0 } else { 8 - (size % 8) };
let mut body = vec![0u8; size + padding - 8];
debug!("reading body...");
try!(read_exact(src, &mut body));
hdr.append(&mut body);
Ok(hdr)
}
| {
leftover = 0;
row * 16 + 16
} | conditional_block |
util.rs | use std::io::{Cursor, Read};
use std::io;
use psoserial::Serial;
use psomsg::util::*;
static COLOR_RED: &'static str = "\x1B[31m";
static COLOR_RESET: &'static str = "\x1B[0m";
/// Creates a 3-column view of the buffer with index, bytes, and ASCII
/// representation.
pub fn hex_view(buf: &[u8]) -> String {
let rows = buf.len() / 16 + { if buf.len() % 16 > 0 {1} else {0} };
let mut output = String::new();
for row in 0..rows {
// write the index
output.push_str(&format!("{:08X} | ", row * 16));
// write the next 16 bytes
let leftover;
let end = {
if buf.len() > row * 16 + 16 {
leftover = 0;
row * 16 + 16
} else {
leftover = row * 16 + 16 - buf.len();
buf.len()
}
};
for b in &buf[(row * 16)..end] {
output.push_str(&format!("{:02X} ", b))
}
for _ in 0..leftover {
output.push_str(" ");
}
output.push_str("| ");
// write the ascii representation
for b in &buf[(row * 16)..end] {
if *b > 31u8 && *b < 127u8 {
output.push(*b as char);
} else {
output.push('.');
}
}
// new line
output.push('\n');
}
output
}
pub fn hex_view_serial<S: Serial>(s: &S) -> String {
let mut cursor = Cursor::new(Vec::new());
s.serialize(&mut cursor).unwrap();
let array = cursor.into_inner();
hex_view(&array)
}
/// Shows the serialized hex view of the first argument, with different bytes
/// in ANSI escaped red.
pub fn | <S: Serial>(s: &S, buf: &[u8]) -> String {
let mut cursor = Cursor::new(Vec::new());
s.serialize(&mut cursor).unwrap();
let array = cursor.into_inner();
let rows = array.len() / 16 + { if array.len() % 16 > 0 {1} else {0} };
let mut output = String::new();
for row in 0..rows {
// First row is Serial version
// write the index
output.push_str(&format!("{:08X} | ", row * 16));
// write the next 16 bytes
let leftover;
let end = {
if array.len() > row * 16 + 16 {
leftover = 0;
row * 16 + 16
} else {
leftover = row * 16 + 16 - array.len();
array.len()
}
};
for i in (row * 16)..end {
if (buf.len() > i && buf[i]!= array[i]) || buf.len() <= i {
output.push_str(&format!("{}{:02X}{} ", COLOR_RED, array[i], COLOR_RESET));
} else {
output.push_str(&format!("{:02X} ", array[i]));
}
}
// for b in &buf[(row * 16)..end] {
// output.push_str(&format!("{:02X} ", b))
// }
for _ in 0..leftover {
output.push_str(" ");
}
output.push_str("| ");
// write the ascii representation
for b in &array[(row * 16)..end] {
if *b > 31u8 && *b < 127u8 {
output.push(*b as char);
} else {
output.push('.');
}
}
output.push('\n');
}
output
}
/// Reads a raw BB message buffer
pub fn read_bb_msg(src: &mut Read) -> io::Result<Vec<u8>> {
let mut hdr = vec![0u8; 8];
debug!("reading header...");
try!(read_exact(src, &mut hdr));
let size = hdr[0] as usize + ((hdr[1] as usize) << 8);
debug!("msg size: {}", size);
let padding = if size % 8 == 0 { 0 } else { 8 - (size % 8) };
let mut body = vec![0u8; size + padding - 8];
debug!("reading body...");
try!(read_exact(src, &mut body));
hdr.append(&mut body);
Ok(hdr)
}
| hex_view_diff | identifier_name |
util.rs | use std::io::{Cursor, Read};
use std::io;
use psoserial::Serial;
use psomsg::util::*;
static COLOR_RED: &'static str = "\x1B[31m";
static COLOR_RESET: &'static str = "\x1B[0m";
/// Creates a 3-column view of the buffer with index, bytes, and ASCII
/// representation.
pub fn hex_view(buf: &[u8]) -> String {
let rows = buf.len() / 16 + { if buf.len() % 16 > 0 {1} else {0} };
let mut output = String::new();
for row in 0..rows {
// write the index
output.push_str(&format!("{:08X} | ", row * 16));
// write the next 16 bytes
let leftover;
let end = {
if buf.len() > row * 16 + 16 {
leftover = 0;
row * 16 + 16
} else {
leftover = row * 16 + 16 - buf.len();
buf.len()
}
};
for b in &buf[(row * 16)..end] {
output.push_str(&format!("{:02X} ", b))
}
for _ in 0..leftover {
output.push_str(" ");
}
output.push_str("| ");
// write the ascii representation
for b in &buf[(row * 16)..end] {
if *b > 31u8 && *b < 127u8 {
output.push(*b as char);
} else {
output.push('.');
}
}
// new line
output.push('\n');
}
output
}
pub fn hex_view_serial<S: Serial>(s: &S) -> String { | hex_view(&array)
}
/// Shows the serialized hex view of the first argument, with different bytes
/// in ANSI escaped red.
/// Renders the serialized form of `s` as a hex-dump (same layout as
/// `hex_view`), highlighting in ANSI red every byte that differs from the
/// reference buffer `buf`, including bytes that extend past `buf`'s end.
///
/// The ASCII column is not highlighted; only the hex byte column is.
///
/// # Panics
///
/// Panics if `s` fails to serialize (`unwrap` on `serialize`).
pub fn hex_view_diff<S: Serial>(s: &S, buf: &[u8]) -> String {
    // Serialize into an in-memory buffer to obtain the bytes to display.
    let mut cursor = Cursor::new(Vec::new());
    s.serialize(&mut cursor).unwrap();
    let array = cursor.into_inner();
    let mut output = String::new();
    for (row, chunk) in array.chunks(16).enumerate() {
        // Offset column.
        output.push_str(&format!("{:08X} | ", row * 16));
        // Hex byte column, red where the byte mismatches (or outruns) `buf`.
        for (col, &b) in chunk.iter().enumerate() {
            let i = row * 16 + col;
            if buf.len() <= i || buf[i] != b {
                output.push_str(&format!("{}{:02X}{} ", COLOR_RED, b, COLOR_RESET));
            } else {
                output.push_str(&format!("{:02X} ", b));
            }
        }
        // Pad a short final row so the ASCII column lines up.
        for _ in chunk.len()..16 {
            output.push_str("   ");
        }
        output.push_str("| ");
        // ASCII column: printable range only, everything else is a dot.
        for &b in chunk {
            if b > 31u8 && b < 127u8 {
                output.push(b as char);
            } else {
                output.push('.');
            }
        }
        output.push('\n');
    }
    output
}
/// Reads a raw BB message buffer
pub fn read_bb_msg(src: &mut Read) -> io::Result<Vec<u8>> {
let mut hdr = vec![0u8; 8];
debug!("reading header...");
try!(read_exact(src, &mut hdr));
let size = hdr[0] as usize + ((hdr[1] as usize) << 8);
debug!("msg size: {}", size);
let padding = if size % 8 == 0 { 0 } else { 8 - (size % 8) };
let mut body = vec![0u8; size + padding - 8];
debug!("reading body...");
try!(read_exact(src, &mut body));
hdr.append(&mut body);
Ok(hdr)
} | let mut cursor = Cursor::new(Vec::new());
s.serialize(&mut cursor).unwrap();
let array = cursor.into_inner(); | random_line_split |
last-use-in-block.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #1818
// Regression check: `s` is moved into `f(s)` inside a loop body, and the
// `return` makes that the last use on every path through the loop, so the
// borrow checker must accept the move even though `s` is a by-value param.
fn lp<T, F>(s: String, mut f: F) -> T where F: FnMut(String) -> T {
    while false {
        let r = f(s);
        return (r);
    }
    // Unreachable at runtime (loop never executes); present so the function
    // type-checks with return type `T`.
    panic!();
}
fn | <T, F>(s: String, mut f: F) -> T where F: FnMut(String) -> T {
fn g<T, F>(s: String, mut f: F) -> T where F: FnMut(String) -> T {f(s)}
g(s, |v| { let r = f(v); r })
}
pub fn main() {}
| apply | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.