file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
members.rs
|
use db::Conn;
use errors::Result;
use models::{Member, MemberChangeset};
use schema::members;
use std::ops::Deref;
use rocket::Route;
use rocket_contrib::Json;
use diesel;
use diesel::prelude::*;
pub fn routes() -> Vec<Route> {
routes![index, update]
}
#[get("/", format = "application/json")]
fn
|
(conn: Conn) -> Result<Json<Vec<Member>>> {
let members = members::dsl::members.load(conn.deref())?;
Ok(Json(members))
}
#[patch("/<id>", format = "application/json", data = "<member_changeset>")]
fn update(id: i32, member_changeset: Json<MemberChangeset>, conn: Conn) -> Result<Json<Member>> {
let member: Member = diesel::update(members::table.find(id))
.set(&member_changeset.into_inner())
.get_result::<Member>(conn.deref())?;
Ok(Json(member))
}
#[cfg(test)]
mod test {
extern crate serde_json;
use db::default_pool;
use models::{NewTeam, Team, NewMember, Member};
use schema::{members, teams};
use web::app;
use diesel::prelude::*;
use diesel;
use rocket::http::{ContentType, Status};
use rocket::local::Client;
use std::ops::Deref;
#[test]
fn test_index() {
let app = app(default_pool());
let client = Client::new(app).unwrap();
let response = client
.get("/members")
.header(ContentType::JSON)
.dispatch();
assert_eq!(response.status(), Status::Ok);
}
#[test]
fn test_update() {
let pool = default_pool();
let app = app(pool.clone());
let conn = pool.get().unwrap();
let new_team = NewTeam::new(5.0);
let team = diesel::insert(&new_team)
.into(teams::table)
.get_result::<Team>(conn.deref())
.unwrap();
let mike = NewMember::new(&team, "Mike", 1, true);
let member = diesel::insert(&mike)
.into(members::table)
.get_result::<Member>(conn.deref())
.unwrap();
let request_body = json!({ "driver": false });
let client = Client::new(app).unwrap();
let mut response = client
.patch(format!("/members/{}", member.id))
.header(ContentType::JSON)
.body(request_body.to_string())
.dispatch();
let body = response.body().unwrap().into_string().unwrap();
let member_response: Member = serde_json::from_str(&body).unwrap();
assert_eq!(response.status(), Status::Ok);
assert_eq!(member_response.driver, false);
}
}
|
index
|
identifier_name
|
members.rs
|
use db::Conn;
use errors::Result;
use models::{Member, MemberChangeset};
use schema::members;
use std::ops::Deref;
use rocket::Route;
use rocket_contrib::Json;
use diesel;
use diesel::prelude::*;
pub fn routes() -> Vec<Route> {
routes![index, update]
}
#[get("/", format = "application/json")]
fn index(conn: Conn) -> Result<Json<Vec<Member>>> {
let members = members::dsl::members.load(conn.deref())?;
Ok(Json(members))
}
#[patch("/<id>", format = "application/json", data = "<member_changeset>")]
fn update(id: i32, member_changeset: Json<MemberChangeset>, conn: Conn) -> Result<Json<Member>> {
let member: Member = diesel::update(members::table.find(id))
.set(&member_changeset.into_inner())
.get_result::<Member>(conn.deref())?;
Ok(Json(member))
}
#[cfg(test)]
mod test {
extern crate serde_json;
use db::default_pool;
use models::{NewTeam, Team, NewMember, Member};
use schema::{members, teams};
use web::app;
use diesel::prelude::*;
use diesel;
use rocket::http::{ContentType, Status};
use rocket::local::Client;
use std::ops::Deref;
#[test]
fn test_index() {
let app = app(default_pool());
let client = Client::new(app).unwrap();
let response = client
.get("/members")
.header(ContentType::JSON)
.dispatch();
assert_eq!(response.status(), Status::Ok);
}
|
#[test]
fn test_update() {
let pool = default_pool();
let app = app(pool.clone());
let conn = pool.get().unwrap();
let new_team = NewTeam::new(5.0);
let team = diesel::insert(&new_team)
.into(teams::table)
.get_result::<Team>(conn.deref())
.unwrap();
let mike = NewMember::new(&team, "Mike", 1, true);
let member = diesel::insert(&mike)
.into(members::table)
.get_result::<Member>(conn.deref())
.unwrap();
let request_body = json!({ "driver": false });
let client = Client::new(app).unwrap();
let mut response = client
.patch(format!("/members/{}", member.id))
.header(ContentType::JSON)
.body(request_body.to_string())
.dispatch();
let body = response.body().unwrap().into_string().unwrap();
let member_response: Member = serde_json::from_str(&body).unwrap();
assert_eq!(response.status(), Status::Ok);
assert_eq!(member_response.driver, false);
}
}
|
random_line_split
|
|
members.rs
|
use db::Conn;
use errors::Result;
use models::{Member, MemberChangeset};
use schema::members;
use std::ops::Deref;
use rocket::Route;
use rocket_contrib::Json;
use diesel;
use diesel::prelude::*;
pub fn routes() -> Vec<Route> {
routes![index, update]
}
#[get("/", format = "application/json")]
fn index(conn: Conn) -> Result<Json<Vec<Member>>> {
let members = members::dsl::members.load(conn.deref())?;
Ok(Json(members))
}
#[patch("/<id>", format = "application/json", data = "<member_changeset>")]
fn update(id: i32, member_changeset: Json<MemberChangeset>, conn: Conn) -> Result<Json<Member>> {
let member: Member = diesel::update(members::table.find(id))
.set(&member_changeset.into_inner())
.get_result::<Member>(conn.deref())?;
Ok(Json(member))
}
#[cfg(test)]
mod test {
extern crate serde_json;
use db::default_pool;
use models::{NewTeam, Team, NewMember, Member};
use schema::{members, teams};
use web::app;
use diesel::prelude::*;
use diesel;
use rocket::http::{ContentType, Status};
use rocket::local::Client;
use std::ops::Deref;
#[test]
fn test_index()
|
#[test]
fn test_update() {
let pool = default_pool();
let app = app(pool.clone());
let conn = pool.get().unwrap();
let new_team = NewTeam::new(5.0);
let team = diesel::insert(&new_team)
.into(teams::table)
.get_result::<Team>(conn.deref())
.unwrap();
let mike = NewMember::new(&team, "Mike", 1, true);
let member = diesel::insert(&mike)
.into(members::table)
.get_result::<Member>(conn.deref())
.unwrap();
let request_body = json!({ "driver": false });
let client = Client::new(app).unwrap();
let mut response = client
.patch(format!("/members/{}", member.id))
.header(ContentType::JSON)
.body(request_body.to_string())
.dispatch();
let body = response.body().unwrap().into_string().unwrap();
let member_response: Member = serde_json::from_str(&body).unwrap();
assert_eq!(response.status(), Status::Ok);
assert_eq!(member_response.driver, false);
}
}
|
{
let app = app(default_pool());
let client = Client::new(app).unwrap();
let response = client
.get("/members")
.header(ContentType::JSON)
.dispatch();
assert_eq!(response.status(), Status::Ok);
}
|
identifier_body
|
flags.rs
|
use std;
use libc::c_int;
/// Rust alternative to Qt's `QFlags` types.
///
/// `Flags<E>` is an OR-combination of integer values of the enum type `E`.
#[derive(Clone, Copy)]
pub struct Flags<E: FlaggableEnum> {
value: c_int,
_phantom_data: std::marker::PhantomData<E>,
}
impl<E: FlaggableEnum> Flags<E> {
/// Converts integer `value` to `Flags`.
pub fn from_int(value: c_int) -> Self {
Flags {
value: value,
_phantom_data: std::marker::PhantomData,
}
}
/// Converts `value` to `Flags` containing that single value.
pub fn from_enum(value: E) -> Self {
Self::from_int(value.to_flag_value())
}
/// Converts `Flags` to integer.
pub fn to_int(self) -> c_int {
self.value
}
/// Returns `true` if `flag` is enabled in `self`.
pub fn test_flag(self, flag: E) -> bool {
self.value & flag.to_flag_value()!= 0
}
/// Returns `true` if this value has no flags enabled.
pub fn is_empty(self) -> bool {
self.value == 0
}
}
impl<E: FlaggableEnum, T: EnumOrFlags<E>> std::ops::BitOr<T> for Flags<E> {
type Output = Flags<E>;
fn bitor(self, rhs: T) -> Flags<E> {
let mut r = self.clone();
r.value |= rhs.to_flags().to_int();
r
}
}
impl<E: FlaggableEnum, T: EnumOrFlags<E>> std::ops::BitAnd<T> for Flags<E> {
type Output = Flags<E>;
fn bitand(self, rhs: T) -> Flags<E> {
let mut r = self.clone();
r.value &= rhs.to_flags().to_int();
r
}
}
impl<E: FlaggableEnum, T: EnumOrFlags<E>> std::ops::BitXor<T> for Flags<E> {
type Output = Flags<E>;
fn bitxor(self, rhs: T) -> Flags<E> {
let mut r = self.clone();
r.value ^= rhs.to_flags().to_int();
r
}
}
/// Enum type with values suitable for constructing OR-combinations for `Flags`.
pub trait FlaggableEnum: Sized + Clone {
/// Returns integer value of this enum variant.
fn to_flag_value(self) -> c_int;
/// Returns name of the type for debug output.
fn enum_name() -> &'static str;
}
/// Trait representing types that can be converted to `Flags`.
pub trait EnumOrFlags<T: FlaggableEnum> {
fn to_flags(self) -> Flags<T>;
}
// TODO: use Into and From traits instead
impl<T: FlaggableEnum> EnumOrFlags<T> for Flags<T>
where T: Clone
{
fn to_flags(self) -> Flags<T> {
self.clone()
}
}
impl<T: FlaggableEnum> std::fmt::Debug for Flags<T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result
|
}
impl<T: FlaggableEnum> Default for Flags<T> {
fn default() -> Self {
Flags {
value: 0,
_phantom_data: std::marker::PhantomData,
}
}
}
|
{
write!(f, "Flags<{}>({})", T::enum_name(), self.value)
}
|
identifier_body
|
flags.rs
|
use std;
use libc::c_int;
/// Rust alternative to Qt's `QFlags` types.
///
/// `Flags<E>` is an OR-combination of integer values of the enum type `E`.
#[derive(Clone, Copy)]
pub struct Flags<E: FlaggableEnum> {
value: c_int,
_phantom_data: std::marker::PhantomData<E>,
}
impl<E: FlaggableEnum> Flags<E> {
/// Converts integer `value` to `Flags`.
pub fn from_int(value: c_int) -> Self {
Flags {
value: value,
_phantom_data: std::marker::PhantomData,
}
}
/// Converts `value` to `Flags` containing that single value.
pub fn
|
(value: E) -> Self {
Self::from_int(value.to_flag_value())
}
/// Converts `Flags` to integer.
pub fn to_int(self) -> c_int {
self.value
}
/// Returns `true` if `flag` is enabled in `self`.
pub fn test_flag(self, flag: E) -> bool {
self.value & flag.to_flag_value()!= 0
}
/// Returns `true` if this value has no flags enabled.
pub fn is_empty(self) -> bool {
self.value == 0
}
}
impl<E: FlaggableEnum, T: EnumOrFlags<E>> std::ops::BitOr<T> for Flags<E> {
type Output = Flags<E>;
fn bitor(self, rhs: T) -> Flags<E> {
let mut r = self.clone();
r.value |= rhs.to_flags().to_int();
r
}
}
impl<E: FlaggableEnum, T: EnumOrFlags<E>> std::ops::BitAnd<T> for Flags<E> {
type Output = Flags<E>;
fn bitand(self, rhs: T) -> Flags<E> {
let mut r = self.clone();
r.value &= rhs.to_flags().to_int();
r
}
}
impl<E: FlaggableEnum, T: EnumOrFlags<E>> std::ops::BitXor<T> for Flags<E> {
type Output = Flags<E>;
fn bitxor(self, rhs: T) -> Flags<E> {
let mut r = self.clone();
r.value ^= rhs.to_flags().to_int();
r
}
}
/// Enum type with values suitable for constructing OR-combinations for `Flags`.
pub trait FlaggableEnum: Sized + Clone {
/// Returns integer value of this enum variant.
fn to_flag_value(self) -> c_int;
/// Returns name of the type for debug output.
fn enum_name() -> &'static str;
}
/// Trait representing types that can be converted to `Flags`.
pub trait EnumOrFlags<T: FlaggableEnum> {
fn to_flags(self) -> Flags<T>;
}
// TODO: use Into and From traits instead
impl<T: FlaggableEnum> EnumOrFlags<T> for Flags<T>
where T: Clone
{
fn to_flags(self) -> Flags<T> {
self.clone()
}
}
impl<T: FlaggableEnum> std::fmt::Debug for Flags<T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Flags<{}>({})", T::enum_name(), self.value)
}
}
impl<T: FlaggableEnum> Default for Flags<T> {
fn default() -> Self {
Flags {
value: 0,
_phantom_data: std::marker::PhantomData,
}
}
}
|
from_enum
|
identifier_name
|
flags.rs
|
use std;
use libc::c_int;
/// Rust alternative to Qt's `QFlags` types.
///
/// `Flags<E>` is an OR-combination of integer values of the enum type `E`.
#[derive(Clone, Copy)]
pub struct Flags<E: FlaggableEnum> {
value: c_int,
_phantom_data: std::marker::PhantomData<E>,
}
impl<E: FlaggableEnum> Flags<E> {
/// Converts integer `value` to `Flags`.
pub fn from_int(value: c_int) -> Self {
Flags {
value: value,
_phantom_data: std::marker::PhantomData,
}
}
/// Converts `value` to `Flags` containing that single value.
pub fn from_enum(value: E) -> Self {
Self::from_int(value.to_flag_value())
}
/// Converts `Flags` to integer.
pub fn to_int(self) -> c_int {
self.value
}
/// Returns `true` if `flag` is enabled in `self`.
pub fn test_flag(self, flag: E) -> bool {
self.value & flag.to_flag_value()!= 0
}
/// Returns `true` if this value has no flags enabled.
pub fn is_empty(self) -> bool {
self.value == 0
}
}
impl<E: FlaggableEnum, T: EnumOrFlags<E>> std::ops::BitOr<T> for Flags<E> {
type Output = Flags<E>;
fn bitor(self, rhs: T) -> Flags<E> {
let mut r = self.clone();
r.value |= rhs.to_flags().to_int();
r
}
}
impl<E: FlaggableEnum, T: EnumOrFlags<E>> std::ops::BitAnd<T> for Flags<E> {
type Output = Flags<E>;
fn bitand(self, rhs: T) -> Flags<E> {
let mut r = self.clone();
r.value &= rhs.to_flags().to_int();
r
}
}
impl<E: FlaggableEnum, T: EnumOrFlags<E>> std::ops::BitXor<T> for Flags<E> {
type Output = Flags<E>;
fn bitxor(self, rhs: T) -> Flags<E> {
let mut r = self.clone();
r.value ^= rhs.to_flags().to_int();
r
}
}
/// Enum type with values suitable for constructing OR-combinations for `Flags`.
pub trait FlaggableEnum: Sized + Clone {
/// Returns integer value of this enum variant.
fn to_flag_value(self) -> c_int;
/// Returns name of the type for debug output.
fn enum_name() -> &'static str;
}
|
/// Trait representing types that can be converted to `Flags`.
pub trait EnumOrFlags<T: FlaggableEnum> {
fn to_flags(self) -> Flags<T>;
}
// TODO: use Into and From traits instead
impl<T: FlaggableEnum> EnumOrFlags<T> for Flags<T>
where T: Clone
{
fn to_flags(self) -> Flags<T> {
self.clone()
}
}
impl<T: FlaggableEnum> std::fmt::Debug for Flags<T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Flags<{}>({})", T::enum_name(), self.value)
}
}
impl<T: FlaggableEnum> Default for Flags<T> {
fn default() -> Self {
Flags {
value: 0,
_phantom_data: std::marker::PhantomData,
}
}
}
|
random_line_split
|
|
scopes.rs
|
use {ast, typeinf, util};
use core::{Src, CompletionType, Session};
#[cfg(test)] use core;
use std::iter::Iterator;
use std::path::Path;
use std::str::from_utf8;
use util::char_at;
fn find_close<'a, A>(iter: A, open: u8, close: u8, level_end: u32) -> Option<usize> where A: Iterator<Item=&'a u8> {
let mut levels = 0u32;
for (count, &b) in iter.enumerate() {
if b == close {
if levels == level_end { return Some(count); }
levels -= 1;
} else if b == open { levels += 1; }
}
None
}
pub fn find_closing_paren(src: &str, pos: usize) -> usize {
find_close(src.as_bytes()[pos..].iter(), b'(', b')', 0)
.map_or(src.len(), |count| pos + count)
}
pub fn scope_start(src: Src, point: usize) -> usize {
let masked_src = mask_comments(src.to(point));
find_close(masked_src.as_bytes().iter().rev(), b'}', b'{', 0)
.map_or(0, |count| point - count)
}
pub fn find_stmt_start(msrc: Src, point: usize) -> Option<usize> {
// iterate the scope to find the start of the statement
let scopestart = scope_start(msrc, point);
msrc.from(scopestart).iter_stmts()
.find(|&(_, end)| scopestart + end > point)
.map(|(start, _)| scopestart + start)
}
pub fn get_local_module_path(msrc: Src, point: usize) -> Vec<String> {
let mut v = Vec::new();
get_local_module_path_(msrc, point, &mut v);
v
}
fn get_local_module_path_(msrc: Src, point: usize, out: &mut Vec<String>) {
for (start, end) in msrc.iter_stmts() {
if start < point && end > point {
let blob = msrc.from_to(start, end);
if blob.starts_with("pub mod ") || blob.starts_with("mod ") {
let p = typeinf::generate_skeleton_for_parsing(&blob);
ast::parse_mod(p).name.map(|name| {
out.push(name);
let newstart = blob.find("{").unwrap() + 1;
get_local_module_path_(blob.from(newstart),
point - start - newstart, out);
});
}
}
}
}
pub fn find_impl_start(msrc: Src, point: usize, scopestart: usize) -> Option<usize> {
let len = point-scopestart;
match msrc.from(scopestart).iter_stmts().find(|&(_, end)| end > len) {
Some((start, _)) => {
let blob = msrc.from(scopestart + start);
// TODO:: the following is a bit weak at matching traits. make this better
if blob.starts_with("impl") || blob.starts_with("trait") || blob.starts_with("pub trait") {
Some(scopestart + start)
} else {
let newstart = blob.find("{").unwrap() + 1;
find_impl_start(msrc, point, scopestart+start+newstart)
}
},
None => None
}
}
#[test]
fn finds_subnested_module() {
use core;
let src = "
pub mod foo {
pub mod bar {
here
}
}";
let point = coords_to_point(&src, 4, 12);
let src = core::new_source(String::from(src));
let v = get_local_module_path(src.as_ref(), point);
assert_eq!("foo", &v[0][..]);
assert_eq!("bar", &v[1][..]);
let point = coords_to_point(&src, 3, 8);
let v = get_local_module_path(src.as_ref(), point);
assert_eq!("foo", &v[0][..]);
}
pub fn split_into_context_and_completion(s: &str) -> (&str, &str, CompletionType) {
match s.char_indices().rev().find(|&(_, c)|!util::is_ident_char(c)) {
Some((i,c)) => {
//println!("PHIL s '{}' i {} c '{}'",s,i,c);
match c {
'.' => (&s[..i], &s[(i+1)..], CompletionType::CompleteField),
':' if s.len() > 1 => (&s[..(i-1)], &s[(i+1)..], CompletionType::CompletePath),
_ => (&s[..(i+1)], &s[(i+1)..], CompletionType::CompletePath)
}
},
None => ("", s, CompletionType::CompletePath)
}
}
pub fn get_start_of_search_expr(src: &str, point: usize) -> usize {
let mut i = point;
let mut levels = 0u32;
for &b in src.as_bytes()[..point].iter().rev() {
i -= 1;
match b {
b'(' => {
if levels == 0 { return i+1; }
levels -= 1;
},
b')' => { levels += 1; },
_ => {
if levels == 0 &&
!util::is_search_expr_char(char_at(src, i)) ||
util::is_double_dot(src,i) {
return i+1;
}
}
}
}
0
}
pub fn get_start_of_pattern(src: &str, point: usize) -> usize {
let mut i = point-1;
let mut levels = 0u32;
for &b in src.as_bytes()[..point].iter().rev() {
match b {
b'(' => {
if levels == 0 { return i+1; }
levels -= 1;
},
b')' => { levels += 1; },
_ => {
if levels == 0 &&
!util::is_pattern_char(char_at(src, i)) {
return i+1;
}
}
}
i -= 1;
}
0
}
#[test]
fn get_start_of_pattern_handles_variant() {
|
assert_eq!(4, get_start_of_pattern("bla, ast::PatTup(ref tuple_elements) => {",36));
}
pub fn expand_search_expr(msrc: &str, point: usize) -> (usize, usize) {
let start = get_start_of_search_expr(msrc, point);
(start, util::find_ident_end(msrc, point))
}
#[test]
fn expand_search_expr_finds_ident() {
assert_eq!((0, 7), expand_search_expr("foo.bar", 5))
}
#[test]
fn expand_search_expr_handles_chained_calls() {
assert_eq!((0, 20), expand_search_expr("yeah::blah.foo().bar", 18))
}
#[test]
fn expand_search_expr_handles_inline_closures() {
assert_eq!((0, 24), expand_search_expr("yeah::blah.foo(||{}).bar", 22))
}
#[test]
fn expand_search_expr_handles_a_function_arg() {
assert_eq!((5, 25), expand_search_expr("myfn(foo::new().baz().com)", 23))
}
#[test]
fn expand_search_expr_handles_macros() {
assert_eq!((0, 9), expand_search_expr("my_macro!()", 9))
}
#[test]
fn expand_search_expr_handles_pos_at_end_of_search_str() {
assert_eq!((0, 7), expand_search_expr("foo.bar", 7))
}
pub fn mask_comments(src: Src) -> String {
let mut result = String::with_capacity(src.len());
let buf_byte = &[b' '; 128];
let buffer = from_utf8(buf_byte).unwrap();
let mut prev: usize = 0;
for (start, end) in src.chunk_indices() {
for _ in 0..((start-prev)/128) { result.push_str(buffer); }
result.push_str(&buffer[..((start-prev)%128)]);
result.push_str(&src[start..end]);
prev = end;
}
result
}
pub fn mask_sub_scopes(src: &str) -> String {
let mut result = String::with_capacity(src.len());
let buf_byte = [b' '; 128];
let buffer = from_utf8(&buf_byte).unwrap();
let mut levels = 0i32;
let mut start = 0usize;
let mut pos = 0usize;
for &b in src.as_bytes() {
pos += 1;
match b {
b'{' => {
if levels == 0 {
result.push_str(&src[start..(pos)]);
start = pos+1;
}
levels += 1;
},
b'}' => {
if levels == 1 {
let num_spaces = pos-start;
for _ in 0..(num_spaces/128) { result.push_str(buffer); }
result.push_str(&buffer[..((num_spaces)%128)]);
result.push_str("}");
start = pos;
}
levels -= 1;
},
b'\n' if levels > 0 => {
for _ in 0..((pos-start)/128) { result.push_str(buffer); }
result.push_str(&buffer[..((pos-start)%128)]);
result.push('\n');
start = pos+1;
},
_ => {}
}
}
if start > pos {
start = pos;
}
if levels > 0 {
for _ in 0..((pos - start)/128) { result.push_str(buffer); }
result.push_str(&buffer[..((pos-start)%128)]);
} else {
result.push_str(&src[start..pos]);
}
result
}
pub fn end_of_next_scope(src: &str) -> &str {
match find_close(src.as_bytes().iter(), b'{', b'}', 1) {
Some(count) => &src[..count+1],
None => ""
}
}
pub fn coords_to_point(src: &str, mut linenum: usize, col: usize) -> usize {
let mut point = 0;
for line in src.split('\n') {
linenum -= 1;
if linenum == 0 { break }
point += line.len() + 1; // +1 for the \n
}
point + col
}
pub fn point_to_coords(src: &str, point: usize) -> (usize, usize) {
let mut linestart = 0;
let mut nlines = 1; // lines start at 1
for (i, &b) in src[..point].as_bytes().iter().enumerate() {
if b == b'\n' {
nlines += 1;
linestart = i+1;
}
}
(nlines, point - linestart)
}
pub fn point_to_coords_from_file(path: &Path, point: usize, session: &Session) -> Option<(usize, usize)> {
let mut p = 0;
for (lineno, line) in session.load_file(path).split('\n').enumerate() {
if point < (p + line.len()) {
return Some((lineno+1, point - p));
}
p += line.len() + 1; // +1 for the newline char
}
None
}
#[test]
fn coords_to_point_works() {
let src = "
fn myfn() {
let a = 3;
print(a);
}";
assert!(coords_to_point(src, 3, 5) == 18);
}
#[test]
fn test_scope_start() {
let src = String::from("
fn myfn() {
let a = 3;
print(a);
}
");
let src = core::new_source(src);
let point = coords_to_point(&src, 4, 10);
let start = scope_start(src.as_ref(), point);
assert!(start == 12);
}
#[test]
fn test_scope_start_handles_sub_scopes() {
let src = String::from("
fn myfn() {
let a = 3;
{
let b = 4;
}
print(a);
}
");
let src = core::new_source(src);
let point = coords_to_point(&src, 7, 10);
let start = scope_start(src.as_ref(), point);
assert!(start == 12);
}
#[test]
fn masks_out_comments() {
let src = String::from("
this is some code
this is a line // with a comment
some more
");
let src = core::new_source(src);
let r = mask_comments(src.as_ref());
assert!(src.len() == r.len());
// characters at the start are the same
assert!(src.as_bytes()[5] == r.as_bytes()[5]);
// characters in the comments are masked
let commentoffset = coords_to_point(&src,3,23);
assert!(char_at(&r, commentoffset) =='');
assert!(src.as_bytes()[commentoffset]!= r.as_bytes()[commentoffset]);
// characters afterwards are the same
assert!(src.as_bytes()[src.len()-3] == r.as_bytes()[src.len()-3]);
}
#[test]
fn test_point_to_coords() {
let src = "
fn myfn(b:usize) {
let a = 3;
if b == 12 {
let a = 24;
do_something_with(a);
}
do_something_with(a);
}
";
round_trip_point_and_coords(src, 4, 5);
}
pub fn round_trip_point_and_coords(src: &str, lineno: usize, charno: usize) {
let (a,b) = point_to_coords(src, coords_to_point(src, lineno, charno));
assert_eq!((a,b), (lineno,charno));
}
#[test]
fn finds_end_of_struct_scope() {
let src="
struct foo {
a: usize,
blah: ~str
}
Some other junk";
let expected="
struct foo {
a: usize,
blah: ~str
}";
let s = end_of_next_scope(src);
assert_eq!(expected, s);
}
|
assert_eq!(4, get_start_of_pattern("foo, Some(a) =>",13));
}
#[test]
fn get_start_of_pattern_handles_variant2() {
|
random_line_split
|
scopes.rs
|
use {ast, typeinf, util};
use core::{Src, CompletionType, Session};
#[cfg(test)] use core;
use std::iter::Iterator;
use std::path::Path;
use std::str::from_utf8;
use util::char_at;
fn find_close<'a, A>(iter: A, open: u8, close: u8, level_end: u32) -> Option<usize> where A: Iterator<Item=&'a u8> {
let mut levels = 0u32;
for (count, &b) in iter.enumerate() {
if b == close {
if levels == level_end { return Some(count); }
levels -= 1;
} else if b == open { levels += 1; }
}
None
}
pub fn find_closing_paren(src: &str, pos: usize) -> usize {
find_close(src.as_bytes()[pos..].iter(), b'(', b')', 0)
.map_or(src.len(), |count| pos + count)
}
pub fn scope_start(src: Src, point: usize) -> usize {
let masked_src = mask_comments(src.to(point));
find_close(masked_src.as_bytes().iter().rev(), b'}', b'{', 0)
.map_or(0, |count| point - count)
}
pub fn find_stmt_start(msrc: Src, point: usize) -> Option<usize>
|
pub fn get_local_module_path(msrc: Src, point: usize) -> Vec<String> {
let mut v = Vec::new();
get_local_module_path_(msrc, point, &mut v);
v
}
fn get_local_module_path_(msrc: Src, point: usize, out: &mut Vec<String>) {
for (start, end) in msrc.iter_stmts() {
if start < point && end > point {
let blob = msrc.from_to(start, end);
if blob.starts_with("pub mod ") || blob.starts_with("mod ") {
let p = typeinf::generate_skeleton_for_parsing(&blob);
ast::parse_mod(p).name.map(|name| {
out.push(name);
let newstart = blob.find("{").unwrap() + 1;
get_local_module_path_(blob.from(newstart),
point - start - newstart, out);
});
}
}
}
}
pub fn find_impl_start(msrc: Src, point: usize, scopestart: usize) -> Option<usize> {
let len = point-scopestart;
match msrc.from(scopestart).iter_stmts().find(|&(_, end)| end > len) {
Some((start, _)) => {
let blob = msrc.from(scopestart + start);
// TODO:: the following is a bit weak at matching traits. make this better
if blob.starts_with("impl") || blob.starts_with("trait") || blob.starts_with("pub trait") {
Some(scopestart + start)
} else {
let newstart = blob.find("{").unwrap() + 1;
find_impl_start(msrc, point, scopestart+start+newstart)
}
},
None => None
}
}
#[test]
fn finds_subnested_module() {
use core;
let src = "
pub mod foo {
pub mod bar {
here
}
}";
let point = coords_to_point(&src, 4, 12);
let src = core::new_source(String::from(src));
let v = get_local_module_path(src.as_ref(), point);
assert_eq!("foo", &v[0][..]);
assert_eq!("bar", &v[1][..]);
let point = coords_to_point(&src, 3, 8);
let v = get_local_module_path(src.as_ref(), point);
assert_eq!("foo", &v[0][..]);
}
pub fn split_into_context_and_completion(s: &str) -> (&str, &str, CompletionType) {
match s.char_indices().rev().find(|&(_, c)|!util::is_ident_char(c)) {
Some((i,c)) => {
//println!("PHIL s '{}' i {} c '{}'",s,i,c);
match c {
'.' => (&s[..i], &s[(i+1)..], CompletionType::CompleteField),
':' if s.len() > 1 => (&s[..(i-1)], &s[(i+1)..], CompletionType::CompletePath),
_ => (&s[..(i+1)], &s[(i+1)..], CompletionType::CompletePath)
}
},
None => ("", s, CompletionType::CompletePath)
}
}
pub fn get_start_of_search_expr(src: &str, point: usize) -> usize {
let mut i = point;
let mut levels = 0u32;
for &b in src.as_bytes()[..point].iter().rev() {
i -= 1;
match b {
b'(' => {
if levels == 0 { return i+1; }
levels -= 1;
},
b')' => { levels += 1; },
_ => {
if levels == 0 &&
!util::is_search_expr_char(char_at(src, i)) ||
util::is_double_dot(src,i) {
return i+1;
}
}
}
}
0
}
pub fn get_start_of_pattern(src: &str, point: usize) -> usize {
let mut i = point-1;
let mut levels = 0u32;
for &b in src.as_bytes()[..point].iter().rev() {
match b {
b'(' => {
if levels == 0 { return i+1; }
levels -= 1;
},
b')' => { levels += 1; },
_ => {
if levels == 0 &&
!util::is_pattern_char(char_at(src, i)) {
return i+1;
}
}
}
i -= 1;
}
0
}
#[test]
fn get_start_of_pattern_handles_variant() {
assert_eq!(4, get_start_of_pattern("foo, Some(a) =>",13));
}
#[test]
fn get_start_of_pattern_handles_variant2() {
assert_eq!(4, get_start_of_pattern("bla, ast::PatTup(ref tuple_elements) => {",36));
}
pub fn expand_search_expr(msrc: &str, point: usize) -> (usize, usize) {
let start = get_start_of_search_expr(msrc, point);
(start, util::find_ident_end(msrc, point))
}
#[test]
fn expand_search_expr_finds_ident() {
assert_eq!((0, 7), expand_search_expr("foo.bar", 5))
}
#[test]
fn expand_search_expr_handles_chained_calls() {
assert_eq!((0, 20), expand_search_expr("yeah::blah.foo().bar", 18))
}
#[test]
fn expand_search_expr_handles_inline_closures() {
assert_eq!((0, 24), expand_search_expr("yeah::blah.foo(||{}).bar", 22))
}
#[test]
fn expand_search_expr_handles_a_function_arg() {
assert_eq!((5, 25), expand_search_expr("myfn(foo::new().baz().com)", 23))
}
#[test]
fn expand_search_expr_handles_macros() {
assert_eq!((0, 9), expand_search_expr("my_macro!()", 9))
}
#[test]
fn expand_search_expr_handles_pos_at_end_of_search_str() {
assert_eq!((0, 7), expand_search_expr("foo.bar", 7))
}
pub fn mask_comments(src: Src) -> String {
let mut result = String::with_capacity(src.len());
let buf_byte = &[b' '; 128];
let buffer = from_utf8(buf_byte).unwrap();
let mut prev: usize = 0;
for (start, end) in src.chunk_indices() {
for _ in 0..((start-prev)/128) { result.push_str(buffer); }
result.push_str(&buffer[..((start-prev)%128)]);
result.push_str(&src[start..end]);
prev = end;
}
result
}
pub fn mask_sub_scopes(src: &str) -> String {
let mut result = String::with_capacity(src.len());
let buf_byte = [b' '; 128];
let buffer = from_utf8(&buf_byte).unwrap();
let mut levels = 0i32;
let mut start = 0usize;
let mut pos = 0usize;
for &b in src.as_bytes() {
pos += 1;
match b {
b'{' => {
if levels == 0 {
result.push_str(&src[start..(pos)]);
start = pos+1;
}
levels += 1;
},
b'}' => {
if levels == 1 {
let num_spaces = pos-start;
for _ in 0..(num_spaces/128) { result.push_str(buffer); }
result.push_str(&buffer[..((num_spaces)%128)]);
result.push_str("}");
start = pos;
}
levels -= 1;
},
b'\n' if levels > 0 => {
for _ in 0..((pos-start)/128) { result.push_str(buffer); }
result.push_str(&buffer[..((pos-start)%128)]);
result.push('\n');
start = pos+1;
},
_ => {}
}
}
if start > pos {
start = pos;
}
if levels > 0 {
for _ in 0..((pos - start)/128) { result.push_str(buffer); }
result.push_str(&buffer[..((pos-start)%128)]);
} else {
result.push_str(&src[start..pos]);
}
result
}
pub fn end_of_next_scope(src: &str) -> &str {
match find_close(src.as_bytes().iter(), b'{', b'}', 1) {
Some(count) => &src[..count+1],
None => ""
}
}
pub fn coords_to_point(src: &str, mut linenum: usize, col: usize) -> usize {
let mut point = 0;
for line in src.split('\n') {
linenum -= 1;
if linenum == 0 { break }
point += line.len() + 1; // +1 for the \n
}
point + col
}
pub fn point_to_coords(src: &str, point: usize) -> (usize, usize) {
let mut linestart = 0;
let mut nlines = 1; // lines start at 1
for (i, &b) in src[..point].as_bytes().iter().enumerate() {
if b == b'\n' {
nlines += 1;
linestart = i+1;
}
}
(nlines, point - linestart)
}
pub fn point_to_coords_from_file(path: &Path, point: usize, session: &Session) -> Option<(usize, usize)> {
let mut p = 0;
for (lineno, line) in session.load_file(path).split('\n').enumerate() {
if point < (p + line.len()) {
return Some((lineno+1, point - p));
}
p += line.len() + 1; // +1 for the newline char
}
None
}
#[test]
fn coords_to_point_works() {
let src = "
fn myfn() {
let a = 3;
print(a);
}";
assert!(coords_to_point(src, 3, 5) == 18);
}
#[test]
fn test_scope_start() {
let src = String::from("
fn myfn() {
let a = 3;
print(a);
}
");
let src = core::new_source(src);
let point = coords_to_point(&src, 4, 10);
let start = scope_start(src.as_ref(), point);
assert!(start == 12);
}
#[test]
fn test_scope_start_handles_sub_scopes() {
let src = String::from("
fn myfn() {
let a = 3;
{
let b = 4;
}
print(a);
}
");
let src = core::new_source(src);
let point = coords_to_point(&src, 7, 10);
let start = scope_start(src.as_ref(), point);
assert!(start == 12);
}
#[test]
fn masks_out_comments() {
let src = String::from("
this is some code
this is a line // with a comment
some more
");
let src = core::new_source(src);
let r = mask_comments(src.as_ref());
assert!(src.len() == r.len());
// characters at the start are the same
assert!(src.as_bytes()[5] == r.as_bytes()[5]);
// characters in the comments are masked
let commentoffset = coords_to_point(&src,3,23);
assert!(char_at(&r, commentoffset) =='');
assert!(src.as_bytes()[commentoffset]!= r.as_bytes()[commentoffset]);
// characters afterwards are the same
assert!(src.as_bytes()[src.len()-3] == r.as_bytes()[src.len()-3]);
}
#[test]
fn test_point_to_coords() {
let src = "
fn myfn(b:usize) {
let a = 3;
if b == 12 {
let a = 24;
do_something_with(a);
}
do_something_with(a);
}
";
round_trip_point_and_coords(src, 4, 5);
}
pub fn round_trip_point_and_coords(src: &str, lineno: usize, charno: usize) {
let (a,b) = point_to_coords(src, coords_to_point(src, lineno, charno));
assert_eq!((a,b), (lineno,charno));
}
#[test]
fn finds_end_of_struct_scope() {
let src="
struct foo {
a: usize,
blah: ~str
}
Some other junk";
let expected="
struct foo {
a: usize,
blah: ~str
}";
let s = end_of_next_scope(src);
assert_eq!(expected, s);
}
|
{
// iterate the scope to find the start of the statement
let scopestart = scope_start(msrc, point);
msrc.from(scopestart).iter_stmts()
.find(|&(_, end)| scopestart + end > point)
.map(|(start, _)| scopestart + start)
}
|
identifier_body
|
scopes.rs
|
use {ast, typeinf, util};
use core::{Src, CompletionType, Session};
#[cfg(test)] use core;
use std::iter::Iterator;
use std::path::Path;
use std::str::from_utf8;
use util::char_at;
/// Scan `iter` for the position of the `close` byte found at nesting depth
/// `level_end`, tracking nesting of `open`/`close` pairs.
///
/// Returns the index (in iterator steps) of that byte, or `None` if the
/// iterator is exhausted first. Depth is tracked with a signed counter so
/// unbalanced input (a stray `close` before any `open`) no longer
/// underflows the counter — the previous `u32` arithmetic panicked in
/// debug builds on such input.
fn find_close<'a, A>(iter: A, open: u8, close: u8, level_end: u32) -> Option<usize> where A: Iterator<Item=&'a u8> {
    // signed depth: may legitimately go negative on malformed source
    let mut levels = 0i64;
    for (count, &b) in iter.enumerate() {
        if b == close {
            if levels == i64::from(level_end) { return Some(count); }
            levels -= 1;
        } else if b == open { levels += 1; }
    }
    None
}
/// Position of the `)` that closes the paren group containing `pos`, or
/// the end of `src` when no closing paren is found.
pub fn find_closing_paren(src: &str, pos: usize) -> usize {
    let tail = src.as_bytes()[pos..].iter();
    match find_close(tail, b'(', b')', 0) {
        Some(count) => pos + count,
        None => src.len(),
    }
}
/// Byte offset where the innermost scope enclosing `point` begins (just
/// after its `{`), scanning backwards over comment-masked source; 0 when
/// `point` is at the top level.
pub fn scope_start(src: Src, point: usize) -> usize {
    let masked = mask_comments(src.to(point));
    let backwards = masked.as_bytes().iter().rev();
    match find_close(backwards, b'}', b'{', 0) {
        Some(count) => point - count,
        None => 0,
    }
}
/// Find the start offset of the statement containing `point`, or `None`
/// when no statement in the enclosing scope spans it.
pub fn find_stmt_start(msrc: Src, point: usize) -> Option<usize> {
    // walk the statements of the enclosing scope until one ends past `point`
    let scopestart = scope_start(msrc, point);
    for (start, end) in msrc.from(scopestart).iter_stmts() {
        if scopestart + end > point {
            return Some(scopestart + start);
        }
    }
    None
}
/// Collect, outermost first, the names of the nested inline `mod` items
/// that enclose `point`.
pub fn get_local_module_path(msrc: Src, point: usize) -> Vec<String> {
    let mut path = Vec::new();
    get_local_module_path_(msrc, point, &mut path);
    path
}
// Recursive worker for `get_local_module_path`: pushes the name of each
// `mod` whose statement span contains `point`, then recurses into that
// module's body with `point` rebased to the body's local offsets.
fn get_local_module_path_(msrc: Src, point: usize, out: &mut Vec<String>) {
    for (start, end) in msrc.iter_stmts() {
        if start < point && end > point {
            let blob = msrc.from_to(start, end);
            if blob.starts_with("pub mod ") || blob.starts_with("mod ") {
                // Parse just enough of the mod item to recover its name.
                let p = typeinf::generate_skeleton_for_parsing(&blob);
                ast::parse_mod(p).name.map(|name| {
                    out.push(name);
                    // Recurse into the body, which begins after the '{'.
                    // NOTE(review): `find("{").unwrap()` assumes an inline
                    // `mod name { ... }`; a declaration-only `mod name;`
                    // reaching here would panic — presumably the span check
                    // above precludes that; confirm.
                    let newstart = blob.find("{").unwrap() + 1;
                    get_local_module_path_(blob.from(newstart),
                                           point - start - newstart, out);
                });
            }
        }
    }
}
// Find the start of the `impl`/`trait` block enclosing `point`, searching
// the statements from `scopestart` and recursing into nested scopes until
// an impl-like statement is found. Returns `None` when there is none.
pub fn find_impl_start(msrc: Src, point: usize, scopestart: usize) -> Option<usize> {
    // offset of `point` relative to the scope currently being searched
    let len = point-scopestart;
    match msrc.from(scopestart).iter_stmts().find(|&(_, end)| end > len) {
        Some((start, _)) => {
            let blob = msrc.from(scopestart + start);
            // TODO:: the following is a bit weak at matching traits. make this better
            if blob.starts_with("impl") || blob.starts_with("trait") || blob.starts_with("pub trait") {
                Some(scopestart + start)
            } else {
                // Not an impl: descend into this statement's body.
                // NOTE(review): `find("{").unwrap()` panics on a brace-less
                // statement — confirm iter_stmts only yields block items here.
                let newstart = blob.find("{").unwrap() + 1;
                find_impl_start(msrc, point, scopestart+start+newstart)
            }
        },
        None => None
    }
}
#[test]
fn finds_subnested_module() {
use core;
let src = "
pub mod foo {
pub mod bar {
here
}
}";
let point = coords_to_point(&src, 4, 12);
let src = core::new_source(String::from(src));
let v = get_local_module_path(src.as_ref(), point);
assert_eq!("foo", &v[0][..]);
assert_eq!("bar", &v[1][..]);
let point = coords_to_point(&src, 3, 8);
let v = get_local_module_path(src.as_ref(), point);
assert_eq!("foo", &v[0][..]);
}
/// Split a search string into (context, completion-prefix, completion type).
///
/// Scans backwards for the last non-identifier character: `.` yields a
/// field completion, `:` (the tail of a `::`) yields a path completion,
/// anything else starts a fresh path completion. The first element is the
/// text before the separator, the second is the partial ident being
/// completed.
pub fn split_into_context_and_completion(s: &str) -> (&str, &str, CompletionType) {
    match s.char_indices().rev().find(|&(_, c)| !util::is_ident_char(c)) {
        Some((i, c)) => {
            match c {
                '.' => (&s[..i], &s[(i+1)..], CompletionType::CompleteField),
                // Guard on `i >= 1` (not `s.len() > 1`): the `i-1` below
                // strips both bytes of a `::`, and a ':' at index 0 (e.g.
                // ":foo") previously made `i - 1` underflow and panic.
                ':' if i >= 1 => (&s[..(i-1)], &s[(i+1)..], CompletionType::CompletePath),
                _ => (&s[..(i+1)], &s[(i+1)..], CompletionType::CompletePath)
            }
        },
        None => ("", s, CompletionType::CompletePath)
    }
}
// Walk backwards from `point` to where the search expression under it
// begins, skipping parenthesised argument lists as a unit by tracking
// paren nesting depth. Returns 0 when the expression starts the buffer.
pub fn get_start_of_search_expr(src: &str, point: usize) -> usize {
    let mut i = point;
    let mut levels = 0u32;
    for &b in src.as_bytes()[..point].iter().rev() {
        i -= 1;
        match b {
            b'(' => {
                // Unmatched '(' while scanning backwards: we've exited the
                // expression's argument list, so the expr starts after it.
                if levels == 0 { return i+1; }
                levels -= 1;
            },
            b')' => { levels += 1; },
            _ => {
                // NOTE(review): `&&` binds tighter than `||`, so a `..`
                // (double dot) terminates the scan even when inside parens
                // — confirm that is intended for range expressions.
                if levels == 0 &&
                    !util::is_search_expr_char(char_at(src, i)) ||
                    util::is_double_dot(src,i) {
                    return i+1;
                }
            }
        }
    }
    0
}
// Walk backwards from `point` to the start of the pattern (e.g. a match
// arm) containing it, skipping parenthesised tuple/variant contents via
// paren-depth tracking. Returns 0 when the pattern starts the buffer.
// NOTE(review): `point - 1` underflows if called with point == 0 — callers
// appear to always pass a position inside a pattern; confirm.
pub fn get_start_of_pattern(src: &str, point: usize) -> usize {
    let mut i = point-1;
    let mut levels = 0u32;
    for &b in src.as_bytes()[..point].iter().rev() {
        match b {
            b'(' => {
                // Unmatched '(' going backwards: pattern starts after it.
                if levels == 0 { return i+1; }
                levels -= 1;
            },
            b')' => { levels += 1; },
            _ => {
                if levels == 0 &&
                    !util::is_pattern_char(char_at(src, i)) {
                    return i+1;
                }
            }
        }
        i -= 1;
    }
    0
}
#[test]
fn get_start_of_pattern_handles_variant() {
assert_eq!(4, get_start_of_pattern("foo, Some(a) =>",13));
}
#[test]
fn get_start_of_pattern_handles_variant2() {
assert_eq!(4, get_start_of_pattern("bla, ast::PatTup(ref tuple_elements) => {",36));
}
/// Expand outward from `point` to the `(start, end)` byte range covering
/// the whole search expression under it.
pub fn expand_search_expr(msrc: &str, point: usize) -> (usize, usize) {
    (get_start_of_search_expr(msrc, point), util::find_ident_end(msrc, point))
}
#[test]
fn expand_search_expr_finds_ident() {
assert_eq!((0, 7), expand_search_expr("foo.bar", 5))
}
#[test]
fn
|
() {
assert_eq!((0, 20), expand_search_expr("yeah::blah.foo().bar", 18))
}
#[test]
fn expand_search_expr_handles_inline_closures() {
assert_eq!((0, 24), expand_search_expr("yeah::blah.foo(||{}).bar", 22))
}
#[test]
fn expand_search_expr_handles_a_function_arg() {
assert_eq!((5, 25), expand_search_expr("myfn(foo::new().baz().com)", 23))
}
#[test]
fn expand_search_expr_handles_macros() {
assert_eq!((0, 9), expand_search_expr("my_macro!()", 9))
}
#[test]
fn expand_search_expr_handles_pos_at_end_of_search_str() {
assert_eq!((0, 7), expand_search_expr("foo.bar", 7))
}
/// Replace the comment regions of `src` with spaces, keeping all other
/// text, so the result has the same length and byte offsets as the input
/// (see the `masks_out_comments` test below).
pub fn mask_comments(src: Src) -> String {
    let mut result = String::with_capacity(src.len());
    // 128-byte run of spaces, appended in chunks to avoid per-byte pushes
    let buf_byte = &[b' '; 128];
    let buffer = from_utf8(buf_byte).unwrap();
    let mut prev: usize = 0;
    // chunk_indices() yields (start, end) spans of non-comment code —
    // presumably the gaps between consecutive spans are comment text;
    // those gaps are padded with spaces here. TODO confirm against Src.
    for (start, end) in src.chunk_indices() {
        for _ in 0..((start-prev)/128) { result.push_str(buffer); }
        result.push_str(&buffer[..((start-prev)%128)]);
        result.push_str(&src[start..end]);
        prev = end;
    }
    result
}
// Blank out the contents of nested `{ ... }` scopes with spaces, keeping
// the braces themselves, the text outside them, and every '\n' inside the
// masked regions so the result preserves `src`'s line/offset structure.
pub fn mask_sub_scopes(src: &str) -> String {
    let mut result = String::with_capacity(src.len());
    // 128-byte run of spaces, appended in chunks to avoid per-byte pushes
    let buf_byte = [b' '; 128];
    let buffer = from_utf8(&buf_byte).unwrap();
    let mut levels = 0i32;   // current brace nesting depth
    let mut start = 0usize;  // start of the span not yet emitted
    let mut pos = 0usize;    // 1-based position of the current byte
    for &b in src.as_bytes() {
        pos += 1;
        match b {
            b'{' => {
                if levels == 0 {
                    // emit the text outside scopes, including this '{'
                    result.push_str(&src[start..(pos)]);
                    start = pos+1;
                }
                levels += 1;
            },
            b'}' => {
                if levels == 1 {
                    // pad the masked scope body, then emit the '}'
                    let num_spaces = pos-start;
                    for _ in 0..(num_spaces/128) { result.push_str(buffer); }
                    result.push_str(&buffer[..((num_spaces)%128)]);
                    result.push_str("}");
                    start = pos;
                }
                levels -= 1;
            },
            b'\n' if levels > 0 => {
                // keep newlines inside masked scopes so line numbers match
                for _ in 0..((pos-start)/128) { result.push_str(buffer); }
                result.push_str(&buffer[..((pos-start)%128)]);
                result.push('\n');
                start = pos+1;
            },
            _ => {}
        }
    }
    // NOTE(review): `start` can exceed `pos` when the input ends right
    // after a '{' (start was set to pos+1); clamp so the slice below holds.
    if start > pos {
        start = pos;
    }
    if levels > 0 {
        // unterminated scope at EOF: pad the remainder with spaces
        for _ in 0..((pos - start)/128) { result.push_str(buffer); }
        result.push_str(&buffer[..((pos-start)%128)]);
    } else {
        result.push_str(&src[start..pos]);
    }
    result
}
/// Slice of `src` up to and including the `}` that closes the next scope,
/// or `""` when no matching closing brace is found.
pub fn end_of_next_scope(src: &str) -> &str {
    find_close(src.as_bytes().iter(), b'{', b'}', 1)
        .map_or("", |count| &src[..count + 1])
}
/// Convert 1-based (line, column) coordinates to a byte offset in `src`.
/// `col` is added verbatim, so it must be a byte offset within the line.
pub fn coords_to_point(src: &str, linenum: usize, col: usize) -> usize {
    // byte length of the first `linenum - 1` lines, counting each line's
    // '\n' terminator
    let line_offset: usize = src.split('\n')
        .take(linenum - 1)
        .map(|line| line.len() + 1)
        .sum();
    line_offset + col
}
/// Convert a byte offset in `src` back to 1-based (line, column)
/// coordinates; the column is a byte offset within its line.
pub fn point_to_coords(src: &str, point: usize) -> (usize, usize) {
    let before = &src[..point];
    // 1-based line number = newlines seen before `point`, plus one
    let line = before.bytes().filter(|&b| b == b'\n').count() + 1;
    // column = distance from the byte after the last newline (or from 0)
    let col = match before.rfind('\n') {
        Some(idx) => point - (idx + 1),
        None => point,
    };
    (line, col)
}
/// Resolve `point` to 1-based (line, column) coordinates within the file
/// at `path`, loading its contents through `session`. Returns `None` when
/// `point` lies at or beyond the end of the file.
pub fn point_to_coords_from_file(path: &Path, point: usize, session: &Session) -> Option<(usize, usize)> {
    let mut line_start = 0;
    for (idx, line) in session.load_file(path).split('\n').enumerate() {
        let line_end = line_start + line.len();
        if point < line_end {
            return Some((idx + 1, point - line_start));
        }
        line_start = line_end + 1; // skip the '\n'
    }
    None
}
#[test]
fn coords_to_point_works() {
let src = "
fn myfn() {
let a = 3;
print(a);
}";
assert!(coords_to_point(src, 3, 5) == 18);
}
#[test]
fn test_scope_start() {
let src = String::from("
fn myfn() {
let a = 3;
print(a);
}
");
let src = core::new_source(src);
let point = coords_to_point(&src, 4, 10);
let start = scope_start(src.as_ref(), point);
assert!(start == 12);
}
#[test]
fn test_scope_start_handles_sub_scopes() {
let src = String::from("
fn myfn() {
let a = 3;
{
let b = 4;
}
print(a);
}
");
let src = core::new_source(src);
let point = coords_to_point(&src, 7, 10);
let start = scope_start(src.as_ref(), point);
assert!(start == 12);
}
#[test]
fn masks_out_comments() {
let src = String::from("
this is some code
this is a line // with a comment
some more
");
let src = core::new_source(src);
let r = mask_comments(src.as_ref());
assert!(src.len() == r.len());
// characters at the start are the same
assert!(src.as_bytes()[5] == r.as_bytes()[5]);
// characters in the comments are masked
let commentoffset = coords_to_point(&src,3,23);
assert!(char_at(&r, commentoffset) =='');
assert!(src.as_bytes()[commentoffset]!= r.as_bytes()[commentoffset]);
// characters afterwards are the same
assert!(src.as_bytes()[src.len()-3] == r.as_bytes()[src.len()-3]);
}
#[test]
fn test_point_to_coords() {
let src = "
fn myfn(b:usize) {
let a = 3;
if b == 12 {
let a = 24;
do_something_with(a);
}
do_something_with(a);
}
";
round_trip_point_and_coords(src, 4, 5);
}
/// Test helper: assert that (line, col) -> point -> (line, col)
/// round-trips through the two conversion functions.
pub fn round_trip_point_and_coords(src: &str, lineno: usize, charno: usize) {
    let roundtripped = point_to_coords(src, coords_to_point(src, lineno, charno));
    assert_eq!(roundtripped, (lineno, charno));
}
#[test]
fn finds_end_of_struct_scope() {
let src="
struct foo {
a: usize,
blah: ~str
}
Some other junk";
let expected="
struct foo {
a: usize,
blah: ~str
}";
let s = end_of_next_scope(src);
assert_eq!(expected, s);
}
|
expand_search_expr_handles_chained_calls
|
identifier_name
|
mod.rs
|
use sea_canal::Analyzer;
use sea_canal::Pattern;
use sea_canal::PatternElem::*;
#[test]
fn meta_find_any_pattern() {
let slice = &[1, 2, 4, 7, 11];
let analyzer = Analyzer::with_meta(slice);
assert_eq!(Some(pat!(Meta(pat!(Plus(1), Plus(2), Plus(3), Plus(4))))), analyzer.find_any_pattern(1));
}
#[test]
fn meta_find_patterns_of_length() {
let slice = &[10, 11, 10, 12, 10, 13, 10];
let analyzer = Analyzer::with_meta(slice);
assert_eq!(Vec::<Pattern>::new(), analyzer.find_patterns_of_length(1));
assert_eq!(
vec![
pat!(Meta(pat!(Plus(1), Plus(2), Plus(3))), Const(10)),
pat!(Meta(pat!(Plus(1), Plus(2), Plus(3))), Meta(pat!(Plus(-1), Plus(-2), Plus(-3))))
],
analyzer.find_patterns_of_length(2));
}
#[test]
fn mixed_operand_meta_pattern() {
let slice = &[10, 11, 10, 20, 10, 13, 10, 40];
|
assert_eq!(Some(pat!(Meta(pat!(Plus(1), Mult(2), Plus(3), Mult(4))), Const(10))), analyzer.find_any_pattern(4));
}
|
let analyzer = Analyzer::with_meta(slice);
assert_eq!(None, analyzer.find_any_pattern(1));
|
random_line_split
|
mod.rs
|
use sea_canal::Analyzer;
use sea_canal::Pattern;
use sea_canal::PatternElem::*;
#[test]
fn meta_find_any_pattern() {
let slice = &[1, 2, 4, 7, 11];
let analyzer = Analyzer::with_meta(slice);
assert_eq!(Some(pat!(Meta(pat!(Plus(1), Plus(2), Plus(3), Plus(4))))), analyzer.find_any_pattern(1));
}
#[test]
fn meta_find_patterns_of_length() {
let slice = &[10, 11, 10, 12, 10, 13, 10];
let analyzer = Analyzer::with_meta(slice);
assert_eq!(Vec::<Pattern>::new(), analyzer.find_patterns_of_length(1));
assert_eq!(
vec![
pat!(Meta(pat!(Plus(1), Plus(2), Plus(3))), Const(10)),
pat!(Meta(pat!(Plus(1), Plus(2), Plus(3))), Meta(pat!(Plus(-1), Plus(-2), Plus(-3))))
],
analyzer.find_patterns_of_length(2));
}
#[test]
fn
|
() {
let slice = &[10, 11, 10, 20, 10, 13, 10, 40];
let analyzer = Analyzer::with_meta(slice);
assert_eq!(None, analyzer.find_any_pattern(1));
assert_eq!(Some(pat!(Meta(pat!(Plus(1), Mult(2), Plus(3), Mult(4))), Const(10))), analyzer.find_any_pattern(4));
}
|
mixed_operand_meta_pattern
|
identifier_name
|
mod.rs
|
use sea_canal::Analyzer;
use sea_canal::Pattern;
use sea_canal::PatternElem::*;
#[test]
fn meta_find_any_pattern() {
let slice = &[1, 2, 4, 7, 11];
let analyzer = Analyzer::with_meta(slice);
assert_eq!(Some(pat!(Meta(pat!(Plus(1), Plus(2), Plus(3), Plus(4))))), analyzer.find_any_pattern(1));
}
#[test]
fn meta_find_patterns_of_length() {
let slice = &[10, 11, 10, 12, 10, 13, 10];
let analyzer = Analyzer::with_meta(slice);
assert_eq!(Vec::<Pattern>::new(), analyzer.find_patterns_of_length(1));
assert_eq!(
vec![
pat!(Meta(pat!(Plus(1), Plus(2), Plus(3))), Const(10)),
pat!(Meta(pat!(Plus(1), Plus(2), Plus(3))), Meta(pat!(Plus(-1), Plus(-2), Plus(-3))))
],
analyzer.find_patterns_of_length(2));
}
#[test]
fn mixed_operand_meta_pattern()
|
{
let slice = &[10, 11, 10, 20, 10, 13, 10, 40];
let analyzer = Analyzer::with_meta(slice);
assert_eq!(None, analyzer.find_any_pattern(1));
assert_eq!(Some(pat!(Meta(pat!(Plus(1), Mult(2), Plus(3), Mult(4))), Const(10))), analyzer.find_any_pattern(4));
}
|
identifier_body
|
|
multi_product.rs
|
#![cfg(feature = "use_std")]
use crate::size_hint;
use crate::Itertools;
#[derive(Clone)]
/// An iterator adaptor that iterates over the cartesian product of
/// multiple iterators of type `I`.
///
/// An iterator element type is `Vec<I>`.
///
/// See [`.multi_cartesian_product()`](../trait.Itertools.html#method.multi_cartesian_product)
/// for more information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct MultiProduct<I>(Vec<MultiProductIter<I>>)
where I: Iterator + Clone,
I::Item: Clone;
/// Create a new cartesian product iterator over an arbitrary number
/// of iterators of the same type.
///
/// Iterator element is of type `Vec<H::Item::Item>`.
pub fn multi_cartesian_product<H>(iters: H) -> MultiProduct<<H::Item as IntoIterator>::IntoIter>
where H: Iterator,
H::Item: IntoIterator,
<H::Item as IntoIterator>::IntoIter: Clone,
<H::Item as IntoIterator>::Item: Clone
{
MultiProduct(iters.map(|i| MultiProductIter::new(i.into_iter())).collect())
}
#[derive(Clone, Debug)]
/// Holds the state of a single iterator within a MultiProduct.
struct MultiProductIter<I>
where I: Iterator + Clone,
I::Item: Clone
{
cur: Option<I::Item>,
iter: I,
iter_orig: I,
}
/// Holds the current state during an iteration of a MultiProduct.
#[derive(Debug)]
enum MultiProductIterState {
StartOfIter,
MidIter { on_first_iter: bool },
}
impl<I> MultiProduct<I>
where I: Iterator + Clone,
I::Item: Clone
{
/// Iterates the rightmost iterator, then recursively iterates iterators
/// to the left if necessary.
///
/// Returns true if the iteration succeeded, else false.
fn iterate_last(
multi_iters: &mut [MultiProductIter<I>],
mut state: MultiProductIterState
) -> bool {
use self::MultiProductIterState::*;
if let Some((last, rest)) = multi_iters.split_last_mut() {
let on_first_iter = match state {
StartOfIter => {
let on_first_iter =!last.in_progress();
state = MidIter { on_first_iter };
on_first_iter
},
MidIter { on_first_iter } => on_first_iter
};
if!on_first_iter {
last.iterate();
}
if last.in_progress() {
true
} else if MultiProduct::iterate_last(rest, state) {
last.reset();
last.iterate();
// If iterator is None twice consecutively, then iterator is
// empty; whole product is empty.
last.in_progress()
} else {
false
}
} else {
// Reached end of iterator list. On initialisation, return true.
// At end of iteration (final iterator finishes), finish.
match state {
StartOfIter => false,
MidIter { on_first_iter } => on_first_iter
}
}
}
/// Returns the unwrapped value of the next iteration.
fn curr_iterator(&self) -> Vec<I::Item> {
self.0.iter().map(|multi_iter| {
multi_iter.cur.clone().unwrap()
}).collect()
}
/// Returns true if iteration has started and has not yet finished; false
/// otherwise.
fn in_progress(&self) -> bool {
if let Some(last) = self.0.last() {
last.in_progress()
} else {
false
}
}
}
impl<I> MultiProductIter<I>
where I: Iterator + Clone,
I::Item: Clone
{
fn new(iter: I) -> Self {
MultiProductIter {
cur: None,
iter: iter.clone(),
iter_orig: iter
}
}
/// Iterate the managed iterator.
fn iterate(&mut self) {
self.cur = self.iter.next();
}
/// Reset the managed iterator.
fn reset(&mut self) {
self.iter = self.iter_orig.clone();
}
/// Returns true if the current iterator has been started and has not yet
/// finished; false otherwise.
fn in_progress(&self) -> bool
|
}
impl<I> Iterator for MultiProduct<I>
where I: Iterator + Clone,
I::Item: Clone
{
type Item = Vec<I::Item>;
fn next(&mut self) -> Option<Self::Item> {
if MultiProduct::iterate_last(
&mut self.0,
MultiProductIterState::StartOfIter
) {
Some(self.curr_iterator())
} else {
None
}
}
fn count(self) -> usize {
if self.0.len() == 0 {
return 0;
}
if!self.in_progress() {
return self.0.into_iter().fold(1, |acc, multi_iter| {
acc * multi_iter.iter.count()
});
}
self.0.into_iter().fold(
0,
|acc, MultiProductIter { iter, iter_orig, cur: _ }| {
let total_count = iter_orig.count();
let cur_count = iter.count();
acc * total_count + cur_count
}
)
}
fn size_hint(&self) -> (usize, Option<usize>) {
// Not ExactSizeIterator because size may be larger than usize
if self.0.len() == 0 {
return (0, Some(0));
}
if!self.in_progress() {
return self.0.iter().fold((1, Some(1)), |acc, multi_iter| {
size_hint::mul(acc, multi_iter.iter.size_hint())
});
}
self.0.iter().fold(
(0, Some(0)),
|acc, &MultiProductIter { ref iter, ref iter_orig, cur: _ }| {
let cur_size = iter.size_hint();
let total_size = iter_orig.size_hint();
size_hint::add(size_hint::mul(acc, total_size), cur_size)
}
)
}
fn last(self) -> Option<Self::Item> {
let iter_count = self.0.len();
let lasts: Self::Item = self.0.into_iter()
.map(|multi_iter| multi_iter.iter.last())
.while_some()
.collect();
if lasts.len() == iter_count {
Some(lasts)
} else {
None
}
}
}
|
{
self.cur.is_some()
}
|
identifier_body
|
multi_product.rs
|
#![cfg(feature = "use_std")]
use crate::size_hint;
use crate::Itertools;
#[derive(Clone)]
/// An iterator adaptor that iterates over the cartesian product of
/// multiple iterators of type `I`.
///
/// An iterator element type is `Vec<I>`.
///
/// See [`.multi_cartesian_product()`](../trait.Itertools.html#method.multi_cartesian_product)
/// for more information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct MultiProduct<I>(Vec<MultiProductIter<I>>)
where I: Iterator + Clone,
I::Item: Clone;
/// Create a new cartesian product iterator over an arbitrary number
/// of iterators of the same type.
///
/// Iterator element is of type `Vec<H::Item::Item>`.
pub fn multi_cartesian_product<H>(iters: H) -> MultiProduct<<H::Item as IntoIterator>::IntoIter>
where H: Iterator,
H::Item: IntoIterator,
<H::Item as IntoIterator>::IntoIter: Clone,
<H::Item as IntoIterator>::Item: Clone
{
MultiProduct(iters.map(|i| MultiProductIter::new(i.into_iter())).collect())
}
#[derive(Clone, Debug)]
/// Holds the state of a single iterator within a MultiProduct.
struct MultiProductIter<I>
where I: Iterator + Clone,
I::Item: Clone
{
cur: Option<I::Item>,
iter: I,
iter_orig: I,
}
/// Holds the current state during an iteration of a MultiProduct.
#[derive(Debug)]
enum MultiProductIterState {
StartOfIter,
MidIter { on_first_iter: bool },
}
impl<I> MultiProduct<I>
where I: Iterator + Clone,
I::Item: Clone
{
/// Iterates the rightmost iterator, then recursively iterates iterators
/// to the left if necessary.
///
/// Returns true if the iteration succeeded, else false.
fn iterate_last(
multi_iters: &mut [MultiProductIter<I>],
mut state: MultiProductIterState
) -> bool {
use self::MultiProductIterState::*;
if let Some((last, rest)) = multi_iters.split_last_mut() {
let on_first_iter = match state {
StartOfIter => {
let on_first_iter =!last.in_progress();
state = MidIter { on_first_iter };
on_first_iter
},
MidIter { on_first_iter } => on_first_iter
};
if!on_first_iter {
last.iterate();
}
if last.in_progress() {
true
} else if MultiProduct::iterate_last(rest, state) {
last.reset();
last.iterate();
// If iterator is None twice consecutively, then iterator is
// empty; whole product is empty.
last.in_progress()
} else {
false
}
} else {
// Reached end of iterator list. On initialisation, return true.
// At end of iteration (final iterator finishes), finish.
match state {
StartOfIter => false,
MidIter { on_first_iter } => on_first_iter
}
}
}
|
self.0.iter().map(|multi_iter| {
multi_iter.cur.clone().unwrap()
}).collect()
}
/// Returns true if iteration has started and has not yet finished; false
/// otherwise.
fn in_progress(&self) -> bool {
if let Some(last) = self.0.last() {
last.in_progress()
} else {
false
}
}
}
impl<I> MultiProductIter<I>
where I: Iterator + Clone,
I::Item: Clone
{
fn new(iter: I) -> Self {
MultiProductIter {
cur: None,
iter: iter.clone(),
iter_orig: iter
}
}
/// Iterate the managed iterator.
fn iterate(&mut self) {
self.cur = self.iter.next();
}
/// Reset the managed iterator.
fn reset(&mut self) {
self.iter = self.iter_orig.clone();
}
/// Returns true if the current iterator has been started and has not yet
/// finished; false otherwise.
fn in_progress(&self) -> bool {
self.cur.is_some()
}
}
impl<I> Iterator for MultiProduct<I>
where I: Iterator + Clone,
I::Item: Clone
{
type Item = Vec<I::Item>;
fn next(&mut self) -> Option<Self::Item> {
if MultiProduct::iterate_last(
&mut self.0,
MultiProductIterState::StartOfIter
) {
Some(self.curr_iterator())
} else {
None
}
}
fn count(self) -> usize {
if self.0.len() == 0 {
return 0;
}
if!self.in_progress() {
return self.0.into_iter().fold(1, |acc, multi_iter| {
acc * multi_iter.iter.count()
});
}
self.0.into_iter().fold(
0,
|acc, MultiProductIter { iter, iter_orig, cur: _ }| {
let total_count = iter_orig.count();
let cur_count = iter.count();
acc * total_count + cur_count
}
)
}
fn size_hint(&self) -> (usize, Option<usize>) {
// Not ExactSizeIterator because size may be larger than usize
if self.0.len() == 0 {
return (0, Some(0));
}
if!self.in_progress() {
return self.0.iter().fold((1, Some(1)), |acc, multi_iter| {
size_hint::mul(acc, multi_iter.iter.size_hint())
});
}
self.0.iter().fold(
(0, Some(0)),
|acc, &MultiProductIter { ref iter, ref iter_orig, cur: _ }| {
let cur_size = iter.size_hint();
let total_size = iter_orig.size_hint();
size_hint::add(size_hint::mul(acc, total_size), cur_size)
}
)
}
fn last(self) -> Option<Self::Item> {
let iter_count = self.0.len();
let lasts: Self::Item = self.0.into_iter()
.map(|multi_iter| multi_iter.iter.last())
.while_some()
.collect();
if lasts.len() == iter_count {
Some(lasts)
} else {
None
}
}
}
|
/// Returns the unwrapped value of the next iteration.
fn curr_iterator(&self) -> Vec<I::Item> {
|
random_line_split
|
multi_product.rs
|
#![cfg(feature = "use_std")]
use crate::size_hint;
use crate::Itertools;
#[derive(Clone)]
/// An iterator adaptor that iterates over the cartesian product of
/// multiple iterators of type `I`.
///
/// An iterator element type is `Vec<I>`.
///
/// See [`.multi_cartesian_product()`](../trait.Itertools.html#method.multi_cartesian_product)
/// for more information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct MultiProduct<I>(Vec<MultiProductIter<I>>)
where I: Iterator + Clone,
I::Item: Clone;
/// Create a new cartesian product iterator over an arbitrary number
/// of iterators of the same type.
///
/// Iterator element is of type `Vec<H::Item::Item>`.
pub fn multi_cartesian_product<H>(iters: H) -> MultiProduct<<H::Item as IntoIterator>::IntoIter>
    where H: Iterator,
          H::Item: IntoIterator,
          <H::Item as IntoIterator>::IntoIter: Clone,
          <H::Item as IntoIterator>::Item: Clone
{
    // Wrap each sub-iterable in its per-iterator state holder up front.
    let sub_iters = iters
        .map(|it| MultiProductIter::new(it.into_iter()))
        .collect();
    MultiProduct(sub_iters)
}
#[derive(Clone, Debug)]
/// Holds the state of a single iterator within a MultiProduct.
struct MultiProductIter<I>
where I: Iterator + Clone,
I::Item: Clone
{
cur: Option<I::Item>,
iter: I,
iter_orig: I,
}
/// Holds the current state during an iteration of a MultiProduct.
#[derive(Debug)]
enum MultiProductIterState {
StartOfIter,
MidIter { on_first_iter: bool },
}
impl<I> MultiProduct<I>
    where I: Iterator + Clone,
          I::Item: Clone
{
    /// Iterates the rightmost iterator, then recursively iterates iterators
    /// to the left if necessary (odometer-style carry).
    ///
    /// Returns true if the iteration succeeded, else false.
    fn iterate_last(
        multi_iters: &mut [MultiProductIter<I>],
        mut state: MultiProductIterState
    ) -> bool {
        use self::MultiProductIterState::*;
        if let Some((last, rest)) = multi_iters.split_last_mut() {
            let on_first_iter = match state {
                StartOfIter => {
                    // Before the first `next()` no sub-iterator holds a
                    // current value, so `in_progress()` is false.
                    let on_first_iter = !last.in_progress();
                    state = MidIter { on_first_iter };
                    on_first_iter
                },
                MidIter { on_first_iter } => on_first_iter
            };
            if !on_first_iter {
                last.iterate();
            }
            if last.in_progress() {
                true
            } else if MultiProduct::iterate_last(rest, state) {
                // This iterator finished: the carry advanced the ones to
                // the left, so restart this one from its beginning.
                last.reset();
                last.iterate();
                // If iterator is None twice consecutively, then iterator is
                // empty; whole product is empty.
                last.in_progress()
            } else {
                false
            }
        } else {
            // Reached end of iterator list. On initialisation, return true.
            // At end of iteration (final iterator finishes), finish.
            match state {
                StartOfIter => false,
                MidIter { on_first_iter } => on_first_iter
            }
        }
    }
    /// Returns the unwrapped value of the next iteration.
    ///
    /// Panics if any sub-iterator lacks a current value; `next()` only
    /// calls this after `iterate_last` reports success.
    fn curr_iterator(&self) -> Vec<I::Item> {
        self.0.iter().map(|multi_iter| {
            multi_iter.cur.clone().unwrap()
        }).collect()
    }
    /// Returns true if iteration has started and has not yet finished; false
    /// otherwise.
    fn in_progress(&self) -> bool {
        // The rightmost iterator advances on every step, so its state
        // mirrors the state of the whole product.
        if let Some(last) = self.0.last() {
            last.in_progress()
        } else {
            false
        }
    }
}
impl<I> MultiProductIter<I>
where I: Iterator + Clone,
I::Item: Clone
{
fn new(iter: I) -> Self {
MultiProductIter {
cur: None,
iter: iter.clone(),
iter_orig: iter
}
}
/// Iterate the managed iterator.
fn
|
(&mut self) {
self.cur = self.iter.next();
}
/// Reset the managed iterator.
fn reset(&mut self) {
self.iter = self.iter_orig.clone();
}
/// Returns true if the current iterator has been started and has not yet
/// finished; false otherwise.
fn in_progress(&self) -> bool {
self.cur.is_some()
}
}
impl<I> Iterator for MultiProduct<I>
where I: Iterator + Clone,
I::Item: Clone
{
type Item = Vec<I::Item>;
fn next(&mut self) -> Option<Self::Item> {
if MultiProduct::iterate_last(
&mut self.0,
MultiProductIterState::StartOfIter
) {
Some(self.curr_iterator())
} else {
None
}
}
fn count(self) -> usize {
if self.0.len() == 0 {
return 0;
}
if!self.in_progress() {
return self.0.into_iter().fold(1, |acc, multi_iter| {
acc * multi_iter.iter.count()
});
}
self.0.into_iter().fold(
0,
|acc, MultiProductIter { iter, iter_orig, cur: _ }| {
let total_count = iter_orig.count();
let cur_count = iter.count();
acc * total_count + cur_count
}
)
}
fn size_hint(&self) -> (usize, Option<usize>) {
// Not ExactSizeIterator because size may be larger than usize
if self.0.len() == 0 {
return (0, Some(0));
}
if!self.in_progress() {
return self.0.iter().fold((1, Some(1)), |acc, multi_iter| {
size_hint::mul(acc, multi_iter.iter.size_hint())
});
}
self.0.iter().fold(
(0, Some(0)),
|acc, &MultiProductIter { ref iter, ref iter_orig, cur: _ }| {
let cur_size = iter.size_hint();
let total_size = iter_orig.size_hint();
size_hint::add(size_hint::mul(acc, total_size), cur_size)
}
)
}
fn last(self) -> Option<Self::Item> {
let iter_count = self.0.len();
let lasts: Self::Item = self.0.into_iter()
.map(|multi_iter| multi_iter.iter.last())
.while_some()
.collect();
if lasts.len() == iter_count {
Some(lasts)
} else {
None
}
}
}
|
iterate
|
identifier_name
|
multi_product.rs
|
#![cfg(feature = "use_std")]
use crate::size_hint;
use crate::Itertools;
#[derive(Clone)]
/// An iterator adaptor that iterates over the cartesian product of
/// multiple iterators of type `I`.
///
/// An iterator element type is `Vec<I>`.
///
/// See [`.multi_cartesian_product()`](../trait.Itertools.html#method.multi_cartesian_product)
/// for more information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct MultiProduct<I>(Vec<MultiProductIter<I>>)
where I: Iterator + Clone,
I::Item: Clone;
/// Create a new cartesian product iterator over an arbitrary number
/// of iterators of the same type.
///
/// Iterator element is of type `Vec<H::Item::Item>`.
pub fn multi_cartesian_product<H>(iters: H) -> MultiProduct<<H::Item as IntoIterator>::IntoIter>
where H: Iterator,
H::Item: IntoIterator,
<H::Item as IntoIterator>::IntoIter: Clone,
<H::Item as IntoIterator>::Item: Clone
{
MultiProduct(iters.map(|i| MultiProductIter::new(i.into_iter())).collect())
}
#[derive(Clone, Debug)]
/// Holds the state of a single iterator within a MultiProduct.
struct MultiProductIter<I>
where I: Iterator + Clone,
I::Item: Clone
{
cur: Option<I::Item>,
iter: I,
iter_orig: I,
}
/// Holds the current state during an iteration of a MultiProduct.
#[derive(Debug)]
enum MultiProductIterState {
StartOfIter,
MidIter { on_first_iter: bool },
}
impl<I> MultiProduct<I>
where I: Iterator + Clone,
I::Item: Clone
{
/// Iterates the rightmost iterator, then recursively iterates iterators
/// to the left if necessary.
///
/// Returns true if the iteration succeeded, else false.
fn iterate_last(
multi_iters: &mut [MultiProductIter<I>],
mut state: MultiProductIterState
) -> bool {
use self::MultiProductIterState::*;
if let Some((last, rest)) = multi_iters.split_last_mut() {
let on_first_iter = match state {
StartOfIter => {
let on_first_iter =!last.in_progress();
state = MidIter { on_first_iter };
on_first_iter
},
MidIter { on_first_iter } => on_first_iter
};
if!on_first_iter
|
if last.in_progress() {
true
} else if MultiProduct::iterate_last(rest, state) {
last.reset();
last.iterate();
// If iterator is None twice consecutively, then iterator is
// empty; whole product is empty.
last.in_progress()
} else {
false
}
} else {
// Reached end of iterator list. On initialisation, return true.
// At end of iteration (final iterator finishes), finish.
match state {
StartOfIter => false,
MidIter { on_first_iter } => on_first_iter
}
}
}
/// Returns the unwrapped value of the next iteration.
fn curr_iterator(&self) -> Vec<I::Item> {
self.0.iter().map(|multi_iter| {
multi_iter.cur.clone().unwrap()
}).collect()
}
/// Returns true if iteration has started and has not yet finished; false
/// otherwise.
fn in_progress(&self) -> bool {
if let Some(last) = self.0.last() {
last.in_progress()
} else {
false
}
}
}
impl<I> MultiProductIter<I>
where I: Iterator + Clone,
I::Item: Clone
{
fn new(iter: I) -> Self {
MultiProductIter {
cur: None,
iter: iter.clone(),
iter_orig: iter
}
}
/// Iterate the managed iterator.
fn iterate(&mut self) {
self.cur = self.iter.next();
}
/// Reset the managed iterator.
fn reset(&mut self) {
self.iter = self.iter_orig.clone();
}
/// Returns true if the current iterator has been started and has not yet
/// finished; false otherwise.
fn in_progress(&self) -> bool {
self.cur.is_some()
}
}
impl<I> Iterator for MultiProduct<I>
where I: Iterator + Clone,
I::Item: Clone
{
type Item = Vec<I::Item>;
fn next(&mut self) -> Option<Self::Item> {
if MultiProduct::iterate_last(
&mut self.0,
MultiProductIterState::StartOfIter
) {
Some(self.curr_iterator())
} else {
None
}
}
fn count(self) -> usize {
if self.0.len() == 0 {
return 0;
}
if!self.in_progress() {
return self.0.into_iter().fold(1, |acc, multi_iter| {
acc * multi_iter.iter.count()
});
}
self.0.into_iter().fold(
0,
|acc, MultiProductIter { iter, iter_orig, cur: _ }| {
let total_count = iter_orig.count();
let cur_count = iter.count();
acc * total_count + cur_count
}
)
}
fn size_hint(&self) -> (usize, Option<usize>) {
// Not ExactSizeIterator because size may be larger than usize
if self.0.len() == 0 {
return (0, Some(0));
}
if!self.in_progress() {
return self.0.iter().fold((1, Some(1)), |acc, multi_iter| {
size_hint::mul(acc, multi_iter.iter.size_hint())
});
}
self.0.iter().fold(
(0, Some(0)),
|acc, &MultiProductIter { ref iter, ref iter_orig, cur: _ }| {
let cur_size = iter.size_hint();
let total_size = iter_orig.size_hint();
size_hint::add(size_hint::mul(acc, total_size), cur_size)
}
)
}
fn last(self) -> Option<Self::Item> {
let iter_count = self.0.len();
let lasts: Self::Item = self.0.into_iter()
.map(|multi_iter| multi_iter.iter.last())
.while_some()
.collect();
if lasts.len() == iter_count {
Some(lasts)
} else {
None
}
}
}
|
{
last.iterate();
}
|
conditional_block
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#[macro_use]
extern crate serde;
use euclid::default::{Point2D, Rect, Size2D};
use malloc_size_of_derive::MallocSizeOf;
use std::borrow::Cow;
#[derive(Clone, Copy, Debug, Deserialize, Eq, MallocSizeOf, PartialEq, Serialize)]
pub enum PixelFormat {
/// Luminance channel only
K8,
/// Luminance + alpha
KA8,
/// RGB, 8 bits per channel
RGB8,
/// RGB + alpha, 8 bits per channel
RGBA8,
/// BGR + alpha, 8 bits per channel
BGRA8,
}
pub fn rgba8_get_rect(pixels: &[u8], size: Size2D<u64>, rect: Rect<u64>) -> Cow<[u8]> {
assert!(!rect.is_empty());
assert!(Rect::from_size(size).contains_rect(&rect));
assert_eq!(pixels.len() % 4, 0);
assert_eq!(size.area() as usize, pixels.len() / 4);
let area = rect.size.area() as usize;
let first_column_start = rect.origin.x as usize * 4;
let row_length = size.width as usize * 4;
let first_row_start = rect.origin.y as usize * row_length;
if rect.origin.x == 0 && rect.size.width == size.width || rect.size.height == 1 {
let start = first_column_start + first_row_start;
return Cow::Borrowed(&pixels[start..start + area * 4]);
}
let mut data = Vec::with_capacity(area * 4);
for row in pixels[first_row_start..]
.chunks(row_length)
.take(rect.size.height as usize)
{
data.extend_from_slice(&row[first_column_start..][..rect.size.width as usize * 4]);
}
data.into()
}
// TODO(pcwalton): Speed up with SIMD, or better yet, find some way to not do this.
pub fn rgba8_byte_swap_colors_inplace(pixels: &mut [u8]) {
assert!(pixels.len() % 4 == 0);
for rgba in pixels.chunks_mut(4) {
let b = rgba[0];
rgba[0] = rgba[2];
rgba[2] = b;
}
}
pub fn rgba8_byte_swap_and_premultiply_inplace(pixels: &mut [u8]) {
assert!(pixels.len() % 4 == 0);
for rgba in pixels.chunks_mut(4) {
let b = rgba[0];
rgba[0] = multiply_u8_color(rgba[2], rgba[3]);
rgba[1] = multiply_u8_color(rgba[1], rgba[3]);
rgba[2] = multiply_u8_color(b, rgba[3]);
}
}
/// Returns true if the pixels were found to be completely opaque.
pub fn rgba8_premultiply_inplace(pixels: &mut [u8]) -> bool {
assert!(pixels.len() % 4 == 0);
let mut is_opaque = true;
for rgba in pixels.chunks_mut(4) {
rgba[0] = multiply_u8_color(rgba[0], rgba[3]);
rgba[1] = multiply_u8_color(rgba[1], rgba[3]);
rgba[2] = multiply_u8_color(rgba[2], rgba[3]);
is_opaque = is_opaque && rgba[3] == 255;
}
is_opaque
}
pub fn multiply_u8_color(a: u8, b: u8) -> u8 {
return (a as u32 * b as u32 / 255) as u8;
}
pub fn clip(
mut origin: Point2D<i32>,
mut size: Size2D<u64>,
surface: Size2D<u64>,
) -> Option<Rect<u64>> {
if origin.x < 0 {
size.width = size.width.saturating_sub(-origin.x as u64);
origin.x = 0;
}
if origin.y < 0
|
let origin = Point2D::new(origin.x as u64, origin.y as u64);
Rect::new(origin, size)
.intersection(&Rect::from_size(surface))
.filter(|rect|!rect.is_empty())
}
|
{
size.height = size.height.saturating_sub(-origin.y as u64);
origin.y = 0;
}
|
conditional_block
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#[macro_use]
extern crate serde;
use euclid::default::{Point2D, Rect, Size2D};
use malloc_size_of_derive::MallocSizeOf;
use std::borrow::Cow;
#[derive(Clone, Copy, Debug, Deserialize, Eq, MallocSizeOf, PartialEq, Serialize)]
pub enum PixelFormat {
/// Luminance channel only
K8,
/// Luminance + alpha
KA8,
/// RGB, 8 bits per channel
RGB8,
/// RGB + alpha, 8 bits per channel
RGBA8,
/// BGR + alpha, 8 bits per channel
BGRA8,
}
pub fn rgba8_get_rect(pixels: &[u8], size: Size2D<u64>, rect: Rect<u64>) -> Cow<[u8]> {
assert!(!rect.is_empty());
assert!(Rect::from_size(size).contains_rect(&rect));
assert_eq!(pixels.len() % 4, 0);
assert_eq!(size.area() as usize, pixels.len() / 4);
let area = rect.size.area() as usize;
let first_column_start = rect.origin.x as usize * 4;
let row_length = size.width as usize * 4;
let first_row_start = rect.origin.y as usize * row_length;
if rect.origin.x == 0 && rect.size.width == size.width || rect.size.height == 1 {
let start = first_column_start + first_row_start;
return Cow::Borrowed(&pixels[start..start + area * 4]);
}
let mut data = Vec::with_capacity(area * 4);
for row in pixels[first_row_start..]
.chunks(row_length)
.take(rect.size.height as usize)
{
data.extend_from_slice(&row[first_column_start..][..rect.size.width as usize * 4]);
}
data.into()
}
// TODO(pcwalton): Speed up with SIMD, or better yet, find some way to not do this.
pub fn rgba8_byte_swap_colors_inplace(pixels: &mut [u8]) {
assert!(pixels.len() % 4 == 0);
for rgba in pixels.chunks_mut(4) {
let b = rgba[0];
rgba[0] = rgba[2];
rgba[2] = b;
}
}
pub fn rgba8_byte_swap_and_premultiply_inplace(pixels: &mut [u8]) {
assert!(pixels.len() % 4 == 0);
for rgba in pixels.chunks_mut(4) {
let b = rgba[0];
rgba[0] = multiply_u8_color(rgba[2], rgba[3]);
rgba[1] = multiply_u8_color(rgba[1], rgba[3]);
rgba[2] = multiply_u8_color(b, rgba[3]);
}
}
/// Returns true if the pixels were found to be completely opaque.
pub fn rgba8_premultiply_inplace(pixels: &mut [u8]) -> bool {
assert!(pixels.len() % 4 == 0);
let mut is_opaque = true;
for rgba in pixels.chunks_mut(4) {
rgba[0] = multiply_u8_color(rgba[0], rgba[3]);
rgba[1] = multiply_u8_color(rgba[1], rgba[3]);
rgba[2] = multiply_u8_color(rgba[2], rgba[3]);
is_opaque = is_opaque && rgba[3] == 255;
}
is_opaque
}
pub fn multiply_u8_color(a: u8, b: u8) -> u8 {
return (a as u32 * b as u32 / 255) as u8;
}
pub fn clip(
mut origin: Point2D<i32>,
mut size: Size2D<u64>,
surface: Size2D<u64>,
) -> Option<Rect<u64>> {
if origin.x < 0 {
size.width = size.width.saturating_sub(-origin.x as u64);
origin.x = 0;
}
if origin.y < 0 {
size.height = size.height.saturating_sub(-origin.y as u64);
origin.y = 0;
}
let origin = Point2D::new(origin.x as u64, origin.y as u64);
Rect::new(origin, size)
.intersection(&Rect::from_size(surface))
|
.filter(|rect|!rect.is_empty())
}
|
random_line_split
|
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#[macro_use]
extern crate serde;
use euclid::default::{Point2D, Rect, Size2D};
use malloc_size_of_derive::MallocSizeOf;
use std::borrow::Cow;
#[derive(Clone, Copy, Debug, Deserialize, Eq, MallocSizeOf, PartialEq, Serialize)]
pub enum PixelFormat {
/// Luminance channel only
K8,
/// Luminance + alpha
KA8,
/// RGB, 8 bits per channel
RGB8,
/// RGB + alpha, 8 bits per channel
RGBA8,
/// BGR + alpha, 8 bits per channel
BGRA8,
}
pub fn rgba8_get_rect(pixels: &[u8], size: Size2D<u64>, rect: Rect<u64>) -> Cow<[u8]> {
assert!(!rect.is_empty());
assert!(Rect::from_size(size).contains_rect(&rect));
assert_eq!(pixels.len() % 4, 0);
assert_eq!(size.area() as usize, pixels.len() / 4);
let area = rect.size.area() as usize;
let first_column_start = rect.origin.x as usize * 4;
let row_length = size.width as usize * 4;
let first_row_start = rect.origin.y as usize * row_length;
if rect.origin.x == 0 && rect.size.width == size.width || rect.size.height == 1 {
let start = first_column_start + first_row_start;
return Cow::Borrowed(&pixels[start..start + area * 4]);
}
let mut data = Vec::with_capacity(area * 4);
for row in pixels[first_row_start..]
.chunks(row_length)
.take(rect.size.height as usize)
{
data.extend_from_slice(&row[first_column_start..][..rect.size.width as usize * 4]);
}
data.into()
}
// TODO(pcwalton): Speed up with SIMD, or better yet, find some way to not do this.
pub fn rgba8_byte_swap_colors_inplace(pixels: &mut [u8]) {
assert!(pixels.len() % 4 == 0);
for rgba in pixels.chunks_mut(4) {
let b = rgba[0];
rgba[0] = rgba[2];
rgba[2] = b;
}
}
pub fn rgba8_byte_swap_and_premultiply_inplace(pixels: &mut [u8]) {
assert!(pixels.len() % 4 == 0);
for rgba in pixels.chunks_mut(4) {
let b = rgba[0];
rgba[0] = multiply_u8_color(rgba[2], rgba[3]);
rgba[1] = multiply_u8_color(rgba[1], rgba[3]);
rgba[2] = multiply_u8_color(b, rgba[3]);
}
}
/// Returns true if the pixels were found to be completely opaque.
pub fn rgba8_premultiply_inplace(pixels: &mut [u8]) -> bool {
assert!(pixels.len() % 4 == 0);
let mut is_opaque = true;
for rgba in pixels.chunks_mut(4) {
rgba[0] = multiply_u8_color(rgba[0], rgba[3]);
rgba[1] = multiply_u8_color(rgba[1], rgba[3]);
rgba[2] = multiply_u8_color(rgba[2], rgba[3]);
is_opaque = is_opaque && rgba[3] == 255;
}
is_opaque
}
pub fn
|
(a: u8, b: u8) -> u8 {
return (a as u32 * b as u32 / 255) as u8;
}
pub fn clip(
mut origin: Point2D<i32>,
mut size: Size2D<u64>,
surface: Size2D<u64>,
) -> Option<Rect<u64>> {
if origin.x < 0 {
size.width = size.width.saturating_sub(-origin.x as u64);
origin.x = 0;
}
if origin.y < 0 {
size.height = size.height.saturating_sub(-origin.y as u64);
origin.y = 0;
}
let origin = Point2D::new(origin.x as u64, origin.y as u64);
Rect::new(origin, size)
.intersection(&Rect::from_size(surface))
.filter(|rect|!rect.is_empty())
}
|
multiply_u8_color
|
identifier_name
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#[macro_use]
extern crate serde;
use euclid::default::{Point2D, Rect, Size2D};
use malloc_size_of_derive::MallocSizeOf;
use std::borrow::Cow;
#[derive(Clone, Copy, Debug, Deserialize, Eq, MallocSizeOf, PartialEq, Serialize)]
pub enum PixelFormat {
/// Luminance channel only
K8,
/// Luminance + alpha
KA8,
/// RGB, 8 bits per channel
RGB8,
/// RGB + alpha, 8 bits per channel
RGBA8,
/// BGR + alpha, 8 bits per channel
BGRA8,
}
pub fn rgba8_get_rect(pixels: &[u8], size: Size2D<u64>, rect: Rect<u64>) -> Cow<[u8]> {
assert!(!rect.is_empty());
assert!(Rect::from_size(size).contains_rect(&rect));
assert_eq!(pixels.len() % 4, 0);
assert_eq!(size.area() as usize, pixels.len() / 4);
let area = rect.size.area() as usize;
let first_column_start = rect.origin.x as usize * 4;
let row_length = size.width as usize * 4;
let first_row_start = rect.origin.y as usize * row_length;
if rect.origin.x == 0 && rect.size.width == size.width || rect.size.height == 1 {
let start = first_column_start + first_row_start;
return Cow::Borrowed(&pixels[start..start + area * 4]);
}
let mut data = Vec::with_capacity(area * 4);
for row in pixels[first_row_start..]
.chunks(row_length)
.take(rect.size.height as usize)
{
data.extend_from_slice(&row[first_column_start..][..rect.size.width as usize * 4]);
}
data.into()
}
// TODO(pcwalton): Speed up with SIMD, or better yet, find some way to not do this.
pub fn rgba8_byte_swap_colors_inplace(pixels: &mut [u8]) {
assert!(pixels.len() % 4 == 0);
for rgba in pixels.chunks_mut(4) {
let b = rgba[0];
rgba[0] = rgba[2];
rgba[2] = b;
}
}
pub fn rgba8_byte_swap_and_premultiply_inplace(pixels: &mut [u8]) {
assert!(pixels.len() % 4 == 0);
for rgba in pixels.chunks_mut(4) {
let b = rgba[0];
rgba[0] = multiply_u8_color(rgba[2], rgba[3]);
rgba[1] = multiply_u8_color(rgba[1], rgba[3]);
rgba[2] = multiply_u8_color(b, rgba[3]);
}
}
/// Returns true if the pixels were found to be completely opaque.
pub fn rgba8_premultiply_inplace(pixels: &mut [u8]) -> bool {
assert!(pixels.len() % 4 == 0);
let mut is_opaque = true;
for rgba in pixels.chunks_mut(4) {
rgba[0] = multiply_u8_color(rgba[0], rgba[3]);
rgba[1] = multiply_u8_color(rgba[1], rgba[3]);
rgba[2] = multiply_u8_color(rgba[2], rgba[3]);
is_opaque = is_opaque && rgba[3] == 255;
}
is_opaque
}
pub fn multiply_u8_color(a: u8, b: u8) -> u8
|
pub fn clip(
mut origin: Point2D<i32>,
mut size: Size2D<u64>,
surface: Size2D<u64>,
) -> Option<Rect<u64>> {
if origin.x < 0 {
size.width = size.width.saturating_sub(-origin.x as u64);
origin.x = 0;
}
if origin.y < 0 {
size.height = size.height.saturating_sub(-origin.y as u64);
origin.y = 0;
}
let origin = Point2D::new(origin.x as u64, origin.y as u64);
Rect::new(origin, size)
.intersection(&Rect::from_size(surface))
.filter(|rect|!rect.is_empty())
}
|
{
return (a as u32 * b as u32 / 255) as u8;
}
|
identifier_body
|
unique-object-move.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #5192
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
#![feature(box_syntax)]
pub trait EventLoop { fn foo(&self)
|
}
pub struct UvEventLoop {
uvio: isize
}
impl EventLoop for UvEventLoop { }
pub fn main() {
let loop_: Box<EventLoop> = box UvEventLoop { uvio: 0 } as Box<EventLoop>;
let _loop2_ = loop_;
}
|
{}
|
identifier_body
|
unique-object-move.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #5192
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
|
pub trait EventLoop { fn foo(&self) {} }
pub struct UvEventLoop {
uvio: isize
}
impl EventLoop for UvEventLoop { }
pub fn main() {
let loop_: Box<EventLoop> = box UvEventLoop { uvio: 0 } as Box<EventLoop>;
let _loop2_ = loop_;
}
|
#![feature(box_syntax)]
|
random_line_split
|
unique-object-move.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #5192
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
#![feature(box_syntax)]
pub trait EventLoop { fn foo(&self) {} }
pub struct
|
{
uvio: isize
}
impl EventLoop for UvEventLoop { }
pub fn main() {
let loop_: Box<EventLoop> = box UvEventLoop { uvio: 0 } as Box<EventLoop>;
let _loop2_ = loop_;
}
|
UvEventLoop
|
identifier_name
|
outline.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% from data import Method %>
|
additional_methods=[Method("outline_has_nonzero_width", "bool")]) %>
// TODO(pcwalton): `invert`
${helpers.predefined_type("outline-color", "CSSColor", "::cssparser::Color::CurrentColor",
animatable=True)}
<%helpers:longhand name="outline-style" need_clone="True" animatable="False">
pub use values::specified::BorderStyle as SpecifiedValue;
pub fn get_initial_value() -> SpecifiedValue { SpecifiedValue::none }
pub mod computed_value {
pub use values::specified::BorderStyle as T;
}
pub fn parse(_context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
match SpecifiedValue::parse(input) {
Ok(SpecifiedValue::hidden) => Err(()),
result => result
}
}
</%helpers:longhand>
<%helpers:longhand name="outline-width" animatable="True">
use app_units::Au;
use cssparser::ToCss;
use std::fmt;
use values::LocalToCss;
use values::HasViewportPercentage;
impl ToCss for SpecifiedValue {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
self.0.to_css(dest)
}
}
pub fn parse(_context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
specified::parse_border_width(input).map(SpecifiedValue)
}
impl HasViewportPercentage for SpecifiedValue {
fn has_viewport_percentage(&self) -> bool {
let &SpecifiedValue(length) = self;
length.has_viewport_percentage()
}
}
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct SpecifiedValue(pub specified::Length);
pub mod computed_value {
use app_units::Au;
pub type T = Au;
}
pub use super::border_top_width::get_initial_value;
impl ToComputedValue for SpecifiedValue {
type ComputedValue = computed_value::T;
#[inline]
fn to_computed_value(&self, context: &Context) -> computed_value::T {
self.0.to_computed_value(context)
}
}
</%helpers:longhand>
// The -moz-outline-radius-* properties are non-standard and not on a standards track.
// TODO: Should they animate?
% for corner in ["topleft", "topright", "bottomright", "bottomleft"]:
${helpers.predefined_type("-moz-outline-radius-" + corner, "BorderRadiusSize",
"computed::BorderRadiusSize::zero()",
"parse", products="gecko",
animatable=False)}
% endfor
${helpers.predefined_type("outline-offset", "Length", "Au(0)", animatable=True)}
|
<% data.new_style_struct("Outline",
inherited=False,
|
random_line_split
|
outline.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% from data import Method %>
<% data.new_style_struct("Outline",
inherited=False,
additional_methods=[Method("outline_has_nonzero_width", "bool")]) %>
// TODO(pcwalton): `invert`
${helpers.predefined_type("outline-color", "CSSColor", "::cssparser::Color::CurrentColor",
animatable=True)}
<%helpers:longhand name="outline-style" need_clone="True" animatable="False">
pub use values::specified::BorderStyle as SpecifiedValue;
pub fn get_initial_value() -> SpecifiedValue { SpecifiedValue::none }
pub mod computed_value {
pub use values::specified::BorderStyle as T;
}
pub fn parse(_context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
match SpecifiedValue::parse(input) {
Ok(SpecifiedValue::hidden) => Err(()),
result => result
}
}
</%helpers:longhand>
<%helpers:longhand name="outline-width" animatable="True">
use app_units::Au;
use cssparser::ToCss;
use std::fmt;
use values::LocalToCss;
use values::HasViewportPercentage;
impl ToCss for SpecifiedValue {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
self.0.to_css(dest)
}
}
pub fn parse(_context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
specified::parse_border_width(input).map(SpecifiedValue)
}
impl HasViewportPercentage for SpecifiedValue {
fn has_viewport_percentage(&self) -> bool {
let &SpecifiedValue(length) = self;
length.has_viewport_percentage()
}
}
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct SpecifiedValue(pub specified::Length);
pub mod computed_value {
use app_units::Au;
pub type T = Au;
}
pub use super::border_top_width::get_initial_value;
impl ToComputedValue for SpecifiedValue {
type ComputedValue = computed_value::T;
#[inline]
fn
|
(&self, context: &Context) -> computed_value::T {
self.0.to_computed_value(context)
}
}
</%helpers:longhand>
// The -moz-outline-radius-* properties are non-standard and not on a standards track.
// TODO: Should they animate?
% for corner in ["topleft", "topright", "bottomright", "bottomleft"]:
${helpers.predefined_type("-moz-outline-radius-" + corner, "BorderRadiusSize",
"computed::BorderRadiusSize::zero()",
"parse", products="gecko",
animatable=False)}
% endfor
${helpers.predefined_type("outline-offset", "Length", "Au(0)", animatable=True)}
|
to_computed_value
|
identifier_name
|
outline.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% from data import Method %>
<% data.new_style_struct("Outline",
inherited=False,
additional_methods=[Method("outline_has_nonzero_width", "bool")]) %>
// TODO(pcwalton): `invert`
${helpers.predefined_type("outline-color", "CSSColor", "::cssparser::Color::CurrentColor",
animatable=True)}
<%helpers:longhand name="outline-style" need_clone="True" animatable="False">
pub use values::specified::BorderStyle as SpecifiedValue;
pub fn get_initial_value() -> SpecifiedValue { SpecifiedValue::none }
pub mod computed_value {
pub use values::specified::BorderStyle as T;
}
pub fn parse(_context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
match SpecifiedValue::parse(input) {
Ok(SpecifiedValue::hidden) => Err(()),
result => result
}
}
</%helpers:longhand>
<%helpers:longhand name="outline-width" animatable="True">
use app_units::Au;
use cssparser::ToCss;
use std::fmt;
use values::LocalToCss;
use values::HasViewportPercentage;
impl ToCss for SpecifiedValue {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write
|
}
pub fn parse(_context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
specified::parse_border_width(input).map(SpecifiedValue)
}
impl HasViewportPercentage for SpecifiedValue {
fn has_viewport_percentage(&self) -> bool {
let &SpecifiedValue(length) = self;
length.has_viewport_percentage()
}
}
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct SpecifiedValue(pub specified::Length);
pub mod computed_value {
use app_units::Au;
pub type T = Au;
}
pub use super::border_top_width::get_initial_value;
impl ToComputedValue for SpecifiedValue {
type ComputedValue = computed_value::T;
#[inline]
fn to_computed_value(&self, context: &Context) -> computed_value::T {
self.0.to_computed_value(context)
}
}
</%helpers:longhand>
// The -moz-outline-radius-* properties are non-standard and not on a standards track.
// TODO: Should they animate?
% for corner in ["topleft", "topright", "bottomright", "bottomleft"]:
${helpers.predefined_type("-moz-outline-radius-" + corner, "BorderRadiusSize",
"computed::BorderRadiusSize::zero()",
"parse", products="gecko",
animatable=False)}
% endfor
${helpers.predefined_type("outline-offset", "Length", "Au(0)", animatable=True)}
|
{
self.0.to_css(dest)
}
|
identifier_body
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{CharacterDataCast, TextCast};
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTitleElementDerived, NodeCast};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{ChildrenMutation, Node, NodeTypeId};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElementDerived for EventTarget {
fn is_htmltitleelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
}
}
impl HTMLTitleElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTitleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTitleElement> {
let element = HTMLTitleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTitleElementBinding::Wrap)
}
}
impl HTMLTitleElementMethods for HTMLTitleElement {
// https://www.whatwg.org/html/#dom-title-text
fn Text(&self) -> DOMString {
let node = NodeCast::from_ref(self);
let mut content = String::new();
for child in node.children() {
let text: Option<&Text> = TextCast::to_ref(child.r());
match text {
Some(text) => content.push_str(&CharacterDataCast::from_ref(text).data()),
None => (),
}
}
content
}
// https://www.whatwg.org/html/#dom-title-text
fn SetText(&self, value: DOMString) {
let node = NodeCast::from_ref(self);
node.SetTextContent(Some(value))
|
}
}
impl VirtualMethods for HTMLTitleElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &HTMLElement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = NodeCast::from_ref(self);
if node.is_in_doc() {
node.owner_doc().title_changed();
}
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node = NodeCast::from_ref(self);
if is_in_doc {
let document = node.owner_doc();
document.r().title_changed();
}
}
}
|
random_line_split
|
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{CharacterDataCast, TextCast};
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTitleElementDerived, NodeCast};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{ChildrenMutation, Node, NodeTypeId};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElementDerived for EventTarget {
fn is_htmltitleelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
}
}
impl HTMLTitleElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTitleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTitleElement> {
let element = HTMLTitleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTitleElementBinding::Wrap)
}
}
impl HTMLTitleElementMethods for HTMLTitleElement {
// https://www.whatwg.org/html/#dom-title-text
fn Text(&self) -> DOMString {
let node = NodeCast::from_ref(self);
let mut content = String::new();
for child in node.children() {
let text: Option<&Text> = TextCast::to_ref(child.r());
match text {
Some(text) => content.push_str(&CharacterDataCast::from_ref(text).data()),
None => (),
}
}
content
}
// https://www.whatwg.org/html/#dom-title-text
fn SetText(&self, value: DOMString) {
let node = NodeCast::from_ref(self);
node.SetTextContent(Some(value))
}
}
impl VirtualMethods for HTMLTitleElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &HTMLElement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = NodeCast::from_ref(self);
if node.is_in_doc()
|
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node = NodeCast::from_ref(self);
if is_in_doc {
let document = node.owner_doc();
document.r().title_changed();
}
}
}
|
{
node.owner_doc().title_changed();
}
|
conditional_block
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{CharacterDataCast, TextCast};
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTitleElementDerived, NodeCast};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{ChildrenMutation, Node, NodeTypeId};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElementDerived for EventTarget {
fn is_htmltitleelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
}
}
impl HTMLTitleElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTitleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTitleElement> {
let element = HTMLTitleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTitleElementBinding::Wrap)
}
}
impl HTMLTitleElementMethods for HTMLTitleElement {
// https://www.whatwg.org/html/#dom-title-text
fn
|
(&self) -> DOMString {
let node = NodeCast::from_ref(self);
let mut content = String::new();
for child in node.children() {
let text: Option<&Text> = TextCast::to_ref(child.r());
match text {
Some(text) => content.push_str(&CharacterDataCast::from_ref(text).data()),
None => (),
}
}
content
}
// https://www.whatwg.org/html/#dom-title-text
fn SetText(&self, value: DOMString) {
let node = NodeCast::from_ref(self);
node.SetTextContent(Some(value))
}
}
impl VirtualMethods for HTMLTitleElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &HTMLElement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = NodeCast::from_ref(self);
if node.is_in_doc() {
node.owner_doc().title_changed();
}
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node = NodeCast::from_ref(self);
if is_in_doc {
let document = node.owner_doc();
document.r().title_changed();
}
}
}
|
Text
|
identifier_name
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{CharacterDataCast, TextCast};
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTitleElementDerived, NodeCast};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{ChildrenMutation, Node, NodeTypeId};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElementDerived for EventTarget {
fn is_htmltitleelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
}
}
impl HTMLTitleElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLTitleElement
|
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTitleElement> {
let element = HTMLTitleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTitleElementBinding::Wrap)
}
}
impl HTMLTitleElementMethods for HTMLTitleElement {
// https://www.whatwg.org/html/#dom-title-text
fn Text(&self) -> DOMString {
let node = NodeCast::from_ref(self);
let mut content = String::new();
for child in node.children() {
let text: Option<&Text> = TextCast::to_ref(child.r());
match text {
Some(text) => content.push_str(&CharacterDataCast::from_ref(text).data()),
None => (),
}
}
content
}
// https://www.whatwg.org/html/#dom-title-text
fn SetText(&self, value: DOMString) {
let node = NodeCast::from_ref(self);
node.SetTextContent(Some(value))
}
}
impl VirtualMethods for HTMLTitleElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &HTMLElement = HTMLElementCast::from_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = NodeCast::from_ref(self);
if node.is_in_doc() {
node.owner_doc().title_changed();
}
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node = NodeCast::from_ref(self);
if is_in_doc {
let document = node.owner_doc();
document.r().title_changed();
}
}
}
|
{
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTitleElement, localName, prefix, document)
}
}
|
identifier_body
|
emoji.rs
|
//=============================================================================
//
// WARNING: This file is AUTO-GENERATED
//
// Do not make changes directly to this file.
//
// If you would like to make a change to the library, please update the schema
// definitions at https://github.com/slack-rs/slack-api-schemas
//
// If you would like to make a change how the library was generated,
|
// please edit https://github.com/slack-rs/slack-rs-api/tree/master/codegen
//
//=============================================================================
pub use crate::mod_types::emoji_types::*;
use crate::sync::requests::SlackWebRequestSender;
/// Lists custom emoji for a team.
///
/// Wraps https://api.slack.com/methods/emoji.list
pub fn list<R>(client: &R, token: &str) -> Result<ListResponse, ListError<R::Error>>
where
R: SlackWebRequestSender,
{
let params = &[("token", token)];
let url = crate::get_slack_url_for_method("emoji.list");
client
.send(&url, ¶ms[..])
.map_err(ListError::Client)
.and_then(|result| {
serde_json::from_str::<ListResponse>(&result)
.map_err(|e| ListError::MalformedResponse(result, e))
})
.and_then(|o| o.into())
}
|
random_line_split
|
|
emoji.rs
|
//=============================================================================
//
// WARNING: This file is AUTO-GENERATED
//
// Do not make changes directly to this file.
//
// If you would like to make a change to the library, please update the schema
// definitions at https://github.com/slack-rs/slack-api-schemas
//
// If you would like to make a change how the library was generated,
// please edit https://github.com/slack-rs/slack-rs-api/tree/master/codegen
//
//=============================================================================
pub use crate::mod_types::emoji_types::*;
use crate::sync::requests::SlackWebRequestSender;
/// Lists custom emoji for a team.
///
/// Wraps https://api.slack.com/methods/emoji.list
pub fn
|
<R>(client: &R, token: &str) -> Result<ListResponse, ListError<R::Error>>
where
R: SlackWebRequestSender,
{
let params = &[("token", token)];
let url = crate::get_slack_url_for_method("emoji.list");
client
.send(&url, ¶ms[..])
.map_err(ListError::Client)
.and_then(|result| {
serde_json::from_str::<ListResponse>(&result)
.map_err(|e| ListError::MalformedResponse(result, e))
})
.and_then(|o| o.into())
}
|
list
|
identifier_name
|
emoji.rs
|
//=============================================================================
//
// WARNING: This file is AUTO-GENERATED
//
// Do not make changes directly to this file.
//
// If you would like to make a change to the library, please update the schema
// definitions at https://github.com/slack-rs/slack-api-schemas
//
// If you would like to make a change how the library was generated,
// please edit https://github.com/slack-rs/slack-rs-api/tree/master/codegen
//
//=============================================================================
pub use crate::mod_types::emoji_types::*;
use crate::sync::requests::SlackWebRequestSender;
/// Lists custom emoji for a team.
///
/// Wraps https://api.slack.com/methods/emoji.list
pub fn list<R>(client: &R, token: &str) -> Result<ListResponse, ListError<R::Error>>
where
R: SlackWebRequestSender,
|
{
let params = &[("token", token)];
let url = crate::get_slack_url_for_method("emoji.list");
client
.send(&url, ¶ms[..])
.map_err(ListError::Client)
.and_then(|result| {
serde_json::from_str::<ListResponse>(&result)
.map_err(|e| ListError::MalformedResponse(result, e))
})
.and_then(|o| o.into())
}
|
identifier_body
|
|
build.rs
|
use std::env;
use std::fs;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use protobuf_codegen::Codegen;
use protobuf_test_common::build::*;
use protobuf_test_common::print_rerun_if_changed_recursively;
fn copy_test<P1: AsRef<Path>, P2: AsRef<Path>>(src: P1, dst: P2) {
eprintln!("copy {:?} to {:?}", src.as_ref(), dst.as_ref());
let mut content = Vec::new();
fs::File::open(src.as_ref())
.expect(&format!("open {}", src.as_ref().display()))
.read_to_end(&mut content)
.expect(&format!("read_to_end {}", src.as_ref().display()));
let mut write = fs::File::create(dst).expect("create");
writeln!(write, "// @generated").expect("write");
writeln!(write, "// copied from {}", src.as_ref().display()).expect("write");
writeln!(write, "").expect("write");
write.write_all(&content).expect("write_all");
// Print generated twice to avoid overlooking it accidentally
writeln!(write, "// @generated").expect("write");
write.flush().expect("flush");
}
fn copy_from_protobuf_test(path: &str) {
copy_test(
&format!("../../test-crates/protobuf-test/{}", path),
&format!("{}", path),
)
}
enum FileNameClass {
ModRs,
TestRs,
Proto,
GeneratedRs,
Ignore,
}
fn classify_file_name(dir: &str, name: &str) -> FileNameClass {
if name.starts_with(".") || name.ends_with(".md") || name.ends_with(".sh") {
FileNameClass::Ignore
} else if name.ends_with("_pb.rs") || name.ends_with("_pb_proto3.rs") {
FileNameClass::GeneratedRs
} else if name == "mod.rs" {
FileNameClass::ModRs
} else if name.ends_with(".proto") || name.ends_with(".proto3") {
FileNameClass::Proto
} else if name.ends_with(".rs") {
if dir == "src/google/protobuf" {
FileNameClass::GeneratedRs
} else {
FileNameClass::TestRs
}
} else {
panic!("unknown test file: {}", name);
}
}
// Copy tests from `protobuf-test` directory to the same directory here
fn copy_tests(dir: &str) {
let src_dir = format!("../../test-crates/protobuf-test/{}", dir);
for entry in fs::read_dir(&src_dir).expect(&format!("read_dir {}", src_dir)) {
let file_name = entry.expect("entry").file_name().into_string().unwrap();
match classify_file_name(dir, &file_name) {
FileNameClass::ModRs | FileNameClass::Ignore | FileNameClass::GeneratedRs => {}
FileNameClass::TestRs | FileNameClass::Proto => {
copy_from_protobuf_test(&format!("{}/{}", dir, file_name))
}
}
}
}
fn gen_in_dir(dir: &str, include_dir: &str) {
gen_in_dir_impl(
dir,
|GenInDirArgs {
out_dir,
input,
customize,
}| {
Codegen::new()
.pure()
.out_dir(out_dir)
.inputs(input)
.includes(&[include_dir])
.customize(customize)
.run_from_script()
},
);
}
fn generate_interop() {
copy_from_protobuf_test("src/interop/mod.rs");
copy_from_protobuf_test("src/interop/json.rs");
copy_from_protobuf_test("src/interop/bin.rs");
Codegen::new()
.pure()
.out_dir("src/interop")
.includes(&["../../interop/cxx", "../../proto"])
.input("../../interop/cxx/interop_pb.proto")
.run_from_script();
}
fn generate_include_generated() {
copy_from_protobuf_test("src/include_generated/mod.rs");
let dir = format!("{}/include_generated", env::var("OUT_DIR").unwrap());
if Path::new(&dir).exists() {
fs::remove_dir_all(&dir).unwrap();
}
fs::create_dir(&dir).unwrap();
Codegen::new()
.pure()
.out_dir(dir)
.input("../../test-crates/protobuf-test/src/include_generated/v2.proto")
.input("../../test-crates/protobuf-test/src/include_generated/v3.proto")
.include("../../test-crates/protobuf-test/src/include_generated")
.run_from_script();
}
fn generate_pb_rs() {
print_rerun_if_changed_recursively("../../test-crates/protobuf-test");
copy_tests("src/v2");
gen_in_dir("src/v2", "src/v2");
copy_tests("src/v3");
gen_in_dir("src/v3", "src/v3");
copy_tests("src/common/v2");
gen_in_dir("src/common/v2", "src/common/v2");
copy_tests_v2_v3("src/common/v2", "src/common/v3");
gen_in_dir("src/common/v3", "src/common/v3");
copy_tests("src/google/protobuf");
gen_in_dir("src/google/protobuf", "src");
generate_interop();
generate_include_generated();
}
fn
|
() {
env_logger::init();
clean_old_files();
generate_pb_rs();
}
|
main
|
identifier_name
|
build.rs
|
use std::env;
use std::fs;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use protobuf_codegen::Codegen;
use protobuf_test_common::build::*;
use protobuf_test_common::print_rerun_if_changed_recursively;
fn copy_test<P1: AsRef<Path>, P2: AsRef<Path>>(src: P1, dst: P2) {
eprintln!("copy {:?} to {:?}", src.as_ref(), dst.as_ref());
let mut content = Vec::new();
fs::File::open(src.as_ref())
.expect(&format!("open {}", src.as_ref().display()))
.read_to_end(&mut content)
.expect(&format!("read_to_end {}", src.as_ref().display()));
let mut write = fs::File::create(dst).expect("create");
writeln!(write, "// @generated").expect("write");
writeln!(write, "// copied from {}", src.as_ref().display()).expect("write");
writeln!(write, "").expect("write");
write.write_all(&content).expect("write_all");
// Print generated twice to avoid overlooking it accidentally
writeln!(write, "// @generated").expect("write");
write.flush().expect("flush");
}
fn copy_from_protobuf_test(path: &str) {
copy_test(
&format!("../../test-crates/protobuf-test/{}", path),
&format!("{}", path),
)
}
enum FileNameClass {
ModRs,
TestRs,
Proto,
GeneratedRs,
Ignore,
}
fn classify_file_name(dir: &str, name: &str) -> FileNameClass {
if name.starts_with(".") || name.ends_with(".md") || name.ends_with(".sh") {
FileNameClass::Ignore
} else if name.ends_with("_pb.rs") || name.ends_with("_pb_proto3.rs") {
FileNameClass::GeneratedRs
} else if name == "mod.rs" {
FileNameClass::ModRs
} else if name.ends_with(".proto") || name.ends_with(".proto3") {
FileNameClass::Proto
} else if name.ends_with(".rs") {
if dir == "src/google/protobuf" {
FileNameClass::GeneratedRs
} else {
FileNameClass::TestRs
}
} else {
panic!("unknown test file: {}", name);
}
}
// Copy tests from `protobuf-test` directory to the same directory here
fn copy_tests(dir: &str) {
let src_dir = format!("../../test-crates/protobuf-test/{}", dir);
for entry in fs::read_dir(&src_dir).expect(&format!("read_dir {}", src_dir)) {
let file_name = entry.expect("entry").file_name().into_string().unwrap();
match classify_file_name(dir, &file_name) {
FileNameClass::ModRs | FileNameClass::Ignore | FileNameClass::GeneratedRs => {}
FileNameClass::TestRs | FileNameClass::Proto => {
copy_from_protobuf_test(&format!("{}/{}", dir, file_name))
}
}
}
}
fn gen_in_dir(dir: &str, include_dir: &str) {
gen_in_dir_impl(
dir,
|GenInDirArgs {
out_dir,
input,
customize,
}| {
Codegen::new()
.pure()
.out_dir(out_dir)
.inputs(input)
.includes(&[include_dir])
.customize(customize)
.run_from_script()
},
);
}
fn generate_interop() {
copy_from_protobuf_test("src/interop/mod.rs");
copy_from_protobuf_test("src/interop/json.rs");
copy_from_protobuf_test("src/interop/bin.rs");
Codegen::new()
.pure()
.out_dir("src/interop")
.includes(&["../../interop/cxx", "../../proto"])
.input("../../interop/cxx/interop_pb.proto")
.run_from_script();
}
fn generate_include_generated() {
copy_from_protobuf_test("src/include_generated/mod.rs");
let dir = format!("{}/include_generated", env::var("OUT_DIR").unwrap());
if Path::new(&dir).exists() {
fs::remove_dir_all(&dir).unwrap();
}
fs::create_dir(&dir).unwrap();
Codegen::new()
|
.run_from_script();
}
fn generate_pb_rs() {
print_rerun_if_changed_recursively("../../test-crates/protobuf-test");
copy_tests("src/v2");
gen_in_dir("src/v2", "src/v2");
copy_tests("src/v3");
gen_in_dir("src/v3", "src/v3");
copy_tests("src/common/v2");
gen_in_dir("src/common/v2", "src/common/v2");
copy_tests_v2_v3("src/common/v2", "src/common/v3");
gen_in_dir("src/common/v3", "src/common/v3");
copy_tests("src/google/protobuf");
gen_in_dir("src/google/protobuf", "src");
generate_interop();
generate_include_generated();
}
fn main() {
env_logger::init();
clean_old_files();
generate_pb_rs();
}
|
.pure()
.out_dir(dir)
.input("../../test-crates/protobuf-test/src/include_generated/v2.proto")
.input("../../test-crates/protobuf-test/src/include_generated/v3.proto")
.include("../../test-crates/protobuf-test/src/include_generated")
|
random_line_split
|
wait_delete.rs
|
// Copyright 2019 Guanhao Yin <[email protected]>
// This file is part of TiTun.
// TiTun is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// TiTun is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with TiTun. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use std::path::Path;
use tokio::sync::oneshot::Sender;
// Polling on BSD.
//
// It is not possible to use kqueue to watch delete events on a socket:
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=170177
#[cfg(not(target_os = "linux"))]
pub async fn wait_delete(path: &Path, ready: Sender<()>) -> anyhow::Result<()> {
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
// Use nix dir because it can be rewind, so it works with privilege dropping.
let mut dir = Dir::open(
path.parent().unwrap(),
OFlag::O_DIRECTORY | OFlag::O_RDONLY,
Mode::empty(),
)?;
let file_name = path.file_name().unwrap().to_owned();
let (tx, rx) = tokio::sync::oneshot::channel();
let _ = ready.send(());
std::thread::spawn(move || {
loop {
let mut found = false;
for f in dir.iter() {
let f = f.unwrap();
let os_str = OsStr::from_bytes(f.file_name().to_bytes());
let f_name = Path::new(os_str);
if f_name == file_name {
found = true;
break;
}
}
if!found {
break;
}
std::thread::sleep(std::time::Duration::from_secs(2));
}
tx.send(Ok(())).unwrap();
});
rx.await.unwrap()
}
#[cfg(target_os = "linux")]
pub async fn wait_delete(p: &Path, ready: Sender<()>) -> anyhow::Result<()> {
// Use inotify on linux.
use anyhow::Context;
use futures::StreamExt;
use inotify::{EventMask, Inotify, WatchMask};
let file_name = p.file_name().unwrap().into();
let parent_dir = p.parent().unwrap();
let mut inotify = Inotify::init().context("init")?;
inotify
.add_watch(parent_dir, WatchMask::DELETE)
|
let _ = ready.send(());
let buf = vec![0u8; 1024];
let mut stream = inotify.event_stream(buf).context("event_stream")?;
loop {
let event = stream.next().await.unwrap().context("next")?;
if event.mask == EventMask::DELETE && event.name.as_ref() == Some(&file_name) {
break;
}
}
Ok(())
}
#[cfg(test)]
mod tests {
// XXX: On mips/mipsel, `INotify::init` (`inotify_init1`) returns an
// `EINVAL` error *in CI*. Ignore the test for now.
#[cfg(not(target_arch = "mips"))]
#[tokio::test]
async fn test_wait_delete() {
use super::*;
use nix::unistd::{mkstemp, unlink};
let (ready_tx, ready_rx) = tokio::sync::oneshot::channel();
let mut file = std::env::temp_dir();
file.push("test_wait_delete_XXXXXX");
let (_, tmp_path) = mkstemp(&file).expect("mkstemp");
{
let tmp_path = tmp_path.clone();
tokio::spawn(async move {
ready_rx.await.unwrap();
unlink(&tmp_path).expect("unlink");
});
}
wait_delete(&tmp_path, ready_tx).await.expect("wait delete");
assert!(!tmp_path.exists());
}
}
|
.context("add_watch")?;
|
random_line_split
|
wait_delete.rs
|
// Copyright 2019 Guanhao Yin <[email protected]>
// This file is part of TiTun.
// TiTun is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// TiTun is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with TiTun. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use std::path::Path;
use tokio::sync::oneshot::Sender;
// Polling on BSD.
//
// It is not possible to use kqueue to watch delete events on a socket:
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=170177
#[cfg(not(target_os = "linux"))]
pub async fn wait_delete(path: &Path, ready: Sender<()>) -> anyhow::Result<()> {
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
// Use nix dir because it can be rewind, so it works with privilege dropping.
let mut dir = Dir::open(
path.parent().unwrap(),
OFlag::O_DIRECTORY | OFlag::O_RDONLY,
Mode::empty(),
)?;
let file_name = path.file_name().unwrap().to_owned();
let (tx, rx) = tokio::sync::oneshot::channel();
let _ = ready.send(());
std::thread::spawn(move || {
loop {
let mut found = false;
for f in dir.iter() {
let f = f.unwrap();
let os_str = OsStr::from_bytes(f.file_name().to_bytes());
let f_name = Path::new(os_str);
if f_name == file_name {
found = true;
break;
}
}
if!found {
break;
}
std::thread::sleep(std::time::Duration::from_secs(2));
}
tx.send(Ok(())).unwrap();
});
rx.await.unwrap()
}
#[cfg(target_os = "linux")]
pub async fn
|
(p: &Path, ready: Sender<()>) -> anyhow::Result<()> {
// Use inotify on linux.
use anyhow::Context;
use futures::StreamExt;
use inotify::{EventMask, Inotify, WatchMask};
let file_name = p.file_name().unwrap().into();
let parent_dir = p.parent().unwrap();
let mut inotify = Inotify::init().context("init")?;
inotify
.add_watch(parent_dir, WatchMask::DELETE)
.context("add_watch")?;
let _ = ready.send(());
let buf = vec![0u8; 1024];
let mut stream = inotify.event_stream(buf).context("event_stream")?;
loop {
let event = stream.next().await.unwrap().context("next")?;
if event.mask == EventMask::DELETE && event.name.as_ref() == Some(&file_name) {
break;
}
}
Ok(())
}
#[cfg(test)]
mod tests {
// XXX: On mips/mipsel, `INotify::init` (`inotify_init1`) returns an
// `EINVAL` error *in CI*. Ignore the test for now.
#[cfg(not(target_arch = "mips"))]
#[tokio::test]
async fn test_wait_delete() {
use super::*;
use nix::unistd::{mkstemp, unlink};
let (ready_tx, ready_rx) = tokio::sync::oneshot::channel();
let mut file = std::env::temp_dir();
file.push("test_wait_delete_XXXXXX");
let (_, tmp_path) = mkstemp(&file).expect("mkstemp");
{
let tmp_path = tmp_path.clone();
tokio::spawn(async move {
ready_rx.await.unwrap();
unlink(&tmp_path).expect("unlink");
});
}
wait_delete(&tmp_path, ready_tx).await.expect("wait delete");
assert!(!tmp_path.exists());
}
}
|
wait_delete
|
identifier_name
|
wait_delete.rs
|
// Copyright 2019 Guanhao Yin <[email protected]>
// This file is part of TiTun.
// TiTun is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// TiTun is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with TiTun. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use std::path::Path;
use tokio::sync::oneshot::Sender;
// Polling on BSD.
//
// It is not possible to use kqueue to watch delete events on a socket:
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=170177
#[cfg(not(target_os = "linux"))]
pub async fn wait_delete(path: &Path, ready: Sender<()>) -> anyhow::Result<()> {
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
// Use nix dir because it can be rewind, so it works with privilege dropping.
let mut dir = Dir::open(
path.parent().unwrap(),
OFlag::O_DIRECTORY | OFlag::O_RDONLY,
Mode::empty(),
)?;
let file_name = path.file_name().unwrap().to_owned();
let (tx, rx) = tokio::sync::oneshot::channel();
let _ = ready.send(());
std::thread::spawn(move || {
loop {
let mut found = false;
for f in dir.iter() {
let f = f.unwrap();
let os_str = OsStr::from_bytes(f.file_name().to_bytes());
let f_name = Path::new(os_str);
if f_name == file_name
|
}
if!found {
break;
}
std::thread::sleep(std::time::Duration::from_secs(2));
}
tx.send(Ok(())).unwrap();
});
rx.await.unwrap()
}
#[cfg(target_os = "linux")]
pub async fn wait_delete(p: &Path, ready: Sender<()>) -> anyhow::Result<()> {
// Use inotify on linux.
use anyhow::Context;
use futures::StreamExt;
use inotify::{EventMask, Inotify, WatchMask};
let file_name = p.file_name().unwrap().into();
let parent_dir = p.parent().unwrap();
let mut inotify = Inotify::init().context("init")?;
inotify
.add_watch(parent_dir, WatchMask::DELETE)
.context("add_watch")?;
let _ = ready.send(());
let buf = vec![0u8; 1024];
let mut stream = inotify.event_stream(buf).context("event_stream")?;
loop {
let event = stream.next().await.unwrap().context("next")?;
if event.mask == EventMask::DELETE && event.name.as_ref() == Some(&file_name) {
break;
}
}
Ok(())
}
#[cfg(test)]
mod tests {
// XXX: On mips/mipsel, `INotify::init` (`inotify_init1`) returns an
// `EINVAL` error *in CI*. Ignore the test for now.
#[cfg(not(target_arch = "mips"))]
#[tokio::test]
async fn test_wait_delete() {
use super::*;
use nix::unistd::{mkstemp, unlink};
let (ready_tx, ready_rx) = tokio::sync::oneshot::channel();
let mut file = std::env::temp_dir();
file.push("test_wait_delete_XXXXXX");
let (_, tmp_path) = mkstemp(&file).expect("mkstemp");
{
let tmp_path = tmp_path.clone();
tokio::spawn(async move {
ready_rx.await.unwrap();
unlink(&tmp_path).expect("unlink");
});
}
wait_delete(&tmp_path, ready_tx).await.expect("wait delete");
assert!(!tmp_path.exists());
}
}
|
{
found = true;
break;
}
|
conditional_block
|
wait_delete.rs
|
// Copyright 2019 Guanhao Yin <[email protected]>
// This file is part of TiTun.
// TiTun is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// TiTun is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with TiTun. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use std::path::Path;
use tokio::sync::oneshot::Sender;
// Polling on BSD.
//
// It is not possible to use kqueue to watch delete events on a socket:
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=170177
#[cfg(not(target_os = "linux"))]
pub async fn wait_delete(path: &Path, ready: Sender<()>) -> anyhow::Result<()> {
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
// Use nix dir because it can be rewind, so it works with privilege dropping.
let mut dir = Dir::open(
path.parent().unwrap(),
OFlag::O_DIRECTORY | OFlag::O_RDONLY,
Mode::empty(),
)?;
let file_name = path.file_name().unwrap().to_owned();
let (tx, rx) = tokio::sync::oneshot::channel();
let _ = ready.send(());
std::thread::spawn(move || {
loop {
let mut found = false;
for f in dir.iter() {
let f = f.unwrap();
let os_str = OsStr::from_bytes(f.file_name().to_bytes());
let f_name = Path::new(os_str);
if f_name == file_name {
found = true;
break;
}
}
if!found {
break;
}
std::thread::sleep(std::time::Duration::from_secs(2));
}
tx.send(Ok(())).unwrap();
});
rx.await.unwrap()
}
#[cfg(target_os = "linux")]
pub async fn wait_delete(p: &Path, ready: Sender<()>) -> anyhow::Result<()>
|
}
Ok(())
}
#[cfg(test)]
mod tests {
// XXX: On mips/mipsel, `INotify::init` (`inotify_init1`) returns an
// `EINVAL` error *in CI*. Ignore the test for now.
#[cfg(not(target_arch = "mips"))]
#[tokio::test]
async fn test_wait_delete() {
use super::*;
use nix::unistd::{mkstemp, unlink};
let (ready_tx, ready_rx) = tokio::sync::oneshot::channel();
let mut file = std::env::temp_dir();
file.push("test_wait_delete_XXXXXX");
let (_, tmp_path) = mkstemp(&file).expect("mkstemp");
{
let tmp_path = tmp_path.clone();
tokio::spawn(async move {
ready_rx.await.unwrap();
unlink(&tmp_path).expect("unlink");
});
}
wait_delete(&tmp_path, ready_tx).await.expect("wait delete");
assert!(!tmp_path.exists());
}
}
|
{
// Use inotify on linux.
use anyhow::Context;
use futures::StreamExt;
use inotify::{EventMask, Inotify, WatchMask};
let file_name = p.file_name().unwrap().into();
let parent_dir = p.parent().unwrap();
let mut inotify = Inotify::init().context("init")?;
inotify
.add_watch(parent_dir, WatchMask::DELETE)
.context("add_watch")?;
let _ = ready.send(());
let buf = vec![0u8; 1024];
let mut stream = inotify.event_stream(buf).context("event_stream")?;
loop {
let event = stream.next().await.unwrap().context("next")?;
if event.mask == EventMask::DELETE && event.name.as_ref() == Some(&file_name) {
break;
}
|
identifier_body
|
util.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::env;
use common::Config;
/// Conversion table from triple OS name to Rust SYSNAME
const OS_TABLE: &'static [(&'static str, &'static str)] = &[
("mingw32", "windows"),
("win32", "windows"),
("windows", "windows"),
("darwin", "macos"),
("android", "android"),
("linux", "linux"),
("freebsd", "freebsd"),
("dragonfly", "dragonfly"),
("bitrig", "bitrig"),
("openbsd", "openbsd"),
];
pub fn get_os(triple: &str) -> &'static str {
for &(triple_os, os) in OS_TABLE {
if triple.contains(triple_os) {
return os
}
}
panic!("Cannot determine OS from triple");
}
pub fn make_new_path(path: &str) -> String {
assert!(cfg!(windows));
// Windows just uses PATH as the library search path, so we have to
// maintain the current value while adding our own
match env::var(lib_path_env_var()) {
Ok(curr) => {
format!("{}{}{}", path, path_div(), curr)
}
Err(..) => path.to_string()
}
}
pub fn lib_path_env_var() -> &'static str { "PATH" }
fn path_div() -> &'static str
|
pub fn logv(config: &Config, s: String) {
debug!("{}", s);
if config.verbose { println!("{}", s); }
}
|
{ ";" }
|
identifier_body
|
util.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::env;
use common::Config;
/// Conversion table from triple OS name to Rust SYSNAME
const OS_TABLE: &'static [(&'static str, &'static str)] = &[
("mingw32", "windows"),
("win32", "windows"),
("windows", "windows"),
("darwin", "macos"),
("android", "android"),
("linux", "linux"),
("freebsd", "freebsd"),
("dragonfly", "dragonfly"),
("bitrig", "bitrig"),
("openbsd", "openbsd"),
];
pub fn get_os(triple: &str) -> &'static str {
for &(triple_os, os) in OS_TABLE {
if triple.contains(triple_os) {
return os
}
}
panic!("Cannot determine OS from triple");
}
pub fn make_new_path(path: &str) -> String {
assert!(cfg!(windows));
// Windows just uses PATH as the library search path, so we have to
// maintain the current value while adding our own
match env::var(lib_path_env_var()) {
Ok(curr) => {
format!("{}{}{}", path, path_div(), curr)
}
Err(..) => path.to_string()
}
}
pub fn lib_path_env_var() -> &'static str { "PATH" }
fn path_div() -> &'static str { ";" }
pub fn
|
(config: &Config, s: String) {
debug!("{}", s);
if config.verbose { println!("{}", s); }
}
|
logv
|
identifier_name
|
util.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::env;
use common::Config;
/// Conversion table from triple OS name to Rust SYSNAME
const OS_TABLE: &'static [(&'static str, &'static str)] = &[
("mingw32", "windows"),
("win32", "windows"),
("windows", "windows"),
("darwin", "macos"),
("android", "android"),
("linux", "linux"),
("freebsd", "freebsd"),
("dragonfly", "dragonfly"),
("bitrig", "bitrig"),
("openbsd", "openbsd"),
];
pub fn get_os(triple: &str) -> &'static str {
for &(triple_os, os) in OS_TABLE {
if triple.contains(triple_os) {
return os
}
}
panic!("Cannot determine OS from triple");
}
pub fn make_new_path(path: &str) -> String {
assert!(cfg!(windows));
// Windows just uses PATH as the library search path, so we have to
|
Err(..) => path.to_string()
}
}
pub fn lib_path_env_var() -> &'static str { "PATH" }
fn path_div() -> &'static str { ";" }
pub fn logv(config: &Config, s: String) {
debug!("{}", s);
if config.verbose { println!("{}", s); }
}
|
// maintain the current value while adding our own
match env::var(lib_path_env_var()) {
Ok(curr) => {
format!("{}{}{}", path, path_div(), curr)
}
|
random_line_split
|
libclarity.rs
|
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
// Copyright (C) 2020 Stacks Open Internet Foundation
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_imports)]
#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![cfg_attr(test, allow(unused_variables, unused_assignments))]
extern crate curve25519_dalek;
extern crate ed25519_dalek;
extern crate rand;
extern crate rand_chacha;
extern crate rusqlite;
extern crate secp256k1;
extern crate serde;
extern crate tini;
#[macro_use]
extern crate lazy_static;
extern crate integer_sqrt;
extern crate mio;
extern crate percent_encoding;
extern crate regex;
extern crate ripemd160;
extern crate sha2;
extern crate sha3;
extern crate time;
extern crate url;
#[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)]
extern crate slog;
extern crate chrono;
#[cfg(feature = "slog_json")]
extern crate slog_json;
extern crate slog_term;
#[cfg(unix)]
extern crate libc;
#[cfg(unix)]
extern crate nix;
#[cfg(windows)]
extern crate winapi;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
#[cfg(test)]
#[macro_use]
extern crate assert_json_diff;
#[cfg(feature = "monitoring_prom")]
#[macro_use]
pub extern crate prometheus;
#[macro_use]
pub mod codec;
#[macro_use]
pub mod util;
#[macro_use]
pub mod net;
#[macro_use]
/// The Clarity virtual machine
pub mod vm;
#[macro_use]
pub mod chainstate;
#[cfg(test)]
extern crate stx_genesis;
pub mod address;
pub mod burnchains;
/// A high level library for interacting with the Clarity vm
pub mod clarity_vm;
pub mod core;
pub mod deps;
pub mod clarity;
pub mod cost_estimates;
pub mod monitoring;
pub mod types;
// set via _compile-time_ envars
const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH");
const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT");
const GIT_TREE_CLEAN: Option<&'static str> = option_env!("GIT_TREE_CLEAN");
#[cfg(debug_assertions)]
const BUILD_TYPE: &'static str = "debug";
#[cfg(not(debug_assertions))]
const BUILD_TYPE: &'static str = "release";
pub fn version_string(pkg_name: &str, pkg_version: &str) -> String {
let git_branch = GIT_BRANCH.unwrap_or("");
let git_commit = GIT_COMMIT.unwrap_or("");
let git_tree_clean = GIT_TREE_CLEAN.unwrap_or("");
|
format!(
"{} {} ({}:{}{}, {} build, {} [{}])",
pkg_name,
pkg_version,
&git_branch,
git_commit,
git_tree_clean,
BUILD_TYPE,
std::env::consts::OS,
std::env::consts::ARCH
)
}
|
random_line_split
|
|
libclarity.rs
|
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
// Copyright (C) 2020 Stacks Open Internet Foundation
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_imports)]
#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![cfg_attr(test, allow(unused_variables, unused_assignments))]
extern crate curve25519_dalek;
extern crate ed25519_dalek;
extern crate rand;
extern crate rand_chacha;
extern crate rusqlite;
extern crate secp256k1;
extern crate serde;
extern crate tini;
#[macro_use]
extern crate lazy_static;
extern crate integer_sqrt;
extern crate mio;
extern crate percent_encoding;
extern crate regex;
extern crate ripemd160;
extern crate sha2;
extern crate sha3;
extern crate time;
extern crate url;
#[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)]
extern crate slog;
extern crate chrono;
#[cfg(feature = "slog_json")]
extern crate slog_json;
extern crate slog_term;
#[cfg(unix)]
extern crate libc;
#[cfg(unix)]
extern crate nix;
#[cfg(windows)]
extern crate winapi;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
#[cfg(test)]
#[macro_use]
extern crate assert_json_diff;
#[cfg(feature = "monitoring_prom")]
#[macro_use]
pub extern crate prometheus;
#[macro_use]
pub mod codec;
#[macro_use]
pub mod util;
#[macro_use]
pub mod net;
#[macro_use]
/// The Clarity virtual machine
pub mod vm;
#[macro_use]
pub mod chainstate;
#[cfg(test)]
extern crate stx_genesis;
pub mod address;
pub mod burnchains;
/// A high level library for interacting with the Clarity vm
pub mod clarity_vm;
pub mod core;
pub mod deps;
pub mod clarity;
pub mod cost_estimates;
pub mod monitoring;
pub mod types;
// set via _compile-time_ envars
const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH");
const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT");
const GIT_TREE_CLEAN: Option<&'static str> = option_env!("GIT_TREE_CLEAN");
#[cfg(debug_assertions)]
const BUILD_TYPE: &'static str = "debug";
#[cfg(not(debug_assertions))]
const BUILD_TYPE: &'static str = "release";
pub fn
|
(pkg_name: &str, pkg_version: &str) -> String {
let git_branch = GIT_BRANCH.unwrap_or("");
let git_commit = GIT_COMMIT.unwrap_or("");
let git_tree_clean = GIT_TREE_CLEAN.unwrap_or("");
format!(
"{} {} ({}:{}{}, {} build, {} [{}])",
pkg_name,
pkg_version,
&git_branch,
git_commit,
git_tree_clean,
BUILD_TYPE,
std::env::consts::OS,
std::env::consts::ARCH
)
}
|
version_string
|
identifier_name
|
libclarity.rs
|
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
// Copyright (C) 2020 Stacks Open Internet Foundation
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_imports)]
#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![cfg_attr(test, allow(unused_variables, unused_assignments))]
extern crate curve25519_dalek;
extern crate ed25519_dalek;
extern crate rand;
extern crate rand_chacha;
extern crate rusqlite;
extern crate secp256k1;
extern crate serde;
extern crate tini;
#[macro_use]
extern crate lazy_static;
extern crate integer_sqrt;
extern crate mio;
extern crate percent_encoding;
extern crate regex;
extern crate ripemd160;
extern crate sha2;
extern crate sha3;
extern crate time;
extern crate url;
#[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)]
extern crate slog;
extern crate chrono;
#[cfg(feature = "slog_json")]
extern crate slog_json;
extern crate slog_term;
#[cfg(unix)]
extern crate libc;
#[cfg(unix)]
extern crate nix;
#[cfg(windows)]
extern crate winapi;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
#[cfg(test)]
#[macro_use]
extern crate assert_json_diff;
#[cfg(feature = "monitoring_prom")]
#[macro_use]
pub extern crate prometheus;
#[macro_use]
pub mod codec;
#[macro_use]
pub mod util;
#[macro_use]
pub mod net;
#[macro_use]
/// The Clarity virtual machine
pub mod vm;
#[macro_use]
pub mod chainstate;
#[cfg(test)]
extern crate stx_genesis;
pub mod address;
pub mod burnchains;
/// A high level library for interacting with the Clarity vm
pub mod clarity_vm;
pub mod core;
pub mod deps;
pub mod clarity;
pub mod cost_estimates;
pub mod monitoring;
pub mod types;
// set via _compile-time_ envars
const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH");
const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT");
const GIT_TREE_CLEAN: Option<&'static str> = option_env!("GIT_TREE_CLEAN");
#[cfg(debug_assertions)]
const BUILD_TYPE: &'static str = "debug";
#[cfg(not(debug_assertions))]
const BUILD_TYPE: &'static str = "release";
pub fn version_string(pkg_name: &str, pkg_version: &str) -> String
|
{
let git_branch = GIT_BRANCH.unwrap_or("");
let git_commit = GIT_COMMIT.unwrap_or("");
let git_tree_clean = GIT_TREE_CLEAN.unwrap_or("");
format!(
"{} {} ({}:{}{}, {} build, {} [{}])",
pkg_name,
pkg_version,
&git_branch,
git_commit,
git_tree_clean,
BUILD_TYPE,
std::env::consts::OS,
std::env::consts::ARCH
)
}
|
identifier_body
|
|
aal.rs
|
// #[macro_use] extern crate lazy_static;
extern crate docopt;
extern crate raal;
extern crate serde_json;
extern crate shellexpand;
use docopt::Docopt;
use raal::ec2_instances::{AshufInfo, read_without_cache, read_via_cache, instances_matching_regex};
use raal::config::read_config;
const USAGE: &'static str = "
Query amazon for a random choice among some set of resources
Display matching resources as a JSON document.
Usage:
aal [-c | --no-cache] [-e <env_name>] [-d <data_dir>] [-m <output_mode>] [-n <name>] <pattern>
aal (-h | --help)
Options:
-h --help Show this help screen
-d <data_dir> Data directory with cached data and config [default: ~/.raal]
-c --no-cache Bypass the cached resources info
-e --env-name=<env_name> The environment variable containing the name of this account [default: AWS_ACCOUNT_ID]
-m --mode=<output_mode> Output mode [default: json_ashuf_info]
-n <name> Easy name for this environment [default: default]
Output modes include: ip_private_line, json_ashuf_info, enum_name_tag
";
fn print_ip_private_line(results: Vec<AshufInfo>) {
// prints the public ip addresses of matches, on per line
for r in results {
for addr in r.private_ip_addresses {
println!("{}", addr);
};
};
}
fn print_json_ashuf_info(results: Vec<AshufInfo>) {
// prints the public ip addresses of matches, as json
println!("{}", serde_json::to_string_pretty(&results).expect("Couldn't serialize config"));
}
// fn print_enum_name_tag(results: Vec<AshufInfo>) {
// // prints a list of the names:addresses of instances, one pre line
// println!("When this works, sort and print a list, with numbers, of matches");
// }
fn main()
|
.get(&env_name.to_string())
.unwrap()
.account_id
.clone();
let aws_region = config.environments
.get(&env_name.to_string())
.unwrap()
.region
.clone();
let all_instances = match bypass_cache {
true => {
if debug {
println!("Bypassing the cache");
}
read_without_cache(&data_dir, &aws_region, &aws_id)
},
false => read_via_cache(&data_dir, &aws_region, &aws_id, cache_ttl),
};
// These are the tags we'll filter on
let tags = vec!["Name".to_string(), "Tier".to_string()];
let matches = instances_matching_regex(pattern, tags, all_instances);
// let matched_json = serde_json::to_string_pretty(&matches).expect("Couldn't serialize config");
let output_format = parsed_cmdline.get_str("-m");
if output_format == "ip_private_line" {
print_ip_private_line(matches);
} else if output_format == "json_ashuf_info" {
print_json_ashuf_info(matches);
// } else if output_format == "enum_name_tag" {
// print_enum_name_tag(matches);
}
}
|
{
let version = "0.1.0".to_owned();
let parsed_cmdline = Docopt::new(USAGE)
.and_then(|d| d.version(Some(version)).parse())
.unwrap_or_else(|e| e.exit());
let pattern = parsed_cmdline.get_str("<pattern>").to_string();
let debug = false;
// if debug {
// println!("Command line parsed to {:?}", parsed_cmdline);
// println!("Pattern is {:?}", pattern);
// };
let env_name = parsed_cmdline.get_str("-n");
let bypass_cache = parsed_cmdline.get_bool("-c");
let cache_ttl = 3600;
let data_dir = shellexpand::full(parsed_cmdline.get_str("-d"))
.unwrap()
.to_string();
let config = read_config(&data_dir);
let aws_id = config.environments
|
identifier_body
|
aal.rs
|
// #[macro_use] extern crate lazy_static;
extern crate docopt;
extern crate raal;
extern crate serde_json;
extern crate shellexpand;
use docopt::Docopt;
use raal::ec2_instances::{AshufInfo, read_without_cache, read_via_cache, instances_matching_regex};
use raal::config::read_config;
const USAGE: &'static str = "
Query amazon for a random choice among some set of resources
Display matching resources as a JSON document.
Usage:
aal [-c | --no-cache] [-e <env_name>] [-d <data_dir>] [-m <output_mode>] [-n <name>] <pattern>
aal (-h | --help)
Options:
-h --help Show this help screen
-d <data_dir> Data directory with cached data and config [default: ~/.raal]
-c --no-cache Bypass the cached resources info
-e --env-name=<env_name> The environment variable containing the name of this account [default: AWS_ACCOUNT_ID]
-m --mode=<output_mode> Output mode [default: json_ashuf_info]
-n <name> Easy name for this environment [default: default]
Output modes include: ip_private_line, json_ashuf_info, enum_name_tag
";
fn print_ip_private_line(results: Vec<AshufInfo>) {
// prints the public ip addresses of matches, on per line
for r in results {
for addr in r.private_ip_addresses {
println!("{}", addr);
};
};
}
fn print_json_ashuf_info(results: Vec<AshufInfo>) {
// prints the public ip addresses of matches, as json
println!("{}", serde_json::to_string_pretty(&results).expect("Couldn't serialize config"));
}
// fn print_enum_name_tag(results: Vec<AshufInfo>) {
// // prints a list of the names:addresses of instances, one pre line
// println!("When this works, sort and print a list, with numbers, of matches");
// }
fn
|
() {
let version = "0.1.0".to_owned();
let parsed_cmdline = Docopt::new(USAGE)
.and_then(|d| d.version(Some(version)).parse())
.unwrap_or_else(|e| e.exit());
let pattern = parsed_cmdline.get_str("<pattern>").to_string();
let debug = false;
// if debug {
// println!("Command line parsed to {:?}", parsed_cmdline);
// println!("Pattern is {:?}", pattern);
// };
let env_name = parsed_cmdline.get_str("-n");
let bypass_cache = parsed_cmdline.get_bool("-c");
let cache_ttl = 3600;
let data_dir = shellexpand::full(parsed_cmdline.get_str("-d"))
.unwrap()
.to_string();
let config = read_config(&data_dir);
let aws_id = config.environments
.get(&env_name.to_string())
.unwrap()
.account_id
.clone();
let aws_region = config.environments
.get(&env_name.to_string())
.unwrap()
.region
.clone();
let all_instances = match bypass_cache {
true => {
if debug {
println!("Bypassing the cache");
}
read_without_cache(&data_dir, &aws_region, &aws_id)
},
false => read_via_cache(&data_dir, &aws_region, &aws_id, cache_ttl),
};
// These are the tags we'll filter on
let tags = vec!["Name".to_string(), "Tier".to_string()];
let matches = instances_matching_regex(pattern, tags, all_instances);
// let matched_json = serde_json::to_string_pretty(&matches).expect("Couldn't serialize config");
let output_format = parsed_cmdline.get_str("-m");
if output_format == "ip_private_line" {
print_ip_private_line(matches);
} else if output_format == "json_ashuf_info" {
print_json_ashuf_info(matches);
// } else if output_format == "enum_name_tag" {
// print_enum_name_tag(matches);
}
}
|
main
|
identifier_name
|
aal.rs
|
// #[macro_use] extern crate lazy_static;
extern crate docopt;
extern crate raal;
extern crate serde_json;
extern crate shellexpand;
use docopt::Docopt;
use raal::ec2_instances::{AshufInfo, read_without_cache, read_via_cache, instances_matching_regex};
use raal::config::read_config;
const USAGE: &'static str = "
Query amazon for a random choice among some set of resources
Display matching resources as a JSON document.
Usage:
aal [-c | --no-cache] [-e <env_name>] [-d <data_dir>] [-m <output_mode>] [-n <name>] <pattern>
aal (-h | --help)
Options:
-h --help Show this help screen
-d <data_dir> Data directory with cached data and config [default: ~/.raal]
-c --no-cache Bypass the cached resources info
-e --env-name=<env_name> The environment variable containing the name of this account [default: AWS_ACCOUNT_ID]
-m --mode=<output_mode> Output mode [default: json_ashuf_info]
-n <name> Easy name for this environment [default: default]
Output modes include: ip_private_line, json_ashuf_info, enum_name_tag
";
fn print_ip_private_line(results: Vec<AshufInfo>) {
// prints the public ip addresses of matches, on per line
for r in results {
for addr in r.private_ip_addresses {
println!("{}", addr);
};
};
}
fn print_json_ashuf_info(results: Vec<AshufInfo>) {
// prints the public ip addresses of matches, as json
println!("{}", serde_json::to_string_pretty(&results).expect("Couldn't serialize config"));
}
// fn print_enum_name_tag(results: Vec<AshufInfo>) {
// // prints a list of the names:addresses of instances, one pre line
// println!("When this works, sort and print a list, with numbers, of matches");
// }
fn main() {
|
let pattern = parsed_cmdline.get_str("<pattern>").to_string();
let debug = false;
// if debug {
// println!("Command line parsed to {:?}", parsed_cmdline);
// println!("Pattern is {:?}", pattern);
// };
let env_name = parsed_cmdline.get_str("-n");
let bypass_cache = parsed_cmdline.get_bool("-c");
let cache_ttl = 3600;
let data_dir = shellexpand::full(parsed_cmdline.get_str("-d"))
.unwrap()
.to_string();
let config = read_config(&data_dir);
let aws_id = config.environments
.get(&env_name.to_string())
.unwrap()
.account_id
.clone();
let aws_region = config.environments
.get(&env_name.to_string())
.unwrap()
.region
.clone();
let all_instances = match bypass_cache {
true => {
if debug {
println!("Bypassing the cache");
}
read_without_cache(&data_dir, &aws_region, &aws_id)
},
false => read_via_cache(&data_dir, &aws_region, &aws_id, cache_ttl),
};
// These are the tags we'll filter on
let tags = vec!["Name".to_string(), "Tier".to_string()];
let matches = instances_matching_regex(pattern, tags, all_instances);
// let matched_json = serde_json::to_string_pretty(&matches).expect("Couldn't serialize config");
let output_format = parsed_cmdline.get_str("-m");
if output_format == "ip_private_line" {
print_ip_private_line(matches);
} else if output_format == "json_ashuf_info" {
print_json_ashuf_info(matches);
// } else if output_format == "enum_name_tag" {
// print_enum_name_tag(matches);
}
}
|
let version = "0.1.0".to_owned();
let parsed_cmdline = Docopt::new(USAGE)
.and_then(|d| d.version(Some(version)).parse())
.unwrap_or_else(|e| e.exit());
|
random_line_split
|
union_fields_1_0.rs
|
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
impl<T> __BindgenUnionField<T> {
#[inline]
pub fn new() -> Self {
__BindgenUnionField(::std::marker::PhantomData)
}
#[inline]
pub unsafe fn as_ref(&self) -> &T {
::std::mem::transmute(self)
}
#[inline]
pub unsafe fn as_mut(&mut self) -> &mut T {
::std::mem::transmute(self)
}
}
impl<T> ::std::default::Default for __BindgenUnionField<T> {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl<T> ::std::clone::Clone for __BindgenUnionField<T> {
#[inline]
fn
|
(&self) -> Self {
Self::new()
}
}
impl<T> ::std::marker::Copy for __BindgenUnionField<T> {}
impl<T> ::std::fmt::Debug for __BindgenUnionField<T> {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
fmt.write_str("__BindgenUnionField")
}
}
impl<T> ::std::hash::Hash for __BindgenUnionField<T> {
fn hash<H: ::std::hash::Hasher>(&self, _state: &mut H) {}
}
impl<T> ::std::cmp::PartialEq for __BindgenUnionField<T> {
fn eq(&self, _other: &__BindgenUnionField<T>) -> bool {
true
}
}
impl<T> ::std::cmp::Eq for __BindgenUnionField<T> {}
#[repr(C)]
#[derive(Debug, Default, Copy, Hash, PartialEq)]
pub struct nsStyleUnion {
pub mInt: __BindgenUnionField<::std::os::raw::c_int>,
pub mFloat: __BindgenUnionField<f32>,
pub mPointer: __BindgenUnionField<*mut ::std::os::raw::c_void>,
pub bindgen_union_field: u64,
}
#[test]
fn bindgen_test_layout_nsStyleUnion() {
assert_eq!(
::std::mem::size_of::<nsStyleUnion>(),
8usize,
concat!("Size of: ", stringify!(nsStyleUnion))
);
assert_eq!(
::std::mem::align_of::<nsStyleUnion>(),
8usize,
concat!("Alignment of ", stringify!(nsStyleUnion))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<nsStyleUnion>())).mInt as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(nsStyleUnion),
"::",
stringify!(mInt)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<nsStyleUnion>())).mFloat as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(nsStyleUnion),
"::",
stringify!(mFloat)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<nsStyleUnion>())).mPointer as *const _
as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(nsStyleUnion),
"::",
stringify!(mPointer)
)
);
}
impl Clone for nsStyleUnion {
fn clone(&self) -> Self {
*self
}
}
|
clone
|
identifier_name
|
union_fields_1_0.rs
|
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
impl<T> __BindgenUnionField<T> {
#[inline]
pub fn new() -> Self {
__BindgenUnionField(::std::marker::PhantomData)
}
#[inline]
pub unsafe fn as_ref(&self) -> &T {
::std::mem::transmute(self)
}
#[inline]
pub unsafe fn as_mut(&mut self) -> &mut T {
::std::mem::transmute(self)
}
}
impl<T> ::std::default::Default for __BindgenUnionField<T> {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl<T> ::std::clone::Clone for __BindgenUnionField<T> {
#[inline]
fn clone(&self) -> Self {
Self::new()
}
}
impl<T> ::std::marker::Copy for __BindgenUnionField<T> {}
impl<T> ::std::fmt::Debug for __BindgenUnionField<T> {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
fmt.write_str("__BindgenUnionField")
}
}
impl<T> ::std::hash::Hash for __BindgenUnionField<T> {
fn hash<H: ::std::hash::Hasher>(&self, _state: &mut H) {}
}
impl<T> ::std::cmp::PartialEq for __BindgenUnionField<T> {
fn eq(&self, _other: &__BindgenUnionField<T>) -> bool {
true
}
}
impl<T> ::std::cmp::Eq for __BindgenUnionField<T> {}
#[repr(C)]
#[derive(Debug, Default, Copy, Hash, PartialEq)]
pub struct nsStyleUnion {
pub mInt: __BindgenUnionField<::std::os::raw::c_int>,
pub mFloat: __BindgenUnionField<f32>,
pub mPointer: __BindgenUnionField<*mut ::std::os::raw::c_void>,
pub bindgen_union_field: u64,
}
#[test]
fn bindgen_test_layout_nsStyleUnion() {
assert_eq!(
::std::mem::size_of::<nsStyleUnion>(),
8usize,
concat!("Size of: ", stringify!(nsStyleUnion))
);
assert_eq!(
::std::mem::align_of::<nsStyleUnion>(),
8usize,
concat!("Alignment of ", stringify!(nsStyleUnion))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<nsStyleUnion>())).mInt as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(nsStyleUnion),
"::",
stringify!(mInt)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<nsStyleUnion>())).mFloat as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(nsStyleUnion),
"::",
stringify!(mFloat)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<nsStyleUnion>())).mPointer as *const _
as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(nsStyleUnion),
"::",
stringify!(mPointer)
)
);
}
impl Clone for nsStyleUnion {
fn clone(&self) -> Self
|
}
|
{
*self
}
|
identifier_body
|
union_fields_1_0.rs
|
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
impl<T> __BindgenUnionField<T> {
#[inline]
pub fn new() -> Self {
__BindgenUnionField(::std::marker::PhantomData)
}
#[inline]
pub unsafe fn as_ref(&self) -> &T {
::std::mem::transmute(self)
}
#[inline]
pub unsafe fn as_mut(&mut self) -> &mut T {
::std::mem::transmute(self)
}
}
impl<T> ::std::default::Default for __BindgenUnionField<T> {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl<T> ::std::clone::Clone for __BindgenUnionField<T> {
#[inline]
fn clone(&self) -> Self {
Self::new()
}
}
impl<T> ::std::marker::Copy for __BindgenUnionField<T> {}
impl<T> ::std::fmt::Debug for __BindgenUnionField<T> {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
fmt.write_str("__BindgenUnionField")
|
}
impl<T> ::std::hash::Hash for __BindgenUnionField<T> {
fn hash<H: ::std::hash::Hasher>(&self, _state: &mut H) {}
}
impl<T> ::std::cmp::PartialEq for __BindgenUnionField<T> {
fn eq(&self, _other: &__BindgenUnionField<T>) -> bool {
true
}
}
impl<T> ::std::cmp::Eq for __BindgenUnionField<T> {}
#[repr(C)]
#[derive(Debug, Default, Copy, Hash, PartialEq)]
pub struct nsStyleUnion {
pub mInt: __BindgenUnionField<::std::os::raw::c_int>,
pub mFloat: __BindgenUnionField<f32>,
pub mPointer: __BindgenUnionField<*mut ::std::os::raw::c_void>,
pub bindgen_union_field: u64,
}
#[test]
fn bindgen_test_layout_nsStyleUnion() {
assert_eq!(
::std::mem::size_of::<nsStyleUnion>(),
8usize,
concat!("Size of: ", stringify!(nsStyleUnion))
);
assert_eq!(
::std::mem::align_of::<nsStyleUnion>(),
8usize,
concat!("Alignment of ", stringify!(nsStyleUnion))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<nsStyleUnion>())).mInt as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(nsStyleUnion),
"::",
stringify!(mInt)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<nsStyleUnion>())).mFloat as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(nsStyleUnion),
"::",
stringify!(mFloat)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<nsStyleUnion>())).mPointer as *const _
as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(nsStyleUnion),
"::",
stringify!(mPointer)
)
);
}
impl Clone for nsStyleUnion {
fn clone(&self) -> Self {
*self
}
}
|
}
|
random_line_split
|
method-ambig-two-traits-cross-crate.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test an ambiguity scenario where one copy of the method is available
// from a trait imported from another crate.
// aux-build:ambig_impl_2_lib.rs
extern crate ambig_impl_2_lib;
use ambig_impl_2_lib::me;
trait me2 {
fn me(&self) -> uint;
}
impl me2 for uint { fn me(&self) -> uint { *self } }
fn
|
() { 1u.me(); } //~ ERROR E0034
|
main
|
identifier_name
|
method-ambig-two-traits-cross-crate.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test an ambiguity scenario where one copy of the method is available
// from a trait imported from another crate.
// aux-build:ambig_impl_2_lib.rs
extern crate ambig_impl_2_lib;
use ambig_impl_2_lib::me;
trait me2 {
|
fn main() { 1u.me(); } //~ ERROR E0034
|
fn me(&self) -> uint;
}
impl me2 for uint { fn me(&self) -> uint { *self } }
|
random_line_split
|
method-ambig-two-traits-cross-crate.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test an ambiguity scenario where one copy of the method is available
// from a trait imported from another crate.
// aux-build:ambig_impl_2_lib.rs
extern crate ambig_impl_2_lib;
use ambig_impl_2_lib::me;
trait me2 {
fn me(&self) -> uint;
}
impl me2 for uint { fn me(&self) -> uint { *self } }
fn main()
|
//~ ERROR E0034
|
{ 1u.me(); }
|
identifier_body
|
x86.rs
|
#[repr(C)]
#[derive(Copy)]
pub struct netent {
pub n_name: *mut ::schar_t,
pub n_aliases: *mut *mut ::schar_t,
pub n_addrtype: ::int_t,
pub n_net: ::stdint::uint32_t,
}
new!(netent);
#[repr(C)]
#[derive(Copy)]
pub struct hostent {
pub h_name: *mut ::schar_t,
pub h_aliases: *mut *mut ::schar_t,
pub h_addrtype: ::int_t,
pub h_length: ::int_t,
pub h_addr_list: *mut *mut ::schar_t,
}
new!(hostent);
#[repr(C)]
#[derive(Copy)]
pub struct servent {
pub s_name: *mut ::schar_t,
pub s_aliases: *mut *mut ::schar_t,
pub s_port: ::int_t,
pub s_proto: *mut ::schar_t,
}
new!(servent);
#[repr(C)]
#[derive(Copy)]
pub struct protoent {
pub p_name: *mut ::schar_t,
pub p_aliases: *mut *mut ::schar_t,
pub p_proto: ::int_t,
}
new!(protoent);
#[repr(C)]
#[derive(Copy)]
pub struct
|
{
pub ai_flags: ::int_t,
pub ai_family: ::int_t,
pub ai_socktype: ::int_t,
pub ai_protocol: ::int_t,
pub ai_addrlen: ::sys::socket::socklen_t,
pub ai_addr: *mut ::sys::socket::sockaddr,
pub ai_canonname: *mut ::schar_t,
pub ai_next: *mut addrinfo,
}
new!(addrinfo);
pub const IPPORT_RESERVED: ::int_t = 1024;
pub const AI_PASSIVE: ::int_t = 0x0001;
pub const AI_CANONNAME: ::int_t = 0x0002;
pub const AI_NUMERICHOST: ::int_t = 0x0004;
pub const AI_NUMERICSERV: ::int_t = 0x0400;
pub const AI_V4MAPPED: ::int_t = 0x0008;
pub const AI_ALL: ::int_t = 0x0010;
pub const AI_ADDRCONFIG: ::int_t = 0x0020;
pub const NI_NOFQDN: ::int_t = 4;
pub const NI_NUMERICHOST: ::int_t = 1;
pub const NI_NAMEREQD: ::int_t = 8;
pub const NI_NUMERICSERV: ::int_t = 2;
pub const NI_DGRAM: ::int_t = 16;
pub const EAI_AGAIN: ::int_t = 3;
pub const EAI_BADFLAGS: ::int_t = 1;
pub const EAI_FAIL: ::int_t = 4;
pub const EAI_FAMILY: ::int_t = 6;
pub const EAI_MEMORY: ::int_t = 10;
pub const EAI_NONAME: ::int_t = 2;
pub const EAI_SERVICE: ::int_t = 8;
pub const EAI_SOCKTYPE: ::int_t = 7;
pub const EAI_SYSTEM: ::int_t = 11;
pub const EAI_OVERFLOW: ::int_t = 12;
|
addrinfo
|
identifier_name
|
x86.rs
|
#[repr(C)]
#[derive(Copy)]
pub struct netent {
pub n_name: *mut ::schar_t,
pub n_aliases: *mut *mut ::schar_t,
pub n_addrtype: ::int_t,
pub n_net: ::stdint::uint32_t,
}
new!(netent);
#[repr(C)]
#[derive(Copy)]
pub struct hostent {
pub h_name: *mut ::schar_t,
pub h_aliases: *mut *mut ::schar_t,
pub h_addrtype: ::int_t,
pub h_length: ::int_t,
pub h_addr_list: *mut *mut ::schar_t,
}
new!(hostent);
#[repr(C)]
#[derive(Copy)]
pub struct servent {
pub s_name: *mut ::schar_t,
pub s_aliases: *mut *mut ::schar_t,
pub s_port: ::int_t,
pub s_proto: *mut ::schar_t,
}
new!(servent);
#[repr(C)]
#[derive(Copy)]
pub struct protoent {
pub p_name: *mut ::schar_t,
pub p_aliases: *mut *mut ::schar_t,
pub p_proto: ::int_t,
}
new!(protoent);
#[repr(C)]
#[derive(Copy)]
pub struct addrinfo {
pub ai_flags: ::int_t,
pub ai_family: ::int_t,
pub ai_socktype: ::int_t,
pub ai_protocol: ::int_t,
pub ai_addrlen: ::sys::socket::socklen_t,
pub ai_addr: *mut ::sys::socket::sockaddr,
pub ai_canonname: *mut ::schar_t,
pub ai_next: *mut addrinfo,
}
new!(addrinfo);
pub const IPPORT_RESERVED: ::int_t = 1024;
pub const AI_PASSIVE: ::int_t = 0x0001;
pub const AI_CANONNAME: ::int_t = 0x0002;
pub const AI_NUMERICHOST: ::int_t = 0x0004;
pub const AI_NUMERICSERV: ::int_t = 0x0400;
pub const AI_V4MAPPED: ::int_t = 0x0008;
pub const AI_ALL: ::int_t = 0x0010;
pub const AI_ADDRCONFIG: ::int_t = 0x0020;
pub const NI_NOFQDN: ::int_t = 4;
pub const NI_NUMERICHOST: ::int_t = 1;
pub const NI_NAMEREQD: ::int_t = 8;
pub const NI_NUMERICSERV: ::int_t = 2;
pub const NI_DGRAM: ::int_t = 16;
pub const EAI_AGAIN: ::int_t = 3;
pub const EAI_BADFLAGS: ::int_t = 1;
pub const EAI_FAIL: ::int_t = 4;
pub const EAI_FAMILY: ::int_t = 6;
pub const EAI_MEMORY: ::int_t = 10;
pub const EAI_NONAME: ::int_t = 2;
pub const EAI_SERVICE: ::int_t = 8;
|
pub const EAI_SYSTEM: ::int_t = 11;
pub const EAI_OVERFLOW: ::int_t = 12;
|
pub const EAI_SOCKTYPE: ::int_t = 7;
|
random_line_split
|
function-prologue-stepping-regular.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test case checks if function arguments already have the correct value when breaking at the
// beginning of a function.
// min-lldb-version: 310
// ignore-gdb
// compile-flags:-g
// lldb-command:breakpoint set --name immediate_args
|
// lldb-command:breakpoint set --name assignment
// lldb-command:breakpoint set --name function_call
// lldb-command:breakpoint set --name identifier
// lldb-command:breakpoint set --name return_expr
// lldb-command:breakpoint set --name arithmetic_expr
// lldb-command:breakpoint set --name if_expr
// lldb-command:breakpoint set --name while_expr
// lldb-command:breakpoint set --name loop_expr
// lldb-command:run
// IMMEDIATE ARGS
// lldb-command:print a
// lldb-check:[...]$0 = 1
// lldb-command:print b
// lldb-check:[...]$1 = true
// lldb-command:print c
// lldb-check:[...]$2 = 2.5
// lldb-command:continue
// NON IMMEDIATE ARGS
// lldb-command:print a
// lldb-check:[...]$3 = BigStruct { a: 3, b: 4, c: 5, d: 6, e: 7, f: 8, g: 9, h: 10 }
// lldb-command:print b
// lldb-check:[...]$4 = BigStruct { a: 11, b: 12, c: 13, d: 14, e: 15, f: 16, g: 17, h: 18 }
// lldb-command:continue
// BINDING
// lldb-command:print a
// lldb-check:[...]$5 = 19
// lldb-command:print b
// lldb-check:[...]$6 = 20
// lldb-command:print c
// lldb-check:[...]$7 = 21.5
// lldb-command:continue
// ASSIGNMENT
// lldb-command:print a
// lldb-check:[...]$8 = 22
// lldb-command:print b
// lldb-check:[...]$9 = 23
// lldb-command:print c
// lldb-check:[...]$10 = 24.5
// lldb-command:continue
// FUNCTION CALL
// lldb-command:print x
// lldb-check:[...]$11 = 25
// lldb-command:print y
// lldb-check:[...]$12 = 26
// lldb-command:print z
// lldb-check:[...]$13 = 27.5
// lldb-command:continue
// EXPR
// lldb-command:print x
// lldb-check:[...]$14 = 28
// lldb-command:print y
// lldb-check:[...]$15 = 29
// lldb-command:print z
// lldb-check:[...]$16 = 30.5
// lldb-command:continue
// RETURN EXPR
// lldb-command:print x
// lldb-check:[...]$17 = 31
// lldb-command:print y
// lldb-check:[...]$18 = 32
// lldb-command:print z
// lldb-check:[...]$19 = 33.5
// lldb-command:continue
// ARITHMETIC EXPR
// lldb-command:print x
// lldb-check:[...]$20 = 34
// lldb-command:print y
// lldb-check:[...]$21 = 35
// lldb-command:print z
// lldb-check:[...]$22 = 36.5
// lldb-command:continue
// IF EXPR
// lldb-command:print x
// lldb-check:[...]$23 = 37
// lldb-command:print y
// lldb-check:[...]$24 = 38
// lldb-command:print z
// lldb-check:[...]$25 = 39.5
// lldb-command:continue
// WHILE EXPR
// lldb-command:print x
// lldb-check:[...]$26 = 40
// lldb-command:print y
// lldb-check:[...]$27 = 41
// lldb-command:print z
// lldb-check:[...]$28 = 42
// lldb-command:continue
// LOOP EXPR
// lldb-command:print x
// lldb-check:[...]$29 = 43
// lldb-command:print y
// lldb-check:[...]$30 = 44
// lldb-command:print z
// lldb-check:[...]$31 = 45
// lldb-command:continue
#![allow(unused_variables)]
#![omit_gdb_pretty_printer_section]
fn immediate_args(a: int, b: bool, c: f64) {
()
}
struct BigStruct {
a: u64,
b: u64,
c: u64,
d: u64,
e: u64,
f: u64,
g: u64,
h: u64
}
fn non_immediate_args(a: BigStruct, b: BigStruct) {
()
}
fn binding(a: i64, b: u64, c: f64) {
let x = 0;
}
fn assignment(mut a: u64, b: u64, c: f64) {
a = b;
}
fn function_call(x: u64, y: u64, z: f64) {
std::old_io::stdio::print("Hi!")
}
fn identifier(x: u64, y: u64, z: f64) -> u64 {
x
}
fn return_expr(x: u64, y: u64, z: f64) -> u64 {
return x;
}
fn arithmetic_expr(x: u64, y: u64, z: f64) -> u64 {
x + y
}
fn if_expr(x: u64, y: u64, z: f64) -> u64 {
if x + y < 1000 {
x
} else {
y
}
}
fn while_expr(mut x: u64, y: u64, z: u64) -> u64 {
while x + y < 1000 {
x += z
}
return x;
}
fn loop_expr(mut x: u64, y: u64, z: u64) -> u64 {
loop {
x += z;
if x + y > 1000 {
return x;
}
}
}
fn main() {
immediate_args(1, true, 2.5);
non_immediate_args(
BigStruct {
a: 3,
b: 4,
c: 5,
d: 6,
e: 7,
f: 8,
g: 9,
h: 10
},
BigStruct {
a: 11,
b: 12,
c: 13,
d: 14,
e: 15,
f: 16,
g: 17,
h: 18
}
);
binding(19, 20, 21.5);
assignment(22, 23, 24.5);
function_call(25, 26, 27.5);
identifier(28, 29, 30.5);
return_expr(31, 32, 33.5);
arithmetic_expr(34, 35, 36.5);
if_expr(37, 38, 39.5);
while_expr(40, 41, 42);
loop_expr(43, 44, 45);
}
|
// lldb-command:breakpoint set --name non_immediate_args
// lldb-command:breakpoint set --name binding
|
random_line_split
|
function-prologue-stepping-regular.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test case checks if function arguments already have the correct value when breaking at the
// beginning of a function.
// min-lldb-version: 310
// ignore-gdb
// compile-flags:-g
// lldb-command:breakpoint set --name immediate_args
// lldb-command:breakpoint set --name non_immediate_args
// lldb-command:breakpoint set --name binding
// lldb-command:breakpoint set --name assignment
// lldb-command:breakpoint set --name function_call
// lldb-command:breakpoint set --name identifier
// lldb-command:breakpoint set --name return_expr
// lldb-command:breakpoint set --name arithmetic_expr
// lldb-command:breakpoint set --name if_expr
// lldb-command:breakpoint set --name while_expr
// lldb-command:breakpoint set --name loop_expr
// lldb-command:run
// IMMEDIATE ARGS
// lldb-command:print a
// lldb-check:[...]$0 = 1
// lldb-command:print b
// lldb-check:[...]$1 = true
// lldb-command:print c
// lldb-check:[...]$2 = 2.5
// lldb-command:continue
// NON IMMEDIATE ARGS
// lldb-command:print a
// lldb-check:[...]$3 = BigStruct { a: 3, b: 4, c: 5, d: 6, e: 7, f: 8, g: 9, h: 10 }
// lldb-command:print b
// lldb-check:[...]$4 = BigStruct { a: 11, b: 12, c: 13, d: 14, e: 15, f: 16, g: 17, h: 18 }
// lldb-command:continue
// BINDING
// lldb-command:print a
// lldb-check:[...]$5 = 19
// lldb-command:print b
// lldb-check:[...]$6 = 20
// lldb-command:print c
// lldb-check:[...]$7 = 21.5
// lldb-command:continue
// ASSIGNMENT
// lldb-command:print a
// lldb-check:[...]$8 = 22
// lldb-command:print b
// lldb-check:[...]$9 = 23
// lldb-command:print c
// lldb-check:[...]$10 = 24.5
// lldb-command:continue
// FUNCTION CALL
// lldb-command:print x
// lldb-check:[...]$11 = 25
// lldb-command:print y
// lldb-check:[...]$12 = 26
// lldb-command:print z
// lldb-check:[...]$13 = 27.5
// lldb-command:continue
// EXPR
// lldb-command:print x
// lldb-check:[...]$14 = 28
// lldb-command:print y
// lldb-check:[...]$15 = 29
// lldb-command:print z
// lldb-check:[...]$16 = 30.5
// lldb-command:continue
// RETURN EXPR
// lldb-command:print x
// lldb-check:[...]$17 = 31
// lldb-command:print y
// lldb-check:[...]$18 = 32
// lldb-command:print z
// lldb-check:[...]$19 = 33.5
// lldb-command:continue
// ARITHMETIC EXPR
// lldb-command:print x
// lldb-check:[...]$20 = 34
// lldb-command:print y
// lldb-check:[...]$21 = 35
// lldb-command:print z
// lldb-check:[...]$22 = 36.5
// lldb-command:continue
// IF EXPR
// lldb-command:print x
// lldb-check:[...]$23 = 37
// lldb-command:print y
// lldb-check:[...]$24 = 38
// lldb-command:print z
// lldb-check:[...]$25 = 39.5
// lldb-command:continue
// WHILE EXPR
// lldb-command:print x
// lldb-check:[...]$26 = 40
// lldb-command:print y
// lldb-check:[...]$27 = 41
// lldb-command:print z
// lldb-check:[...]$28 = 42
// lldb-command:continue
// LOOP EXPR
// lldb-command:print x
// lldb-check:[...]$29 = 43
// lldb-command:print y
// lldb-check:[...]$30 = 44
// lldb-command:print z
// lldb-check:[...]$31 = 45
// lldb-command:continue
#![allow(unused_variables)]
#![omit_gdb_pretty_printer_section]
fn immediate_args(a: int, b: bool, c: f64) {
()
}
struct BigStruct {
a: u64,
b: u64,
c: u64,
d: u64,
e: u64,
f: u64,
g: u64,
h: u64
}
fn non_immediate_args(a: BigStruct, b: BigStruct) {
()
}
fn binding(a: i64, b: u64, c: f64) {
let x = 0;
}
fn assignment(mut a: u64, b: u64, c: f64) {
a = b;
}
fn function_call(x: u64, y: u64, z: f64) {
std::old_io::stdio::print("Hi!")
}
fn
|
(x: u64, y: u64, z: f64) -> u64 {
x
}
fn return_expr(x: u64, y: u64, z: f64) -> u64 {
return x;
}
fn arithmetic_expr(x: u64, y: u64, z: f64) -> u64 {
x + y
}
fn if_expr(x: u64, y: u64, z: f64) -> u64 {
if x + y < 1000 {
x
} else {
y
}
}
fn while_expr(mut x: u64, y: u64, z: u64) -> u64 {
while x + y < 1000 {
x += z
}
return x;
}
fn loop_expr(mut x: u64, y: u64, z: u64) -> u64 {
loop {
x += z;
if x + y > 1000 {
return x;
}
}
}
fn main() {
immediate_args(1, true, 2.5);
non_immediate_args(
BigStruct {
a: 3,
b: 4,
c: 5,
d: 6,
e: 7,
f: 8,
g: 9,
h: 10
},
BigStruct {
a: 11,
b: 12,
c: 13,
d: 14,
e: 15,
f: 16,
g: 17,
h: 18
}
);
binding(19, 20, 21.5);
assignment(22, 23, 24.5);
function_call(25, 26, 27.5);
identifier(28, 29, 30.5);
return_expr(31, 32, 33.5);
arithmetic_expr(34, 35, 36.5);
if_expr(37, 38, 39.5);
while_expr(40, 41, 42);
loop_expr(43, 44, 45);
}
|
identifier
|
identifier_name
|
function-prologue-stepping-regular.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test case checks if function arguments already have the correct value when breaking at the
// beginning of a function.
// min-lldb-version: 310
// ignore-gdb
// compile-flags:-g
// lldb-command:breakpoint set --name immediate_args
// lldb-command:breakpoint set --name non_immediate_args
// lldb-command:breakpoint set --name binding
// lldb-command:breakpoint set --name assignment
// lldb-command:breakpoint set --name function_call
// lldb-command:breakpoint set --name identifier
// lldb-command:breakpoint set --name return_expr
// lldb-command:breakpoint set --name arithmetic_expr
// lldb-command:breakpoint set --name if_expr
// lldb-command:breakpoint set --name while_expr
// lldb-command:breakpoint set --name loop_expr
// lldb-command:run
// IMMEDIATE ARGS
// lldb-command:print a
// lldb-check:[...]$0 = 1
// lldb-command:print b
// lldb-check:[...]$1 = true
// lldb-command:print c
// lldb-check:[...]$2 = 2.5
// lldb-command:continue
// NON IMMEDIATE ARGS
// lldb-command:print a
// lldb-check:[...]$3 = BigStruct { a: 3, b: 4, c: 5, d: 6, e: 7, f: 8, g: 9, h: 10 }
// lldb-command:print b
// lldb-check:[...]$4 = BigStruct { a: 11, b: 12, c: 13, d: 14, e: 15, f: 16, g: 17, h: 18 }
// lldb-command:continue
// BINDING
// lldb-command:print a
// lldb-check:[...]$5 = 19
// lldb-command:print b
// lldb-check:[...]$6 = 20
// lldb-command:print c
// lldb-check:[...]$7 = 21.5
// lldb-command:continue
// ASSIGNMENT
// lldb-command:print a
// lldb-check:[...]$8 = 22
// lldb-command:print b
// lldb-check:[...]$9 = 23
// lldb-command:print c
// lldb-check:[...]$10 = 24.5
// lldb-command:continue
// FUNCTION CALL
// lldb-command:print x
// lldb-check:[...]$11 = 25
// lldb-command:print y
// lldb-check:[...]$12 = 26
// lldb-command:print z
// lldb-check:[...]$13 = 27.5
// lldb-command:continue
// EXPR
// lldb-command:print x
// lldb-check:[...]$14 = 28
// lldb-command:print y
// lldb-check:[...]$15 = 29
// lldb-command:print z
// lldb-check:[...]$16 = 30.5
// lldb-command:continue
// RETURN EXPR
// lldb-command:print x
// lldb-check:[...]$17 = 31
// lldb-command:print y
// lldb-check:[...]$18 = 32
// lldb-command:print z
// lldb-check:[...]$19 = 33.5
// lldb-command:continue
// ARITHMETIC EXPR
// lldb-command:print x
// lldb-check:[...]$20 = 34
// lldb-command:print y
// lldb-check:[...]$21 = 35
// lldb-command:print z
// lldb-check:[...]$22 = 36.5
// lldb-command:continue
// IF EXPR
// lldb-command:print x
// lldb-check:[...]$23 = 37
// lldb-command:print y
// lldb-check:[...]$24 = 38
// lldb-command:print z
// lldb-check:[...]$25 = 39.5
// lldb-command:continue
// WHILE EXPR
// lldb-command:print x
// lldb-check:[...]$26 = 40
// lldb-command:print y
// lldb-check:[...]$27 = 41
// lldb-command:print z
// lldb-check:[...]$28 = 42
// lldb-command:continue
// LOOP EXPR
// lldb-command:print x
// lldb-check:[...]$29 = 43
// lldb-command:print y
// lldb-check:[...]$30 = 44
// lldb-command:print z
// lldb-check:[...]$31 = 45
// lldb-command:continue
#![allow(unused_variables)]
#![omit_gdb_pretty_printer_section]
fn immediate_args(a: int, b: bool, c: f64) {
()
}
struct BigStruct {
a: u64,
b: u64,
c: u64,
d: u64,
e: u64,
f: u64,
g: u64,
h: u64
}
fn non_immediate_args(a: BigStruct, b: BigStruct) {
()
}
fn binding(a: i64, b: u64, c: f64) {
let x = 0;
}
fn assignment(mut a: u64, b: u64, c: f64) {
a = b;
}
fn function_call(x: u64, y: u64, z: f64) {
std::old_io::stdio::print("Hi!")
}
fn identifier(x: u64, y: u64, z: f64) -> u64 {
x
}
fn return_expr(x: u64, y: u64, z: f64) -> u64 {
return x;
}
fn arithmetic_expr(x: u64, y: u64, z: f64) -> u64 {
x + y
}
fn if_expr(x: u64, y: u64, z: f64) -> u64 {
if x + y < 1000 {
x
} else
|
}
fn while_expr(mut x: u64, y: u64, z: u64) -> u64 {
while x + y < 1000 {
x += z
}
return x;
}
fn loop_expr(mut x: u64, y: u64, z: u64) -> u64 {
loop {
x += z;
if x + y > 1000 {
return x;
}
}
}
fn main() {
immediate_args(1, true, 2.5);
non_immediate_args(
BigStruct {
a: 3,
b: 4,
c: 5,
d: 6,
e: 7,
f: 8,
g: 9,
h: 10
},
BigStruct {
a: 11,
b: 12,
c: 13,
d: 14,
e: 15,
f: 16,
g: 17,
h: 18
}
);
binding(19, 20, 21.5);
assignment(22, 23, 24.5);
function_call(25, 26, 27.5);
identifier(28, 29, 30.5);
return_expr(31, 32, 33.5);
arithmetic_expr(34, 35, 36.5);
if_expr(37, 38, 39.5);
while_expr(40, 41, 42);
loop_expr(43, 44, 45);
}
|
{
y
}
|
conditional_block
|
mod.rs
|
//! A module that provides an interface for torrent clients
mod deluge;
mod transmission;
pub use self::deluge::Deluge;
pub use self::transmission::Transmission;
use self::transmission::{ArgGet, DeleteLocalData, TorrentSelect, TorrentStatus as TStatus};
use std::fmt::Debug;
pub type Result<T> = std::result::Result<T, failure::Error>;
/// Torrent
#[derive(Debug, Clone)]
pub struct Torrent {
pub hash: String,
pub status: TorrentStatus,
}
/// Torrent status.
#[derive(Debug, Clone, Copy, PartialEq, Deserialize, Serialize)]
pub enum TorrentStatus {
Seeding,
Stopped,
Other,
}
impl From<i16> for TorrentStatus {
fn from(status: i16) -> Self {
match status {
0 => TorrentStatus::Seeding,
1 => TorrentStatus::Stopped,
_ => TorrentStatus::Other,
}
}
}
|
/// Starts a list of torrents.
fn start(&self, _: &[String]) -> Result<()>;
/// Stop a list of torrents.
fn stop(&self, _: &[String]) -> Result<()>;
/// Remove a list of torrents from client.
///
/// If the second parameter is true, then it also removes local data.
fn remove(&self, _: &[String], _: bool) -> Result<()>;
}
impl From<TStatus> for TorrentStatus {
fn from(status: TStatus) -> Self {
match status {
TStatus::Seeding => TorrentStatus::Seeding,
TStatus::TorrentIsStopped => TorrentStatus::Stopped,
_ => TorrentStatus::Other,
}
}
}
impl TorrentClient for Transmission {
fn url(&self) -> &str {
self.url()
}
fn list(&self) -> Result<Vec<Torrent>> {
Ok(self
.get(TorrentSelect::All, &[ArgGet::HashString, ArgGet::Status])?
.into_iter()
.map(|resp| Torrent {
hash: resp.hash.to_uppercase(),
status: resp.status.into(),
})
.collect())
}
fn start(&self, hashes: &[String]) -> Result<()> {
self.start(TorrentSelect::Ids(hashes))?;
Ok(())
}
fn stop(&self, hashes: &[String]) -> Result<()> {
self.stop(TorrentSelect::Ids(hashes))?;
Ok(())
}
fn remove(&self, hashes: &[String], delete: bool) -> Result<()> {
self.remove(TorrentSelect::Ids(hashes), DeleteLocalData(delete))?;
Ok(())
}
}
impl TorrentClient for Deluge {
fn url(&self) -> &str {
unimplemented!();
}
fn list(&self) -> Result<Vec<Torrent>> {
unimplemented!();
}
fn start(&self, _hashes: &[String]) -> Result<()> {
unimplemented!();
}
fn stop(&self, _hashes: &[String]) -> Result<()> {
unimplemented!();
}
fn remove(&self, _hashes: &[String], _delete: bool) -> Result<()> {
unimplemented!();
}
}
|
/// A trait for any object that will represent a torrent client.
pub trait TorrentClient: Debug {
fn url(&self) -> &str;
/// Returns a list of all torrents in the client.
fn list(&self) -> Result<Vec<Torrent>>;
|
random_line_split
|
mod.rs
|
//! A module that provides an interface for torrent clients
mod deluge;
mod transmission;
pub use self::deluge::Deluge;
pub use self::transmission::Transmission;
use self::transmission::{ArgGet, DeleteLocalData, TorrentSelect, TorrentStatus as TStatus};
use std::fmt::Debug;
pub type Result<T> = std::result::Result<T, failure::Error>;
/// Torrent
#[derive(Debug, Clone)]
pub struct Torrent {
pub hash: String,
pub status: TorrentStatus,
}
/// Torrent status.
#[derive(Debug, Clone, Copy, PartialEq, Deserialize, Serialize)]
pub enum
|
{
Seeding,
Stopped,
Other,
}
impl From<i16> for TorrentStatus {
fn from(status: i16) -> Self {
match status {
0 => TorrentStatus::Seeding,
1 => TorrentStatus::Stopped,
_ => TorrentStatus::Other,
}
}
}
/// A trait for any object that will represent a torrent client.
pub trait TorrentClient: Debug {
fn url(&self) -> &str;
/// Returns a list of all torrents in the client.
fn list(&self) -> Result<Vec<Torrent>>;
/// Starts a list of torrents.
fn start(&self, _: &[String]) -> Result<()>;
/// Stop a list of torrents.
fn stop(&self, _: &[String]) -> Result<()>;
/// Remove a list of torrents from client.
///
/// If the second parameter is true, then it also removes local data.
fn remove(&self, _: &[String], _: bool) -> Result<()>;
}
impl From<TStatus> for TorrentStatus {
fn from(status: TStatus) -> Self {
match status {
TStatus::Seeding => TorrentStatus::Seeding,
TStatus::TorrentIsStopped => TorrentStatus::Stopped,
_ => TorrentStatus::Other,
}
}
}
impl TorrentClient for Transmission {
fn url(&self) -> &str {
self.url()
}
fn list(&self) -> Result<Vec<Torrent>> {
Ok(self
.get(TorrentSelect::All, &[ArgGet::HashString, ArgGet::Status])?
.into_iter()
.map(|resp| Torrent {
hash: resp.hash.to_uppercase(),
status: resp.status.into(),
})
.collect())
}
fn start(&self, hashes: &[String]) -> Result<()> {
self.start(TorrentSelect::Ids(hashes))?;
Ok(())
}
fn stop(&self, hashes: &[String]) -> Result<()> {
self.stop(TorrentSelect::Ids(hashes))?;
Ok(())
}
fn remove(&self, hashes: &[String], delete: bool) -> Result<()> {
self.remove(TorrentSelect::Ids(hashes), DeleteLocalData(delete))?;
Ok(())
}
}
impl TorrentClient for Deluge {
fn url(&self) -> &str {
unimplemented!();
}
fn list(&self) -> Result<Vec<Torrent>> {
unimplemented!();
}
fn start(&self, _hashes: &[String]) -> Result<()> {
unimplemented!();
}
fn stop(&self, _hashes: &[String]) -> Result<()> {
unimplemented!();
}
fn remove(&self, _hashes: &[String], _delete: bool) -> Result<()> {
unimplemented!();
}
}
|
TorrentStatus
|
identifier_name
|
mod.rs
|
//! A module that provides an interface for torrent clients
mod deluge;
mod transmission;
pub use self::deluge::Deluge;
pub use self::transmission::Transmission;
use self::transmission::{ArgGet, DeleteLocalData, TorrentSelect, TorrentStatus as TStatus};
use std::fmt::Debug;
pub type Result<T> = std::result::Result<T, failure::Error>;
/// Torrent
#[derive(Debug, Clone)]
pub struct Torrent {
pub hash: String,
pub status: TorrentStatus,
}
/// Torrent status.
#[derive(Debug, Clone, Copy, PartialEq, Deserialize, Serialize)]
pub enum TorrentStatus {
Seeding,
Stopped,
Other,
}
impl From<i16> for TorrentStatus {
fn from(status: i16) -> Self {
match status {
0 => TorrentStatus::Seeding,
1 => TorrentStatus::Stopped,
_ => TorrentStatus::Other,
}
}
}
/// A trait for any object that will represent a torrent client.
pub trait TorrentClient: Debug {
fn url(&self) -> &str;
/// Returns a list of all torrents in the client.
fn list(&self) -> Result<Vec<Torrent>>;
/// Starts a list of torrents.
fn start(&self, _: &[String]) -> Result<()>;
/// Stop a list of torrents.
fn stop(&self, _: &[String]) -> Result<()>;
/// Remove a list of torrents from client.
///
/// If the second parameter is true, then it also removes local data.
fn remove(&self, _: &[String], _: bool) -> Result<()>;
}
impl From<TStatus> for TorrentStatus {
fn from(status: TStatus) -> Self {
match status {
TStatus::Seeding => TorrentStatus::Seeding,
TStatus::TorrentIsStopped => TorrentStatus::Stopped,
_ => TorrentStatus::Other,
}
}
}
impl TorrentClient for Transmission {
fn url(&self) -> &str {
self.url()
}
fn list(&self) -> Result<Vec<Torrent>> {
Ok(self
.get(TorrentSelect::All, &[ArgGet::HashString, ArgGet::Status])?
.into_iter()
.map(|resp| Torrent {
hash: resp.hash.to_uppercase(),
status: resp.status.into(),
})
.collect())
}
fn start(&self, hashes: &[String]) -> Result<()> {
self.start(TorrentSelect::Ids(hashes))?;
Ok(())
}
fn stop(&self, hashes: &[String]) -> Result<()> {
self.stop(TorrentSelect::Ids(hashes))?;
Ok(())
}
fn remove(&self, hashes: &[String], delete: bool) -> Result<()> {
self.remove(TorrentSelect::Ids(hashes), DeleteLocalData(delete))?;
Ok(())
}
}
impl TorrentClient for Deluge {
fn url(&self) -> &str {
unimplemented!();
}
fn list(&self) -> Result<Vec<Torrent>>
|
fn start(&self, _hashes: &[String]) -> Result<()> {
unimplemented!();
}
fn stop(&self, _hashes: &[String]) -> Result<()> {
unimplemented!();
}
fn remove(&self, _hashes: &[String], _delete: bool) -> Result<()> {
unimplemented!();
}
}
|
{
unimplemented!();
}
|
identifier_body
|
main.rs
|
fn _12_1(){
println!("guide 12-1");
let a = [1i32, 2i32, 3i32];
let mut m = [2i32, 3i32, 4i32];
if false {
println!("{:?} {:?}", a, m);
}
let b = [0i32; 20]; // shorthand for array of 20 elements all initialized to 0
println!("{:?}", b);
m = [5i32, 6i32, 7i32];
println!("{:?}", m);
for i in m.iter() {
println!("elem {}", i);
}
let names = ["Emilija", "Anzelika"];
println!("{} -> {}", names[1], names[0]);
}
fn _12_2(){
println!("guide 12-2");
let mut v = vec![1i32, 2, 3];
v.push(4);
println!("{:?}, len is {}", v, v.len());
}
fn _12_3(){
println!("guide 12-3");
let mut a = vec![0i32, 1, 2, 3, 4];
let middle = a.as_mut_slice();
middle[0] = 10i32;
for e in middle.iter() {
println!("{}", e);
}
}
fn main()
|
{
println!("guide 12");
_12_1();
_12_2();
_12_3();
}
|
identifier_body
|
|
main.rs
|
fn _12_1(){
println!("guide 12-1");
let a = [1i32, 2i32, 3i32];
let mut m = [2i32, 3i32, 4i32];
if false {
println!("{:?} {:?}", a, m);
}
|
println!("{:?}", m);
for i in m.iter() {
println!("elem {}", i);
}
let names = ["Emilija", "Anzelika"];
println!("{} -> {}", names[1], names[0]);
}
fn _12_2(){
println!("guide 12-2");
let mut v = vec![1i32, 2, 3];
v.push(4);
println!("{:?}, len is {}", v, v.len());
}
fn _12_3(){
println!("guide 12-3");
let mut a = vec![0i32, 1, 2, 3, 4];
let middle = a.as_mut_slice();
middle[0] = 10i32;
for e in middle.iter() {
println!("{}", e);
}
}
fn main(){
println!("guide 12");
_12_1();
_12_2();
_12_3();
}
|
let b = [0i32; 20]; // shorthand for array of 20 elements all initialized to 0
println!("{:?}", b);
m = [5i32, 6i32, 7i32];
|
random_line_split
|
main.rs
|
fn _12_1(){
println!("guide 12-1");
let a = [1i32, 2i32, 3i32];
let mut m = [2i32, 3i32, 4i32];
if false
|
let b = [0i32; 20]; // shorthand for array of 20 elements all initialized to 0
println!("{:?}", b);
m = [5i32, 6i32, 7i32];
println!("{:?}", m);
for i in m.iter() {
println!("elem {}", i);
}
let names = ["Emilija", "Anzelika"];
println!("{} -> {}", names[1], names[0]);
}
fn _12_2(){
println!("guide 12-2");
let mut v = vec![1i32, 2, 3];
v.push(4);
println!("{:?}, len is {}", v, v.len());
}
fn _12_3(){
println!("guide 12-3");
let mut a = vec![0i32, 1, 2, 3, 4];
let middle = a.as_mut_slice();
middle[0] = 10i32;
for e in middle.iter() {
println!("{}", e);
}
}
fn main(){
println!("guide 12");
_12_1();
_12_2();
_12_3();
}
|
{
println!("{:?} {:?}", a, m);
}
|
conditional_block
|
main.rs
|
fn
|
(){
println!("guide 12-1");
let a = [1i32, 2i32, 3i32];
let mut m = [2i32, 3i32, 4i32];
if false {
println!("{:?} {:?}", a, m);
}
let b = [0i32; 20]; // shorthand for array of 20 elements all initialized to 0
println!("{:?}", b);
m = [5i32, 6i32, 7i32];
println!("{:?}", m);
for i in m.iter() {
println!("elem {}", i);
}
let names = ["Emilija", "Anzelika"];
println!("{} -> {}", names[1], names[0]);
}
fn _12_2(){
println!("guide 12-2");
let mut v = vec![1i32, 2, 3];
v.push(4);
println!("{:?}, len is {}", v, v.len());
}
fn _12_3(){
println!("guide 12-3");
let mut a = vec![0i32, 1, 2, 3, 4];
let middle = a.as_mut_slice();
middle[0] = 10i32;
for e in middle.iter() {
println!("{}", e);
}
}
fn main(){
println!("guide 12");
_12_1();
_12_2();
_12_3();
}
|
_12_1
|
identifier_name
|
resolve.rs
|
use super::encode::Metadata;
use crate::core::dependency::DepKind;
use crate::core::interning::InternedString;
use crate::core::{Dependency, PackageId, PackageIdSpec, Summary, Target};
use crate::util::errors::CargoResult;
use crate::util::Graph;
use std::borrow::Borrow;
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::fmt;
/// Represents a fully-resolved package dependency graph. Each node in the graph
/// is a package and edges represent dependencies between packages.
///
/// Each instance of `Resolve` also understands the full set of features used
/// for each package.
pub struct Resolve {
/// A graph, whose vertices are packages and edges are dependency specifications
/// from `Cargo.toml`. We need a `Vec` here because the same package
/// might be present in both `[dependencies]` and `[build-dependencies]`.
graph: Graph<PackageId, HashSet<Dependency>>,
/// Replacements from the `[replace]` table.
replacements: HashMap<PackageId, PackageId>,
/// Inverted version of `replacements`.
reverse_replacements: HashMap<PackageId, PackageId>,
/// An empty `HashSet` to avoid creating a new `HashSet` for every package
/// that does not have any features, and to avoid using `Option` to
/// simplify the API.
empty_features: Vec<InternedString>,
/// Features enabled for a given package.
features: HashMap<PackageId, Vec<InternedString>>,
/// Checksum for each package. A SHA256 hash of the `.crate` file used to
/// validate the correct crate file is used. This is `None` for sources
/// that do not use `.crate` files, like path or git dependencies.
checksums: HashMap<PackageId, Option<String>>,
/// "Unknown" metadata. This is a collection of extra, unrecognized data
/// found in the `[metadata]` section of `Cargo.lock`, preserved for
/// forwards compatibility.
metadata: Metadata,
/// `[patch]` entries that did not match anything, preserved in
/// `Cargo.lock` as the `[[patch.unused]]` table array. Tracking unused
/// patches helps prevent Cargo from being forced to re-update the
/// registry every time it runs, and keeps the resolve in a locked state
/// so it doesn't re-resolve the unused entries.
unused_patches: Vec<PackageId>,
/// A map from packages to a set of their public dependencies
public_dependencies: HashMap<PackageId, HashSet<PackageId>>,
/// Version of the `Cargo.lock` format, see
/// `cargo::core::resolver::encode` for more.
version: ResolveVersion,
summaries: HashMap<PackageId, Summary>,
}
/// A version to indicate how a `Cargo.lock` should be serialized. Currently
/// V2 is the default when creating a new lockfile. If a V1 lockfile already
/// exists, it will stay as V1.
///
/// It's theorized that we can add more here over time to track larger changes
/// to the `Cargo.lock` format, but we've yet to see how that strategy pans out.
#[derive(PartialEq, Eq, Clone, Copy, Debug, PartialOrd, Ord)]
pub enum ResolveVersion {
/// Historical baseline for when this abstraction was added.
V1,
/// A more compact format, more amenable to avoiding source-control merge
/// conflicts. The `dependencies` arrays are compressed and checksums are
/// listed inline. Introduced in 2019 in version 1.38. New lockfiles use
/// V2 by default starting in 1.41.
V2,
}
impl Resolve {
pub fn new(
graph: Graph<PackageId, HashSet<Dependency>>,
replacements: HashMap<PackageId, PackageId>,
features: HashMap<PackageId, Vec<InternedString>>,
checksums: HashMap<PackageId, Option<String>>,
metadata: Metadata,
unused_patches: Vec<PackageId>,
version: ResolveVersion,
summaries: HashMap<PackageId, Summary>,
) -> Resolve {
let reverse_replacements = replacements.iter().map(|(&p, &r)| (r, p)).collect();
let public_dependencies = graph
.iter()
.map(|p| {
let public_deps = graph
.edges(p)
.filter(|(_, deps)| {
deps.iter()
.any(|d| d.kind() == DepKind::Normal && d.is_public())
})
.map(|(dep_package, _)| *dep_package)
.collect::<HashSet<PackageId>>();
(*p, public_deps)
})
.collect();
Resolve {
graph,
replacements,
features,
checksums,
metadata,
unused_patches,
empty_features: Vec::new(),
reverse_replacements,
public_dependencies,
version,
summaries,
}
}
/// Resolves one of the paths from the given dependent package up to
/// the root.
pub fn path_to_top<'a>(&'a self, pkg: &'a PackageId) -> Vec<&'a PackageId> {
self.graph.path_to_top(pkg)
}
pub fn register_used_patches(&mut self, patches: &[Summary]) {
for summary in patches {
if self.iter().any(|id| id == summary.package_id()) {
continue;
}
self.unused_patches.push(summary.package_id());
}
}
pub fn merge_from(&mut self, previous: &Resolve) -> CargoResult<()> {
// Given a previous instance of resolve, it should be forbidden to ever
// have a checksums which *differ*. If the same package ID has differing
// checksums, then something has gone wrong such as:
//
// * Something got seriously corrupted
// * A "mirror" isn't actually a mirror as some changes were made
// * A replacement source wasn't actually a replacement, some changes
// were made
//
// In all of these cases, we want to report an error to indicate that
// something is awry. Normal execution (esp just using crates.io) should
// never run into this.
for (id, cksum) in previous.checksums.iter() {
if let Some(mine) = self.checksums.get(id) {
if mine == cksum {
continue;
}
// If the previous checksum wasn't calculated, the current
// checksum is `Some`. This may indicate that a source was
// erroneously replaced or was replaced with something that
// desires stronger checksum guarantees than can be afforded
// elsewhere.
if cksum.is_none() {
anyhow::bail!(
"\
checksum for `{}` was not previously calculated, but a checksum could now \
be calculated
this could be indicative of a few possible situations:
* the source `{}` did not previously support checksums,
but was replaced with one that does
* newer Cargo implementations know how to checksum this source, but this
older implementation does not
* the lock file is corrupt
",
id,
id.source_id()
)
// If our checksum hasn't been calculated, then it could mean
// that future Cargo figured out how to checksum something or
// more realistically we were overridden with a source that does
// not have checksums.
} else if mine.is_none() {
anyhow::bail!(
"\
checksum for `{}` could not be calculated, but a checksum is listed in \
the existing lock file
this could be indicative of a few possible situations:
* the source `{}` supports checksums,
but was replaced with one that doesn't
* the lock file is corrupt
unable to verify that `{0}` is the same as when the lockfile was generated
",
id,
id.source_id()
)
// If the checksums aren't equal, and neither is None, then they
// must both be Some, in which case the checksum now differs.
// That's quite bad!
} else {
anyhow::bail!(
"\
checksum for `{}` changed between lock files
this could be indicative of a few possible errors:
* the lock file is corrupt
* a replacement source in use (e.g., a mirror) returned a different checksum
* the source itself may be corrupt in one way or another
unable to verify that `{0}` is the same as when the lockfile was generated
",
id
);
}
}
}
// Be sure to just copy over any unknown metadata.
self.metadata = previous.metadata.clone();
// The goal of Cargo is largely to preserve the encoding of `Cargo.lock`
// that it finds on the filesystem. Sometimes `Cargo.lock` changes are
// in the works where they haven't been set as the default yet but will
// become the default soon.
//
// The scenarios we could be in are:
//
// * This is a brand new lock file with nothing previous. In that case
// this method isn't actually called at all, but instead
// `default_for_new_lockfiles` called below was encoded during the
// resolution step, so that's what we're gonna use.
//
// * We have an old lock file. In this case we want to switch the
// version to `default_for_old_lockfiles`. That puts us in one of
// three cases:
//
// * Our version is older than the default. This means that we're
// migrating someone forward, so we switch the encoding.
// * Our version is equal to the default, nothing to do!
// * Our version is *newer* than the default. This is where we
// critically keep the new version of encoding.
//
// This strategy should get new lockfiles into the pipeline more quickly
// while ensuring that any time an old cargo sees a future lock file it
// keeps the future lockfile encoding.
self.version = cmp::max(
previous.version,
ResolveVersion::default_for_old_lockfiles(),
);
Ok(())
}
pub fn contains<Q:?Sized>(&self, k: &Q) -> bool
where
PackageId: Borrow<Q>,
Q: Ord + Eq,
{
self.graph.contains(k)
}
pub fn sort(&self) -> Vec<PackageId> {
self.graph.sort()
}
pub fn iter<'a>(&'a self) -> impl Iterator<Item = PackageId> + 'a {
self.graph.iter().cloned()
}
pub fn deps(&self, pkg: PackageId) -> impl Iterator<Item = (PackageId, &HashSet<Dependency>)> {
self.deps_not_replaced(pkg)
.map(move |(id, deps)| (self.replacement(id).unwrap_or(id), deps))
}
pub fn deps_not_replaced(
&self,
pkg: PackageId,
) -> impl Iterator<Item = (PackageId, &HashSet<Dependency>)> {
self.graph.edges(&pkg).map(|(id, deps)| (*id, deps))
}
pub fn replacement(&self, pkg: PackageId) -> Option<PackageId> {
self.replacements.get(&pkg).cloned()
}
pub fn replacements(&self) -> &HashMap<PackageId, PackageId> {
&self.replacements
}
pub fn features(&self, pkg: PackageId) -> &[InternedString] {
self.features.get(&pkg).unwrap_or(&self.empty_features)
}
/// This is only here for legacy support, it will be removed when
/// switching to the new feature resolver.
pub fn features_clone(&self) -> HashMap<PackageId, Vec<InternedString>> {
self.features.clone()
}
pub fn is_public_dep(&self, pkg: PackageId, dep: PackageId) -> bool {
self.public_dependencies
.get(&pkg)
.map(|public_deps| public_deps.contains(&dep))
.unwrap_or_else(|| panic!("Unknown dependency {:?} for package {:?}", dep, pkg))
}
pub fn query(&self, spec: &str) -> CargoResult<PackageId> {
PackageIdSpec::query_str(spec, self.iter())
}
pub fn unused_patches(&self) -> &[PackageId] {
&self.unused_patches
}
pub fn checksums(&self) -> &HashMap<PackageId, Option<String>> {
&self.checksums
}
pub fn metadata(&self) -> &Metadata {
&self.metadata
}
pub fn extern_crate_name(
&self,
from: PackageId,
to: PackageId,
to_target: &Target,
) -> CargoResult<String> {
let empty_set: HashSet<Dependency> = HashSet::new();
let deps = if from == to {
&empty_set
} else
|
;
let crate_name = to_target.crate_name();
let mut names = deps.iter().map(|d| {
d.explicit_name_in_toml()
.map(|s| s.as_str().replace("-", "_"))
.unwrap_or_else(|| crate_name.clone())
});
let name = names.next().unwrap_or_else(|| crate_name.clone());
for n in names {
anyhow::ensure!(
n == name,
"the crate `{}` depends on crate `{}` multiple times with different names",
from,
to,
);
}
Ok(name)
}
fn dependencies_listed(&self, from: PackageId, to: PackageId) -> &HashSet<Dependency> {
// We've got a dependency on `from` to `to`, but this dependency edge
// may be affected by [replace]. If the `to` package is listed as the
// target of a replacement (aka the key of a reverse replacement map)
// then we try to find our dependency edge through that. If that fails
// then we go down below assuming it's not replaced.
//
// Note that we don't treat `from` as if it's been replaced because
// that's where the dependency originates from, and we only replace
// targets of dependencies not the originator.
if let Some(replace) = self.reverse_replacements.get(&to) {
if let Some(deps) = self.graph.edge(&from, replace) {
return deps;
}
}
match self.graph.edge(&from, &to) {
Some(ret) => ret,
None => panic!("no Dependency listed for `{}` => `{}`", from, to),
}
}
/// Returns the version of the encoding that's being used for this lock
/// file.
pub fn version(&self) -> &ResolveVersion {
&self.version
}
pub fn summary(&self, pkg_id: PackageId) -> &Summary {
&self.summaries[&pkg_id]
}
}
impl PartialEq for Resolve {
fn eq(&self, other: &Resolve) -> bool {
macro_rules! compare {
($($fields:ident)* | $($ignored:ident)*) => {
let Resolve { $($fields,)* $($ignored: _,)* } = self;
$($fields == &other.$fields)&&*
}
}
compare! {
// fields to compare
graph replacements reverse_replacements empty_features features
checksums metadata unused_patches public_dependencies summaries
|
// fields to ignore
version
}
}
}
impl fmt::Debug for Resolve {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(fmt, "graph: {:?}", self.graph)?;
writeln!(fmt, "\nfeatures: {{")?;
for (pkg, features) in &self.features {
writeln!(fmt, " {}: {:?}", pkg, features)?;
}
write!(fmt, "}}")
}
}
impl ResolveVersion {
/// The default way to encode new `Cargo.lock` files.
///
/// It's important that if a new version of `ResolveVersion` is added that
/// this is not updated until *at least* the support for the version is in
/// the stable release of Rust. It's ok for this to be newer than
/// `default_for_old_lockfiles` below.
pub fn default_for_new_lockfiles() -> ResolveVersion {
ResolveVersion::V2
}
/// The default way to encode old preexisting `Cargo.lock` files. This is
/// often trailing the new lockfiles one above to give older projects a
/// longer time to catch up.
///
/// It's important that this trails behind `default_for_new_lockfiles` for
/// quite some time. This gives projects a quite large window to update in
/// where we don't force updates, so if projects span many versions of Cargo
/// all those versions of Cargo will have support for a new version of the
/// lock file.
pub fn default_for_old_lockfiles() -> ResolveVersion {
ResolveVersion::V1
}
}
|
{
self.dependencies_listed(from, to)
}
|
conditional_block
|
resolve.rs
|
use super::encode::Metadata;
use crate::core::dependency::DepKind;
use crate::core::interning::InternedString;
use crate::core::{Dependency, PackageId, PackageIdSpec, Summary, Target};
use crate::util::errors::CargoResult;
use crate::util::Graph;
use std::borrow::Borrow;
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::fmt;
/// Represents a fully-resolved package dependency graph. Each node in the graph
/// is a package and edges represent dependencies between packages.
///
/// Each instance of `Resolve` also understands the full set of features used
/// for each package.
pub struct Resolve {
    /// A graph, whose vertices are packages and edges are dependency specifications
    /// from `Cargo.toml`. Each edge carries a `HashSet` of declarations because the
    /// same package might be present in both `[dependencies]` and
    /// `[build-dependencies]`.
    graph: Graph<PackageId, HashSet<Dependency>>,
    /// Replacements from the `[replace]` table (original -> replacement).
    replacements: HashMap<PackageId, PackageId>,
    /// Inverted version of `replacements` (replacement -> original), derived
    /// in `Resolve::new`.
    reverse_replacements: HashMap<PackageId, PackageId>,
    /// An empty `Vec`, kept around so `features()` can hand out a borrowed
    /// empty slice for packages with no features instead of allocating or
    /// returning an `Option`.
    empty_features: Vec<InternedString>,
    /// Features enabled for a given package.
    features: HashMap<PackageId, Vec<InternedString>>,
    /// Checksum for each package. A SHA256 hash of the `.crate` file used to
    /// validate the correct crate file is used. This is `None` for sources
    /// that do not use `.crate` files, like path or git dependencies.
    checksums: HashMap<PackageId, Option<String>>,
    /// "Unknown" metadata. This is a collection of extra, unrecognized data
    /// found in the `[metadata]` section of `Cargo.lock`, preserved for
    /// forwards compatibility.
    metadata: Metadata,
    /// `[patch]` entries that did not match anything, preserved in
    /// `Cargo.lock` as the `[[patch.unused]]` table array. Tracking unused
    /// patches helps prevent Cargo from being forced to re-update the
    /// registry every time it runs, and keeps the resolve in a locked state
    /// so it doesn't re-resolve the unused entries.
    unused_patches: Vec<PackageId>,
    /// A map from packages to a set of their public dependencies
    /// (derived in `Resolve::new` from the graph's edge data).
    public_dependencies: HashMap<PackageId, HashSet<PackageId>>,
    /// Version of the `Cargo.lock` format, see
    /// `cargo::core::resolver::encode` for more.
    version: ResolveVersion,
    /// The `Summary` for each package in the graph, keyed by package ID.
    summaries: HashMap<PackageId, Summary>,
}
/// A version to indicate how a `Cargo.lock` should be serialized. Currently
/// V2 is the default when creating a new lockfile. If a V1 lockfile already
/// exists, it will stay as V1.
///
/// It's theorized that we can add more here over time to track larger changes
/// to the `Cargo.lock` format, but we've yet to see how that strategy pans out.
///
/// Note: the derived `PartialOrd`/`Ord` is load-bearing — later versions
/// compare greater, and `Resolve::merge_from` relies on this via `cmp::max`
/// to never downgrade an existing lock file's encoding.
#[derive(PartialEq, Eq, Clone, Copy, Debug, PartialOrd, Ord)]
pub enum ResolveVersion {
    /// Historical baseline for when this abstraction was added.
    V1,
    /// A more compact format, more amenable to avoiding source-control merge
    /// conflicts. The `dependencies` arrays are compressed and checksums are
    /// listed inline. Introduced in 2019 in version 1.38. New lockfiles use
    /// V2 by default starting in 1.41.
    V2,
}
impl Resolve {
/// Constructs a `Resolve` from its raw parts, deriving the two redundant
/// lookup tables: the reverse `[replace]` map and, for every package, the
/// set of its public dependencies.
pub fn new(
    graph: Graph<PackageId, HashSet<Dependency>>,
    replacements: HashMap<PackageId, PackageId>,
    features: HashMap<PackageId, Vec<InternedString>>,
    checksums: HashMap<PackageId, Option<String>>,
    metadata: Metadata,
    unused_patches: Vec<PackageId>,
    version: ResolveVersion,
    summaries: HashMap<PackageId, Summary>,
) -> Resolve {
    // Invert `replacements` so lookups keyed by the replacement target
    // (see `dependencies_listed`) are cheap.
    let mut reverse_replacements = HashMap::new();
    for (&original, &replacement) in replacements.iter() {
        reverse_replacements.insert(replacement, original);
    }

    // For each package, record which dependency edges carry at least one
    // `pub` normal-kind dependency declaration.
    let mut public_dependencies = HashMap::new();
    for pkg in graph.iter() {
        let mut public = HashSet::new();
        for (dep_pkg, declarations) in graph.edges(pkg) {
            let has_public_normal = declarations
                .iter()
                .any(|decl| decl.kind() == DepKind::Normal && decl.is_public());
            if has_public_normal {
                public.insert(*dep_pkg);
            }
        }
        public_dependencies.insert(*pkg, public);
    }

    Resolve {
        graph,
        replacements,
        features,
        checksums,
        metadata,
        unused_patches,
        empty_features: Vec::new(),
        reverse_replacements,
        public_dependencies,
        version,
        summaries,
    }
}
/// Resolves one of the paths from the given dependent package up to
/// the root.
///
/// Delegates entirely to `Graph::path_to_top`; the borrowed IDs in the
/// returned path live as long as `self` and `pkg`.
pub fn path_to_top<'a>(&'a self, pkg: &'a PackageId) -> Vec<&'a PackageId> {
    self.graph.path_to_top(pkg)
}
/// Records which `[patch]` entries went unused: any patch whose package
/// ID does not appear in the resolve graph is appended to
/// `unused_patches` (preserved in `Cargo.lock` as `[[patch.unused]]`).
pub fn register_used_patches(&mut self, patches: &[Summary]) {
    for patch in patches {
        let patch_id = patch.package_id();
        let is_used = self.iter().any(|pkg| pkg == patch_id);
        if !is_used {
            self.unused_patches.push(patch_id);
        }
    }
}
/// Merges state from a previously-written lock file (`previous`) into this
/// freshly-computed resolve: cross-checks that package checksums agree,
/// carries over unknown `[metadata]`, and selects the lockfile encoding
/// version to write back.
///
/// # Errors
///
/// Fails if the same package ID carries conflicting checksum information
/// between the two resolves (corruption, a bad mirror, or a non-faithful
/// source replacement).
pub fn merge_from(&mut self, previous: &Resolve) -> CargoResult<()> {
    // Given a previous instance of resolve, it should be forbidden to ever
    // have a checksums which *differ*. If the same package ID has differing
    // checksums, then something has gone wrong such as:
    //
    // * Something got seriously corrupted
    // * A "mirror" isn't actually a mirror as some changes were made
    // * A replacement source wasn't actually a replacement, some changes
    //   were made
    //
    // In all of these cases, we want to report an error to indicate that
    // something is awry. Normal execution (esp just using crates.io) should
    // never run into this.
    for (id, cksum) in previous.checksums.iter() {
        if let Some(mine) = self.checksums.get(id) {
            if mine == cksum {
                continue;
            }
            // If the previous checksum wasn't calculated, the current
            // checksum is `Some`. This may indicate that a source was
            // erroneously replaced or was replaced with something that
            // desires stronger checksum guarantees than can be afforded
            // elsewhere.
            if cksum.is_none() {
                anyhow::bail!(
                    "\
checksum for `{}` was not previously calculated, but a checksum could now \
be calculated
this could be indicative of a few possible situations:
* the source `{}` did not previously support checksums,
but was replaced with one that does
* newer Cargo implementations know how to checksum this source, but this
older implementation does not
* the lock file is corrupt
",
                    id,
                    id.source_id()
                )
            // If our checksum hasn't been calculated, then it could mean
            // that future Cargo figured out how to checksum something or
            // more realistically we were overridden with a source that does
            // not have checksums.
            } else if mine.is_none() {
                anyhow::bail!(
                    "\
checksum for `{}` could not be calculated, but a checksum is listed in \
the existing lock file
this could be indicative of a few possible situations:
* the source `{}` supports checksums,
but was replaced with one that doesn't
* the lock file is corrupt
unable to verify that `{0}` is the same as when the lockfile was generated
",
                    id,
                    id.source_id()
                )
            // If the checksums aren't equal, and neither is None, then they
            // must both be Some, in which case the checksum now differs.
            // That's quite bad!
            } else {
                anyhow::bail!(
                    "\
checksum for `{}` changed between lock files
this could be indicative of a few possible errors:
* the lock file is corrupt
* a replacement source in use (e.g., a mirror) returned a different checksum
* the source itself may be corrupt in one way or another
unable to verify that `{0}` is the same as when the lockfile was generated
",
                    id
                );
            }
        }
    }
    // Be sure to just copy over any unknown metadata.
    self.metadata = previous.metadata.clone();
    // The goal of Cargo is largely to preserve the encoding of `Cargo.lock`
    // that it finds on the filesystem. Sometimes `Cargo.lock` changes are
    // in the works where they haven't been set as the default yet but will
    // become the default soon.
    //
    // The scenarios we could be in are:
    //
    // * This is a brand new lock file with nothing previous. In that case
    //   this method isn't actually called at all, but instead
    //   `default_for_new_lockfiles` called below was encoded during the
    //   resolution step, so that's what we're gonna use.
    //
    // * We have an old lock file. In this case we want to switch the
    //   version to `default_for_old_lockfiles`. That puts us in one of
    //   three cases:
    //
    //   * Our version is older than the default. This means that we're
    //     migrating someone forward, so we switch the encoding.
    //   * Our version is equal to the default, nothing to do!
    //   * Our version is *newer* than the default. This is where we
    //     critically keep the new version of encoding.
    //
    // This strategy should get new lockfiles into the pipeline more quickly
    // while ensuring that any time an old cargo sees a future lock file it
    // keeps the future lockfile encoding.
    self.version = cmp::max(
        previous.version,
        ResolveVersion::default_for_old_lockfiles(),
    );
    Ok(())
}
/// Returns whether the resolve graph contains a node for `k`.
///
/// Generic over any borrowed form of `PackageId`, mirroring the standard
/// map-lookup API shape.
pub fn contains<Q:?Sized>(&self, k: &Q) -> bool
where
    PackageId: Borrow<Q>,
    Q: Ord + Eq,
{
    self.graph.contains(k)
}
/// Returns the package IDs in the order produced by the underlying
/// graph's sort (per `Graph::sort` — topological; confirm against
/// `crate::util::Graph`).
pub fn sort(&self) -> Vec<PackageId> {
    self.graph.sort()
}

/// Iterates over every package ID in the resolve graph.
pub fn iter<'a>(&'a self) -> impl Iterator<Item = PackageId> + 'a {
    self.graph.iter().cloned()
}

/// Iterates over `pkg`'s dependencies, with each target mapped through
/// `[replace]` when a replacement exists.
pub fn deps(&self, pkg: PackageId) -> impl Iterator<Item = (PackageId, &HashSet<Dependency>)> {
    self.deps_not_replaced(pkg)
        .map(move |(id, deps)| (self.replacement(id).unwrap_or(id), deps))
}

/// Iterates over `pkg`'s dependencies without applying `[replace]`.
pub fn deps_not_replaced(
    &self,
    pkg: PackageId,
) -> impl Iterator<Item = (PackageId, &HashSet<Dependency>)> {
    self.graph.edges(&pkg).map(|(id, deps)| (*id, deps))
}

/// Returns the `[replace]`ment recorded for `pkg`, if any.
pub fn replacement(&self, pkg: PackageId) -> Option<PackageId> {
    self.replacements.get(&pkg).cloned()
}

/// Returns the full `[replace]` map (original -> replacement).
pub fn replacements(&self) -> &HashMap<PackageId, PackageId> {
    &self.replacements
}

/// Returns the features enabled for `pkg`, or an empty slice if none
/// were recorded (borrowed from `self.empty_features`, no allocation).
pub fn features(&self, pkg: PackageId) -> &[InternedString] {
    self.features.get(&pkg).unwrap_or(&self.empty_features)
}

/// This is only here for legacy support, it will be removed when
/// switching to the new feature resolver.
pub fn features_clone(&self) -> HashMap<PackageId, Vec<InternedString>> {
    self.features.clone()
}

/// Returns whether `dep` is a public dependency of `pkg`.
///
/// # Panics
///
/// Panics if `pkg` has no entry in the public-dependency map.
pub fn is_public_dep(&self, pkg: PackageId, dep: PackageId) -> bool {
    self.public_dependencies
        .get(&pkg)
        .map(|public_deps| public_deps.contains(&dep))
        .unwrap_or_else(|| panic!("Unknown dependency {:?} for package {:?}", dep, pkg))
}

/// Resolves a package-ID-spec string against the packages in this
/// resolve (see `PackageIdSpec::query_str`).
pub fn query(&self, spec: &str) -> CargoResult<PackageId> {
    PackageIdSpec::query_str(spec, self.iter())
}

/// Returns the `[patch]` entries that matched nothing.
pub fn unused_patches(&self) -> &[PackageId] {
    &self.unused_patches
}

/// Returns the per-package checksums; `None` for sources without
/// `.crate` files (e.g. path or git dependencies).
pub fn checksums(&self) -> &HashMap<PackageId, Option<String>> {
    &self.checksums
}

/// Returns the unrecognized `[metadata]` carried through `Cargo.lock`.
pub fn metadata(&self) -> &Metadata {
    &self.metadata
}
/// Computes the name under which `from` refers to the crate built by
/// `to`'s target `to_target` (the `extern crate` / `--extern` name).
///
/// Honors explicit `package = "..."` renames from `Cargo.toml`, with `-`
/// normalized to `_`; falls back to the target's own crate name.
///
/// # Errors
///
/// Fails if `from` declares `to` multiple times under conflicting names.
pub fn extern_crate_name(
    &self,
    from: PackageId,
    to: PackageId,
    to_target: &Target,
) -> CargoResult<String> {
    let crate_name = to_target.crate_name();

    // NOTE: for `from == to` the graph is not consulted; an empty set of
    // declarations is used instead.
    let empty_set: HashSet<Dependency> = HashSet::new();
    let declarations = if from == to {
        &empty_set
    } else {
        self.dependencies_listed(from, to)
    };

    // The name each declaration implies: an explicit rename from the
    // manifest (normalized), or the target's own crate name.
    let implied_name = |decl: &Dependency| match decl.explicit_name_in_toml() {
        Some(renamed) => renamed.as_str().replace("-", "_"),
        None => crate_name.clone(),
    };

    let mut candidates = declarations.iter().map(implied_name);
    let name = match candidates.next() {
        Some(first) => first,
        None => crate_name.clone(),
    };

    // Every remaining declaration must agree with the first.
    for candidate in candidates {
        anyhow::ensure!(
            candidate == name,
            "the crate `{}` depends on crate `{}` multiple times with different names",
            from,
            to,
        );
    }
    Ok(name)
}
/// Returns the set of dependency declarations underpinning the edge
/// `from -> to`, accounting for `[replace]` on the `to` side.
///
/// # Panics
///
/// Panics if no such edge exists in the graph (either directly or via a
/// reverse replacement) — callers are expected to only ask about edges
/// the resolve actually contains.
fn dependencies_listed(&self, from: PackageId, to: PackageId) -> &HashSet<Dependency> {
    // We've got a dependency on `from` to `to`, but this dependency edge
    // may be affected by [replace]. If the `to` package is listed as the
    // target of a replacement (aka the key of a reverse replacement map)
    // then we try to find our dependency edge through that. If that fails
    // then we go down below assuming it's not replaced.
    //
    // Note that we don't treat `from` as if it's been replaced because
    // that's where the dependency originates from, and we only replace
    // targets of dependencies not the originator.
    if let Some(replace) = self.reverse_replacements.get(&to) {
        if let Some(deps) = self.graph.edge(&from, replace) {
            return deps;
        }
    }
    match self.graph.edge(&from, &to) {
        Some(ret) => ret,
        None => panic!("no Dependency listed for `{}` => `{}`", from, to),
    }
}
/// Returns the version of the encoding that's being used for this lock
|
&self.version
}
/// Returns the `Summary` for `pkg_id`.
///
/// # Panics
///
/// Panics if `pkg_id` is not part of this resolve (map indexing panics
/// on a missing key).
pub fn summary(&self, pkg_id: PackageId) -> &Summary {
    &self.summaries[&pkg_id]
}
}
impl PartialEq for Resolve {
    fn eq(&self, other: &Resolve) -> bool {
        // The macro destructures `self` exhaustively, so adding a field to
        // `Resolve` without listing it here (on either side of `|`) is a
        // compile error. Fields before `|` are compared; fields after it
        // are deliberately ignored for equality.
        macro_rules! compare {
            ($($fields:ident)* | $($ignored:ident)*) => {
                let Resolve { $($fields,)* $($ignored: _,)* } = self;
                $($fields == &other.$fields)&&*
            }
        }
        compare! {
            // fields to compare
            graph replacements reverse_replacements empty_features features
            checksums metadata unused_patches public_dependencies summaries
            |
            // fields to ignore
            version
        }
    }
}
impl fmt::Debug for Resolve {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Dump the dependency graph first, then the per-package feature
        // selections inside a `{ ... }` block.
        writeln!(fmt, "graph: {:?}", self.graph)?;
        writeln!(fmt, "\nfeatures: {{")?;
        for (pkg, features) in &self.features {
            writeln!(fmt, " {}: {:?}", pkg, features)?;
        }
        // `write!` (not `writeln!`): no trailing newline after the brace.
        write!(fmt, "}}")
    }
}
impl ResolveVersion {
    /// The default way to encode new `Cargo.lock` files.
    ///
    /// It's important that if a new version of `ResolveVersion` is added that
    /// this is not updated until *at least* the support for the version is in
    /// the stable release of Rust. It's ok for this to be newer than
    /// `default_for_old_lockfiles` below.
    pub fn default_for_new_lockfiles() -> ResolveVersion {
        ResolveVersion::V2
    }
    /// The default way to encode old preexisting `Cargo.lock` files. This is
    /// often trailing the new lockfiles one above to give older projects a
    /// longer time to catch up.
    ///
    /// It's important that this trails behind `default_for_new_lockfiles` for
    /// quite some time. This gives projects a quite large window to update in
    /// where we don't force updates, so if projects span many versions of Cargo
    /// all those versions of Cargo will have support for a new version of the
    /// lock file.
    ///
    /// Used by `Resolve::merge_from` via `cmp::max`, so an existing lock
    /// file is floored at this version but never downgraded below a newer
    /// encoding it already uses.
    pub fn default_for_old_lockfiles() -> ResolveVersion {
        ResolveVersion::V1
    }
}
|
/// file.
pub fn version(&self) -> &ResolveVersion {
|
random_line_split
|
resolve.rs
|
use super::encode::Metadata;
use crate::core::dependency::DepKind;
use crate::core::interning::InternedString;
use crate::core::{Dependency, PackageId, PackageIdSpec, Summary, Target};
use crate::util::errors::CargoResult;
use crate::util::Graph;
use std::borrow::Borrow;
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::fmt;
/// Represents a fully-resolved package dependency graph. Each node in the graph
/// is a package and edges represent dependencies between packages.
///
/// Each instance of `Resolve` also understands the full set of features used
/// for each package.
pub struct Resolve {
/// A graph, whose vertices are packages and edges are dependency specifications
/// from `Cargo.toml`. We need a `Vec` here because the same package
/// might be present in both `[dependencies]` and `[build-dependencies]`.
graph: Graph<PackageId, HashSet<Dependency>>,
/// Replacements from the `[replace]` table.
replacements: HashMap<PackageId, PackageId>,
/// Inverted version of `replacements`.
reverse_replacements: HashMap<PackageId, PackageId>,
/// An empty `HashSet` to avoid creating a new `HashSet` for every package
/// that does not have any features, and to avoid using `Option` to
/// simplify the API.
empty_features: Vec<InternedString>,
/// Features enabled for a given package.
features: HashMap<PackageId, Vec<InternedString>>,
/// Checksum for each package. A SHA256 hash of the `.crate` file used to
/// validate the correct crate file is used. This is `None` for sources
/// that do not use `.crate` files, like path or git dependencies.
checksums: HashMap<PackageId, Option<String>>,
/// "Unknown" metadata. This is a collection of extra, unrecognized data
/// found in the `[metadata]` section of `Cargo.lock`, preserved for
/// forwards compatibility.
metadata: Metadata,
/// `[patch]` entries that did not match anything, preserved in
/// `Cargo.lock` as the `[[patch.unused]]` table array. Tracking unused
/// patches helps prevent Cargo from being forced to re-update the
/// registry every time it runs, and keeps the resolve in a locked state
/// so it doesn't re-resolve the unused entries.
unused_patches: Vec<PackageId>,
/// A map from packages to a set of their public dependencies
public_dependencies: HashMap<PackageId, HashSet<PackageId>>,
/// Version of the `Cargo.lock` format, see
/// `cargo::core::resolver::encode` for more.
version: ResolveVersion,
summaries: HashMap<PackageId, Summary>,
}
/// A version to indicate how a `Cargo.lock` should be serialized. Currently
/// V2 is the default when creating a new lockfile. If a V1 lockfile already
/// exists, it will stay as V1.
///
/// It's theorized that we can add more here over time to track larger changes
/// to the `Cargo.lock` format, but we've yet to see how that strategy pans out.
#[derive(PartialEq, Eq, Clone, Copy, Debug, PartialOrd, Ord)]
pub enum
|
{
/// Historical baseline for when this abstraction was added.
V1,
/// A more compact format, more amenable to avoiding source-control merge
/// conflicts. The `dependencies` arrays are compressed and checksums are
/// listed inline. Introduced in 2019 in version 1.38. New lockfiles use
/// V2 by default starting in 1.41.
V2,
}
impl Resolve {
pub fn new(
graph: Graph<PackageId, HashSet<Dependency>>,
replacements: HashMap<PackageId, PackageId>,
features: HashMap<PackageId, Vec<InternedString>>,
checksums: HashMap<PackageId, Option<String>>,
metadata: Metadata,
unused_patches: Vec<PackageId>,
version: ResolveVersion,
summaries: HashMap<PackageId, Summary>,
) -> Resolve {
let reverse_replacements = replacements.iter().map(|(&p, &r)| (r, p)).collect();
let public_dependencies = graph
.iter()
.map(|p| {
let public_deps = graph
.edges(p)
.filter(|(_, deps)| {
deps.iter()
.any(|d| d.kind() == DepKind::Normal && d.is_public())
})
.map(|(dep_package, _)| *dep_package)
.collect::<HashSet<PackageId>>();
(*p, public_deps)
})
.collect();
Resolve {
graph,
replacements,
features,
checksums,
metadata,
unused_patches,
empty_features: Vec::new(),
reverse_replacements,
public_dependencies,
version,
summaries,
}
}
/// Resolves one of the paths from the given dependent package up to
/// the root.
pub fn path_to_top<'a>(&'a self, pkg: &'a PackageId) -> Vec<&'a PackageId> {
self.graph.path_to_top(pkg)
}
pub fn register_used_patches(&mut self, patches: &[Summary]) {
for summary in patches {
if self.iter().any(|id| id == summary.package_id()) {
continue;
}
self.unused_patches.push(summary.package_id());
}
}
pub fn merge_from(&mut self, previous: &Resolve) -> CargoResult<()> {
// Given a previous instance of resolve, it should be forbidden to ever
// have a checksums which *differ*. If the same package ID has differing
// checksums, then something has gone wrong such as:
//
// * Something got seriously corrupted
// * A "mirror" isn't actually a mirror as some changes were made
// * A replacement source wasn't actually a replacement, some changes
// were made
//
// In all of these cases, we want to report an error to indicate that
// something is awry. Normal execution (esp just using crates.io) should
// never run into this.
for (id, cksum) in previous.checksums.iter() {
if let Some(mine) = self.checksums.get(id) {
if mine == cksum {
continue;
}
// If the previous checksum wasn't calculated, the current
// checksum is `Some`. This may indicate that a source was
// erroneously replaced or was replaced with something that
// desires stronger checksum guarantees than can be afforded
// elsewhere.
if cksum.is_none() {
anyhow::bail!(
"\
checksum for `{}` was not previously calculated, but a checksum could now \
be calculated
this could be indicative of a few possible situations:
* the source `{}` did not previously support checksums,
but was replaced with one that does
* newer Cargo implementations know how to checksum this source, but this
older implementation does not
* the lock file is corrupt
",
id,
id.source_id()
)
// If our checksum hasn't been calculated, then it could mean
// that future Cargo figured out how to checksum something or
// more realistically we were overridden with a source that does
// not have checksums.
} else if mine.is_none() {
anyhow::bail!(
"\
checksum for `{}` could not be calculated, but a checksum is listed in \
the existing lock file
this could be indicative of a few possible situations:
* the source `{}` supports checksums,
but was replaced with one that doesn't
* the lock file is corrupt
unable to verify that `{0}` is the same as when the lockfile was generated
",
id,
id.source_id()
)
// If the checksums aren't equal, and neither is None, then they
// must both be Some, in which case the checksum now differs.
// That's quite bad!
} else {
anyhow::bail!(
"\
checksum for `{}` changed between lock files
this could be indicative of a few possible errors:
* the lock file is corrupt
* a replacement source in use (e.g., a mirror) returned a different checksum
* the source itself may be corrupt in one way or another
unable to verify that `{0}` is the same as when the lockfile was generated
",
id
);
}
}
}
// Be sure to just copy over any unknown metadata.
self.metadata = previous.metadata.clone();
// The goal of Cargo is largely to preserve the encoding of `Cargo.lock`
// that it finds on the filesystem. Sometimes `Cargo.lock` changes are
// in the works where they haven't been set as the default yet but will
// become the default soon.
//
// The scenarios we could be in are:
//
// * This is a brand new lock file with nothing previous. In that case
// this method isn't actually called at all, but instead
// `default_for_new_lockfiles` called below was encoded during the
// resolution step, so that's what we're gonna use.
//
// * We have an old lock file. In this case we want to switch the
// version to `default_for_old_lockfiles`. That puts us in one of
// three cases:
//
// * Our version is older than the default. This means that we're
// migrating someone forward, so we switch the encoding.
// * Our version is equal to the default, nothing to do!
// * Our version is *newer* than the default. This is where we
// critically keep the new version of encoding.
//
// This strategy should get new lockfiles into the pipeline more quickly
// while ensuring that any time an old cargo sees a future lock file it
// keeps the future lockfile encoding.
self.version = cmp::max(
previous.version,
ResolveVersion::default_for_old_lockfiles(),
);
Ok(())
}
pub fn contains<Q:?Sized>(&self, k: &Q) -> bool
where
PackageId: Borrow<Q>,
Q: Ord + Eq,
{
self.graph.contains(k)
}
pub fn sort(&self) -> Vec<PackageId> {
self.graph.sort()
}
pub fn iter<'a>(&'a self) -> impl Iterator<Item = PackageId> + 'a {
self.graph.iter().cloned()
}
pub fn deps(&self, pkg: PackageId) -> impl Iterator<Item = (PackageId, &HashSet<Dependency>)> {
self.deps_not_replaced(pkg)
.map(move |(id, deps)| (self.replacement(id).unwrap_or(id), deps))
}
pub fn deps_not_replaced(
&self,
pkg: PackageId,
) -> impl Iterator<Item = (PackageId, &HashSet<Dependency>)> {
self.graph.edges(&pkg).map(|(id, deps)| (*id, deps))
}
pub fn replacement(&self, pkg: PackageId) -> Option<PackageId> {
self.replacements.get(&pkg).cloned()
}
pub fn replacements(&self) -> &HashMap<PackageId, PackageId> {
&self.replacements
}
pub fn features(&self, pkg: PackageId) -> &[InternedString] {
self.features.get(&pkg).unwrap_or(&self.empty_features)
}
/// This is only here for legacy support, it will be removed when
/// switching to the new feature resolver.
pub fn features_clone(&self) -> HashMap<PackageId, Vec<InternedString>> {
self.features.clone()
}
pub fn is_public_dep(&self, pkg: PackageId, dep: PackageId) -> bool {
self.public_dependencies
.get(&pkg)
.map(|public_deps| public_deps.contains(&dep))
.unwrap_or_else(|| panic!("Unknown dependency {:?} for package {:?}", dep, pkg))
}
pub fn query(&self, spec: &str) -> CargoResult<PackageId> {
PackageIdSpec::query_str(spec, self.iter())
}
pub fn unused_patches(&self) -> &[PackageId] {
&self.unused_patches
}
pub fn checksums(&self) -> &HashMap<PackageId, Option<String>> {
&self.checksums
}
pub fn metadata(&self) -> &Metadata {
&self.metadata
}
pub fn extern_crate_name(
&self,
from: PackageId,
to: PackageId,
to_target: &Target,
) -> CargoResult<String> {
let empty_set: HashSet<Dependency> = HashSet::new();
let deps = if from == to {
&empty_set
} else {
self.dependencies_listed(from, to)
};
let crate_name = to_target.crate_name();
let mut names = deps.iter().map(|d| {
d.explicit_name_in_toml()
.map(|s| s.as_str().replace("-", "_"))
.unwrap_or_else(|| crate_name.clone())
});
let name = names.next().unwrap_or_else(|| crate_name.clone());
for n in names {
anyhow::ensure!(
n == name,
"the crate `{}` depends on crate `{}` multiple times with different names",
from,
to,
);
}
Ok(name)
}
fn dependencies_listed(&self, from: PackageId, to: PackageId) -> &HashSet<Dependency> {
// We've got a dependency on `from` to `to`, but this dependency edge
// may be affected by [replace]. If the `to` package is listed as the
// target of a replacement (aka the key of a reverse replacement map)
// then we try to find our dependency edge through that. If that fails
// then we go down below assuming it's not replaced.
//
// Note that we don't treat `from` as if it's been replaced because
// that's where the dependency originates from, and we only replace
// targets of dependencies not the originator.
if let Some(replace) = self.reverse_replacements.get(&to) {
if let Some(deps) = self.graph.edge(&from, replace) {
return deps;
}
}
match self.graph.edge(&from, &to) {
Some(ret) => ret,
None => panic!("no Dependency listed for `{}` => `{}`", from, to),
}
}
/// Returns the version of the encoding that's being used for this lock
/// file.
///
/// See `cargo::core::resolver::encode` for the formats themselves.
pub fn version(&self) -> &ResolveVersion {
&self.version
}
/// Returns the `Summary` for `pkg_id`.
///
/// Panics (via the index) if the package is not part of this resolve.
pub fn summary(&self, pkg_id: PackageId) -> &Summary {
&self.summaries[&pkg_id]
}
}
impl PartialEq for Resolve {
fn eq(&self, other: &Resolve) -> bool {
// Destructure `self` exhaustively so that adding a field to `Resolve`
// without listing it here (compared or ignored) is a compile error —
// the comparison can never silently fall out of date.
macro_rules! compare {
($($fields:ident)* | $($ignored:ident)*) => {
let Resolve { $($fields,)* $($ignored: _,)* } = self;
$($fields == &other.$fields)&&*
}
}
compare! {
// fields to compare
graph replacements reverse_replacements empty_features features
checksums metadata unused_patches public_dependencies summaries
|
// fields to ignore
version
}
}
}
impl fmt::Debug for Resolve {
// Hand-written (rather than derived) so the output stays readable: only
// the dependency graph and the per-package feature lists are printed.
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(fmt, "graph: {:?}", self.graph)?;
writeln!(fmt, "\nfeatures: {{")?;
for (pkg, features) in &self.features {
writeln!(fmt, " {}: {:?}", pkg, features)?;
}
write!(fmt, "}}")
}
}
impl ResolveVersion {
/// The default way to encode new `Cargo.lock` files.
///
/// It's important that if a new version of `ResolveVersion` is added that
/// this is not updated until *at least* the support for the version is in
/// the stable release of Rust. It's ok for this to be newer than
/// `default_for_old_lockfiles` below.
pub fn default_for_new_lockfiles() -> ResolveVersion {
// V2: the compact encoding (see the enum docs).
ResolveVersion::V2
}
/// The default way to encode old preexisting `Cargo.lock` files. This is
/// often trailing the new lockfiles one above to give older projects a
/// longer time to catch up.
///
/// It's important that this trails behind `default_for_new_lockfiles` for
/// quite some time. This gives projects a quite large window to update in
/// where we don't force updates, so if projects span many versions of Cargo
/// all those versions of Cargo will have support for a new version of the
/// lock file.
pub fn default_for_old_lockfiles() -> ResolveVersion {
// V1 acts as the migration floor: `merge_from` takes the max of this
// and the previous lockfile's version, so encodings never downgrade.
ResolveVersion::V1
}
}
|
ResolveVersion
|
identifier_name
|
resolve.rs
|
use super::encode::Metadata;
use crate::core::dependency::DepKind;
use crate::core::interning::InternedString;
use crate::core::{Dependency, PackageId, PackageIdSpec, Summary, Target};
use crate::util::errors::CargoResult;
use crate::util::Graph;
use std::borrow::Borrow;
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::fmt;
/// Represents a fully-resolved package dependency graph. Each node in the graph
/// is a package and edges represent dependencies between packages.
///
/// Each instance of `Resolve` also understands the full set of features used
/// for each package.
pub struct Resolve {
/// A graph, whose vertices are packages and edges are dependency specifications
/// from `Cargo.toml`. We need a `Vec` here because the same package
/// might be present in both `[dependencies]` and `[build-dependencies]`.
graph: Graph<PackageId, HashSet<Dependency>>,
/// Replacements from the `[replace]` table.
replacements: HashMap<PackageId, PackageId>,
/// Inverted version of `replacements`.
reverse_replacements: HashMap<PackageId, PackageId>,
/// An empty `Vec` handed out by reference for every package that does not
/// have any features, avoiding a fresh allocation per lookup and keeping
/// `Option` out of the `features()` API.
empty_features: Vec<InternedString>,
/// Features enabled for a given package.
features: HashMap<PackageId, Vec<InternedString>>,
/// Checksum for each package. A SHA256 hash of the `.crate` file used to
/// validate the correct crate file is used. This is `None` for sources
/// that do not use `.crate` files, like path or git dependencies.
checksums: HashMap<PackageId, Option<String>>,
/// "Unknown" metadata. This is a collection of extra, unrecognized data
/// found in the `[metadata]` section of `Cargo.lock`, preserved for
/// forwards compatibility.
metadata: Metadata,
/// `[patch]` entries that did not match anything, preserved in
/// `Cargo.lock` as the `[[patch.unused]]` table array. Tracking unused
/// patches helps prevent Cargo from being forced to re-update the
/// registry every time it runs, and keeps the resolve in a locked state
/// so it doesn't re-resolve the unused entries.
unused_patches: Vec<PackageId>,
/// A map from packages to a set of their public dependencies
public_dependencies: HashMap<PackageId, HashSet<PackageId>>,
/// Version of the `Cargo.lock` format, see
/// `cargo::core::resolver::encode` for more.
version: ResolveVersion,
/// The `Summary` for each resolved package.
summaries: HashMap<PackageId, Summary>,
}
/// A version to indicate how a `Cargo.lock` should be serialized. Currently
/// V2 is the default when creating a new lockfile. If a V1 lockfile already
/// exists, it will stay as V1.
///
/// It's theorized that we can add more here over time to track larger changes
/// to the `Cargo.lock` format, but we've yet to see how that strategy pans out.
///
/// Note: `Ord` is derived, so later variants compare greater; `merge_from`
/// relies on this via `cmp::max` to never downgrade an encoding.
#[derive(PartialEq, Eq, Clone, Copy, Debug, PartialOrd, Ord)]
pub enum ResolveVersion {
/// Historical baseline for when this abstraction was added.
V1,
/// A more compact format, more amenable to avoiding source-control merge
/// conflicts. The `dependencies` arrays are compressed and checksums are
/// listed inline. Introduced in 2019 in version 1.38. New lockfiles use
/// V2 by default starting in 1.41.
V2,
}
impl Resolve {
/// Builds a `Resolve` from the resolver's outputs, precomputing the
/// reverse `[replace]` map and each package's set of public dependencies.
pub fn new(
    graph: Graph<PackageId, HashSet<Dependency>>,
    replacements: HashMap<PackageId, PackageId>,
    features: HashMap<PackageId, Vec<InternedString>>,
    checksums: HashMap<PackageId, Option<String>>,
    metadata: Metadata,
    unused_patches: Vec<PackageId>,
    version: ResolveVersion,
    summaries: HashMap<PackageId, Summary>,
) -> Resolve {
    // Invert `[replace]` so a replacement can be mapped back to the
    // package it replaced.
    let reverse_replacements: HashMap<PackageId, PackageId> = replacements
        .iter()
        .map(|(&original, &replacement)| (replacement, original))
        .collect();
    // For each package, record which neighbors are reachable through a
    // normal, public dependency edge.
    let mut public_dependencies = HashMap::new();
    for node in graph.iter() {
        let public: HashSet<PackageId> = graph
            .edges(node)
            .filter(|(_, edge_deps)| {
                edge_deps
                    .iter()
                    .any(|d| d.kind() == DepKind::Normal && d.is_public())
            })
            .map(|(dep_pkg, _)| *dep_pkg)
            .collect();
        public_dependencies.insert(*node, public);
    }
    Resolve {
        graph,
        replacements,
        features,
        checksums,
        metadata,
        unused_patches,
        empty_features: Vec::new(),
        reverse_replacements,
        public_dependencies,
        version,
        summaries,
    }
}
/// Resolves one of the paths from the given dependent package up to
/// the root.
///
/// (Delegates to `Graph::path_to_top`.)
pub fn path_to_top<'a>(&'a self, pkg: &'a PackageId) -> Vec<&'a PackageId> {
self.graph.path_to_top(pkg)
}
/// Records which `[patch]` summaries went unused: any patch whose package
/// id does not appear in the resolved graph is appended to
/// `unused_patches` (so it can be preserved in `Cargo.lock`).
pub fn register_used_patches(&mut self, patches: &[Summary]) {
    for patch in patches {
        let patch_id = patch.package_id();
        let used = self.iter().any(|resolved| resolved == patch_id);
        if !used {
            self.unused_patches.push(patch_id);
        }
    }
}
/// Carries state forward from a previously-loaded lockfile resolve
/// (`previous`) into this freshly-computed one: verifies that checksums
/// for shared packages have not changed, copies over unknown `[metadata]`,
/// and picks the lockfile encoding version to write back out.
///
/// # Errors
///
/// Fails if any package present in both resolves has a differing checksum
/// (including `Some` vs `None` mismatches), which indicates corruption or
/// a misbehaving replacement/mirror source.
pub fn merge_from(&mut self, previous: &Resolve) -> CargoResult<()> {
// Given a previous instance of resolve, it should be forbidden to ever
// have a checksums which *differ*. If the same package ID has differing
// checksums, then something has gone wrong such as:
//
// * Something got seriously corrupted
// * A "mirror" isn't actually a mirror as some changes were made
// * A replacement source wasn't actually a replacement, some changes
// were made
//
// In all of these cases, we want to report an error to indicate that
// something is awry. Normal execution (esp just using crates.io) should
// never run into this.
for (id, cksum) in previous.checksums.iter() {
if let Some(mine) = self.checksums.get(id) {
if mine == cksum {
continue;
}
// If the previous checksum wasn't calculated, the current
// checksum is `Some`. This may indicate that a source was
// erroneously replaced or was replaced with something that
// desires stronger checksum guarantees than can be afforded
// elsewhere.
if cksum.is_none() {
anyhow::bail!(
"\
checksum for `{}` was not previously calculated, but a checksum could now \
be calculated
this could be indicative of a few possible situations:
* the source `{}` did not previously support checksums,
but was replaced with one that does
* newer Cargo implementations know how to checksum this source, but this
older implementation does not
* the lock file is corrupt
",
id,
id.source_id()
)
// If our checksum hasn't been calculated, then it could mean
// that future Cargo figured out how to checksum something or
// more realistically we were overridden with a source that does
// not have checksums.
} else if mine.is_none() {
anyhow::bail!(
"\
checksum for `{}` could not be calculated, but a checksum is listed in \
the existing lock file
this could be indicative of a few possible situations:
* the source `{}` supports checksums,
but was replaced with one that doesn't
* the lock file is corrupt
unable to verify that `{0}` is the same as when the lockfile was generated
",
id,
id.source_id()
)
// If the checksums aren't equal, and neither is None, then they
// must both be Some, in which case the checksum now differs.
// That's quite bad!
} else {
anyhow::bail!(
"\
checksum for `{}` changed between lock files
this could be indicative of a few possible errors:
* the lock file is corrupt
* a replacement source in use (e.g., a mirror) returned a different checksum
* the source itself may be corrupt in one way or another
unable to verify that `{0}` is the same as when the lockfile was generated
",
id
);
}
}
}
// Be sure to just copy over any unknown metadata.
self.metadata = previous.metadata.clone();
// The goal of Cargo is largely to preserve the encoding of `Cargo.lock`
// that it finds on the filesystem. Sometimes `Cargo.lock` changes are
// in the works where they haven't been set as the default yet but will
// become the default soon.
//
// The scenarios we could be in are:
//
// * This is a brand new lock file with nothing previous. In that case
// this method isn't actually called at all, but instead
// `default_for_new_lockfiles` called below was encoded during the
// resolution step, so that's what we're gonna use.
//
// * We have an old lock file. In this case we want to switch the
// version to `default_for_old_lockfiles`. That puts us in one of
// three cases:
//
// * Our version is older than the default. This means that we're
// migrating someone forward, so we switch the encoding.
// * Our version is equal to the default, nothing to do!
// * Our version is *newer* than the default. This is where we
// critically keep the new version of encoding.
//
// This strategy should get new lockfiles into the pipeline more quickly
// while ensuring that any time an old cargo sees a future lock file it
// keeps the future lockfile encoding.
self.version = cmp::max(
previous.version,
ResolveVersion::default_for_old_lockfiles(),
);
Ok(())
}
/// Returns whether package `k` is part of the resolved graph.
pub fn contains<Q:?Sized>(&self, k: &Q) -> bool
where
PackageId: Borrow<Q>,
Q: Ord + Eq,
{
self.graph.contains(k)
}
/// Returns all package ids in the order produced by `Graph::sort`.
pub fn sort(&self) -> Vec<PackageId> {
self.graph.sort()
}
/// Iterates over every package id in the resolved graph.
pub fn iter<'a>(&'a self) -> impl Iterator<Item = PackageId> + 'a {
self.graph.iter().cloned()
}
/// Iterates over `pkg`'s dependency edges with `[replace]` applied:
/// each yielded id is the replacement package when one exists.
pub fn deps(&self, pkg: PackageId) -> impl Iterator<Item = (PackageId, &HashSet<Dependency>)> {
self.deps_not_replaced(pkg)
.map(move |(id, deps)| (self.replacement(id).unwrap_or(id), deps))
}
/// Iterates over `pkg`'s dependency edges exactly as recorded in the
/// graph, without applying `[replace]`.
pub fn deps_not_replaced(
&self,
pkg: PackageId,
) -> impl Iterator<Item = (PackageId, &HashSet<Dependency>)> {
self.graph.edges(&pkg).map(|(id, deps)| (*id, deps))
}
/// Returns the package that replaces `pkg` via `[replace]`, if any.
pub fn replacement(&self, pkg: PackageId) -> Option<PackageId> {
self.replacements.get(&pkg).cloned()
}
pub fn replacements(&self) -> &HashMap<PackageId, PackageId>
|
/// Returns the features enabled for `pkg`, or an empty slice if none are
/// recorded (backed by the shared `empty_features` buffer, so no
/// allocation takes place).
pub fn features(&self, pkg: PackageId) -> &[InternedString] {
self.features.get(&pkg).unwrap_or(&self.empty_features)
}
/// This is only here for legacy support, it will be removed when
/// switching to the new feature resolver.
///
/// Returns an owned copy of the entire package -> features map.
pub fn features_clone(&self) -> HashMap<PackageId, Vec<InternedString>> {
self.features.clone()
}
pub fn is_public_dep(&self, pkg: PackageId, dep: PackageId) -> bool {
self.public_dependencies
.get(&pkg)
.map(|public_deps| public_deps.contains(&dep))
.unwrap_or_else(|| panic!("Unknown dependency {:?} for package {:?}", dep, pkg))
}
pub fn query(&self, spec: &str) -> CargoResult<PackageId> {
PackageIdSpec::query_str(spec, self.iter())
}
pub fn unused_patches(&self) -> &[PackageId] {
&self.unused_patches
}
pub fn checksums(&self) -> &HashMap<PackageId, Option<String>> {
&self.checksums
}
pub fn metadata(&self) -> &Metadata {
&self.metadata
}
pub fn extern_crate_name(
&self,
from: PackageId,
to: PackageId,
to_target: &Target,
) -> CargoResult<String> {
let empty_set: HashSet<Dependency> = HashSet::new();
let deps = if from == to {
&empty_set
} else {
self.dependencies_listed(from, to)
};
let crate_name = to_target.crate_name();
let mut names = deps.iter().map(|d| {
d.explicit_name_in_toml()
.map(|s| s.as_str().replace("-", "_"))
.unwrap_or_else(|| crate_name.clone())
});
let name = names.next().unwrap_or_else(|| crate_name.clone());
for n in names {
anyhow::ensure!(
n == name,
"the crate `{}` depends on crate `{}` multiple times with different names",
from,
to,
);
}
Ok(name)
}
fn dependencies_listed(&self, from: PackageId, to: PackageId) -> &HashSet<Dependency> {
// We've got a dependency on `from` to `to`, but this dependency edge
// may be affected by [replace]. If the `to` package is listed as the
// target of a replacement (aka the key of a reverse replacement map)
// then we try to find our dependency edge through that. If that fails
// then we go down below assuming it's not replaced.
//
// Note that we don't treat `from` as if it's been replaced because
// that's where the dependency originates from, and we only replace
// targets of dependencies not the originator.
if let Some(replace) = self.reverse_replacements.get(&to) {
if let Some(deps) = self.graph.edge(&from, replace) {
return deps;
}
}
match self.graph.edge(&from, &to) {
Some(ret) => ret,
None => panic!("no Dependency listed for `{}` => `{}`", from, to),
}
}
/// Returns the version of the encoding that's being used for this lock
/// file.
pub fn version(&self) -> &ResolveVersion {
&self.version
}
pub fn summary(&self, pkg_id: PackageId) -> &Summary {
&self.summaries[&pkg_id]
}
}
impl PartialEq for Resolve {
fn eq(&self, other: &Resolve) -> bool {
macro_rules! compare {
($($fields:ident)* | $($ignored:ident)*) => {
let Resolve { $($fields,)* $($ignored: _,)* } = self;
$($fields == &other.$fields)&&*
}
}
compare! {
// fields to compare
graph replacements reverse_replacements empty_features features
checksums metadata unused_patches public_dependencies summaries
|
// fields to ignore
version
}
}
}
impl fmt::Debug for Resolve {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(fmt, "graph: {:?}", self.graph)?;
writeln!(fmt, "\nfeatures: {{")?;
for (pkg, features) in &self.features {
writeln!(fmt, " {}: {:?}", pkg, features)?;
}
write!(fmt, "}}")
}
}
impl ResolveVersion {
/// The default way to encode new `Cargo.lock` files.
///
/// It's important that if a new version of `ResolveVersion` is added that
/// this is not updated until *at least* the support for the version is in
/// the stable release of Rust. It's ok for this to be newer than
/// `default_for_old_lockfiles` below.
pub fn default_for_new_lockfiles() -> ResolveVersion {
ResolveVersion::V2
}
/// The default way to encode old preexisting `Cargo.lock` files. This is
/// often trailing the new lockfiles one above to give older projects a
/// longer time to catch up.
///
/// It's important that this trails behind `default_for_new_lockfiles` for
/// quite some time. This gives projects a quite large window to update in
/// where we don't force updates, so if projects span many versions of Cargo
/// all those versions of Cargo will have support for a new version of the
/// lock file.
pub fn default_for_old_lockfiles() -> ResolveVersion {
ResolveVersion::V1
}
}
|
{
&self.replacements
}
|
identifier_body
|
clusters.rs
|
use maud::{html, Markup, PreEscaped};
use rocket::State;
use cache::{BrokerCache, Cache, TopicCache};
use metadata::ClusterId;
use web_server::view::layout;
/// Renders one Bootstrap panel summarizing a cluster: logo, the cluster
/// name linked to `/clusters/<name>/`, broker and topic counts, and a
/// "View Details" footer linking to the same page.
fn cluster_pane_layout(
cluster_id: &ClusterId,
brokers: usize,
topics: usize,
) -> PreEscaped<String> {
let link = format!("/clusters/{}/", cluster_id.name());
html! {
div class="col-lg-4 col-md-6" {
div class="panel panel-primary" {
div class="panel-heading" {
div class="row" {
// div class="col-xs-3" i class="fa fa-server fa-5x" {}
div class="col-xs-3" { img style="height: 64px" src="/public/images/kafka_logo_white.png" {} }
div class="col-xs-9 text-right" {
div style="font-size: 24px" {
a href=(link) style="color: inherit; text-decoration: inherit;" { (cluster_id.name()) }
}
div { (brokers) " brokers" }
div { (topics) " topics" }
}
}
}
a href=(link) {
div class="panel-footer" {
span class="pull-left" { "View Details" }
span class="pull-right" { i class="fa fa-arrow-circle-right" {} }
div class="clearfix" {}
}
}
}
}
}
}
/// Looks up the broker and topic counts for `cluster_id` in the caches and
/// renders its summary pane.
fn cluster_pane(
    cluster_id: &ClusterId,
    broker_cache: &BrokerCache,
    topic_cache: &TopicCache,
) -> PreEscaped<String> {
    // Topics are counted by matching the cluster half of the cache key.
    let topic_total = topic_cache.count(|&(ref c, _)| c == cluster_id);
    // A cluster missing from the broker cache simply renders as 0 brokers.
    let broker_total = broker_cache.get(cluster_id).unwrap_or_default().len();
    cluster_pane_layout(cluster_id, broker_total, topic_total)
}
#[get("/clusters")]
pub fn clusters_page(cache: State<Cache>) -> Markup
|
{
let mut cluster_ids = cache.brokers.keys();
cluster_ids.sort();
let content = html! {
@for cluster_id in &cluster_ids {
(cluster_pane(cluster_id, &cache.brokers, &cache.topics))
}
};
layout::page("Clusters", content)
}
|
identifier_body
|
|
clusters.rs
|
use maud::{html, Markup, PreEscaped};
use rocket::State;
use cache::{BrokerCache, Cache, TopicCache};
use metadata::ClusterId;
use web_server::view::layout;
fn cluster_pane_layout(
cluster_id: &ClusterId,
brokers: usize,
topics: usize,
) -> PreEscaped<String> {
let link = format!("/clusters/{}/", cluster_id.name());
html! {
div class="col-lg-4 col-md-6" {
div class="panel panel-primary" {
div class="panel-heading" {
div class="row" {
// div class="col-xs-3" i class="fa fa-server fa-5x" {}
div class="col-xs-3" { img style="height: 64px" src="/public/images/kafka_logo_white.png" {} }
div class="col-xs-9 text-right" {
div style="font-size: 24px" {
a href=(link) style="color: inherit; text-decoration: inherit;" { (cluster_id.name()) }
}
div { (brokers) " brokers" }
div { (topics) " topics" }
}
}
}
a href=(link) {
div class="panel-footer" {
span class="pull-left" { "View Details" }
span class="pull-right" { i class="fa fa-arrow-circle-right" {} }
div class="clearfix" {}
}
}
}
}
}
}
fn cluster_pane(
cluster_id: &ClusterId,
broker_cache: &BrokerCache,
topic_cache: &TopicCache,
) -> PreEscaped<String> {
let broker_count = broker_cache.get(cluster_id).unwrap_or_default().len();
let topics_count = topic_cache.count(|&(ref c, _)| c == cluster_id);
cluster_pane_layout(cluster_id, broker_count, topics_count)
}
#[get("/clusters")]
pub fn
|
(cache: State<Cache>) -> Markup {
let mut cluster_ids = cache.brokers.keys();
cluster_ids.sort();
let content = html! {
@for cluster_id in &cluster_ids {
(cluster_pane(cluster_id, &cache.brokers, &cache.topics))
}
};
layout::page("Clusters", content)
}
|
clusters_page
|
identifier_name
|
clusters.rs
|
use maud::{html, Markup, PreEscaped};
use rocket::State;
use cache::{BrokerCache, Cache, TopicCache};
use metadata::ClusterId;
use web_server::view::layout;
fn cluster_pane_layout(
cluster_id: &ClusterId,
brokers: usize,
topics: usize,
) -> PreEscaped<String> {
let link = format!("/clusters/{}/", cluster_id.name());
html! {
div class="col-lg-4 col-md-6" {
div class="panel panel-primary" {
div class="panel-heading" {
div class="row" {
// div class="col-xs-3" i class="fa fa-server fa-5x" {}
div class="col-xs-3" { img style="height: 64px" src="/public/images/kafka_logo_white.png" {} }
div class="col-xs-9 text-right" {
div style="font-size: 24px" {
a href=(link) style="color: inherit; text-decoration: inherit;" { (cluster_id.name()) }
}
div { (brokers) " brokers" }
div { (topics) " topics" }
}
}
}
a href=(link) {
div class="panel-footer" {
span class="pull-left" { "View Details" }
span class="pull-right" { i class="fa fa-arrow-circle-right" {} }
div class="clearfix" {}
}
}
}
}
}
|
topic_cache: &TopicCache,
) -> PreEscaped<String> {
let broker_count = broker_cache.get(cluster_id).unwrap_or_default().len();
let topics_count = topic_cache.count(|&(ref c, _)| c == cluster_id);
cluster_pane_layout(cluster_id, broker_count, topics_count)
}
/// `GET /clusters`: renders one summary pane per cluster known to the
/// cache, sorted by cluster id.
#[get("/clusters")]
pub fn clusters_page(cache: State<Cache>) -> Markup {
// NOTE(review): `keys()` appears to return an owned collection here
// (sorting in place on it), presumably a Vec — confirm against `Cache`.
let mut cluster_ids = cache.brokers.keys();
cluster_ids.sort();
let content = html! {
@for cluster_id in &cluster_ids {
(cluster_pane(cluster_id, &cache.brokers, &cache.topics))
}
};
layout::page("Clusters", content)
}
|
}
fn cluster_pane(
cluster_id: &ClusterId,
broker_cache: &BrokerCache,
|
random_line_split
|
tests.rs
|
use super::*;
use std::cell::Cell;
#[test]
fn allocator_param() {
use crate::alloc::AllocError;
// Writing a test of integration between third-party
// allocators and `RawVec` is a little tricky because the `RawVec`
// API does not expose fallible allocation methods, so we
// cannot check what happens when allocator is exhausted
// (beyond detecting a panic).
//
// Instead, this just checks that the `RawVec` methods do at
// least go through the Allocator API when it reserves
// storage.
// A dumb allocator that consumes a fixed amount of fuel
// before allocation attempts start failing.
struct BoundedAlloc {
fuel: Cell<usize>,
}
unsafe impl Allocator for BoundedAlloc {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let size = layout.size();
if size > self.fuel.get() {
return Err(AllocError);
}
match Global.allocate(layout) {
ok @ Ok(_) => {
self.fuel.set(self.fuel.get() - size);
ok
}
err @ Err(_) => err,
}
}
unsafe fn
|
(&self, ptr: NonNull<u8>, layout: Layout) {
unsafe { Global.deallocate(ptr, layout) }
}
}
let a = BoundedAlloc { fuel: Cell::new(500) };
let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
assert_eq!(v.alloc.fuel.get(), 450);
v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
assert_eq!(v.alloc.fuel.get(), 250);
}
#[test]
fn reserve_does_not_overallocate() {
// Each scope below exercises one growth regime of `RawVec::reserve`.
{
let mut v: RawVec<u32> = RawVec::new();
// First, `reserve` allocates like `reserve_exact`.
v.reserve(0, 9);
assert_eq!(9, v.capacity());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 7);
assert_eq!(7, v.capacity());
// 97 is more than double of 7, so `reserve` should work
// like `reserve_exact`.
v.reserve(7, 90);
assert_eq!(97, v.capacity());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 12);
assert_eq!(12, v.capacity());
v.reserve(12, 3);
// 3 is less than half of 12, so `reserve` must grow
// exponentially. At the time of writing this test grow
// factor is 2, so new capacity is 24, however, grow factor
// of 1.5 is OK too. Hence `>= 18` in assert.
assert!(v.capacity() >= 12 + 12 / 2);
}
}
|
deallocate
|
identifier_name
|
tests.rs
|
use super::*;
use std::cell::Cell;
#[test]
fn allocator_param() {
use crate::alloc::AllocError;
// Writing a test of integration between third-party
// allocators and `RawVec` is a little tricky because the `RawVec`
// API does not expose fallible allocation methods, so we
// cannot check what happens when allocator is exhausted
// (beyond detecting a panic).
//
// Instead, this just checks that the `RawVec` methods do at
// least go through the Allocator API when it reserves
// storage.
// A dumb allocator that consumes a fixed amount of fuel
// before allocation attempts start failing.
struct BoundedAlloc {
fuel: Cell<usize>,
}
unsafe impl Allocator for BoundedAlloc {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let size = layout.size();
if size > self.fuel.get() {
return Err(AllocError);
}
match Global.allocate(layout) {
ok @ Ok(_) =>
|
err @ Err(_) => err,
}
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
unsafe { Global.deallocate(ptr, layout) }
}
}
let a = BoundedAlloc { fuel: Cell::new(500) };
let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
assert_eq!(v.alloc.fuel.get(), 450);
v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
assert_eq!(v.alloc.fuel.get(), 250);
}
#[test]
fn reserve_does_not_overallocate() {
{
let mut v: RawVec<u32> = RawVec::new();
// First, `reserve` allocates like `reserve_exact`.
v.reserve(0, 9);
assert_eq!(9, v.capacity());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 7);
assert_eq!(7, v.capacity());
// 97 is more than double of 7, so `reserve` should work
// like `reserve_exact`.
v.reserve(7, 90);
assert_eq!(97, v.capacity());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 12);
assert_eq!(12, v.capacity());
v.reserve(12, 3);
// 3 is less than half of 12, so `reserve` must grow
// exponentially. At the time of writing this test grow
// factor is 2, so new capacity is 24, however, grow factor
// of 1.5 is OK too. Hence `>= 18` in assert.
assert!(v.capacity() >= 12 + 12 / 2);
}
}
|
{
self.fuel.set(self.fuel.get() - size);
ok
}
|
conditional_block
|
tests.rs
|
use super::*;
use std::cell::Cell;
#[test]
fn allocator_param() {
use crate::alloc::AllocError;
// Writing a test of integration between third-party
// allocators and `RawVec` is a little tricky because the `RawVec`
// API does not expose fallible allocation methods, so we
// cannot check what happens when allocator is exhausted
// (beyond detecting a panic).
//
// Instead, this just checks that the `RawVec` methods do at
// least go through the Allocator API when it reserves
// storage.
// A dumb allocator that consumes a fixed amount of fuel
// before allocation attempts start failing.
struct BoundedAlloc {
fuel: Cell<usize>,
}
unsafe impl Allocator for BoundedAlloc {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError>
|
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
unsafe { Global.deallocate(ptr, layout) }
}
}
let a = BoundedAlloc { fuel: Cell::new(500) };
let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
assert_eq!(v.alloc.fuel.get(), 450);
v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
assert_eq!(v.alloc.fuel.get(), 250);
}
#[test]
fn reserve_does_not_overallocate() {
{
let mut v: RawVec<u32> = RawVec::new();
// First, `reserve` allocates like `reserve_exact`.
v.reserve(0, 9);
assert_eq!(9, v.capacity());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 7);
assert_eq!(7, v.capacity());
// 97 is more than double of 7, so `reserve` should work
// like `reserve_exact`.
v.reserve(7, 90);
assert_eq!(97, v.capacity());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 12);
assert_eq!(12, v.capacity());
v.reserve(12, 3);
// 3 is less than half of 12, so `reserve` must grow
// exponentially. At the time of writing this test grow
// factor is 2, so new capacity is 24, however, grow factor
// of 1.5 is OK too. Hence `>= 18` in assert.
assert!(v.capacity() >= 12 + 12 / 2);
}
}
|
{
let size = layout.size();
if size > self.fuel.get() {
return Err(AllocError);
}
match Global.allocate(layout) {
ok @ Ok(_) => {
self.fuel.set(self.fuel.get() - size);
ok
}
err @ Err(_) => err,
}
}
|
identifier_body
|
tests.rs
|
use super::*;
use std::cell::Cell;
#[test]
fn allocator_param() {
use crate::alloc::AllocError;
// Writing a test of integration between third-party
// allocators and `RawVec` is a little tricky because the `RawVec`
// API does not expose fallible allocation methods, so we
// cannot check what happens when allocator is exhausted
// (beyond detecting a panic).
//
// Instead, this just checks that the `RawVec` methods do at
// least go through the Allocator API when it reserves
// storage.
// A dumb allocator that consumes a fixed amount of fuel
// before allocation attempts start failing.
struct BoundedAlloc {
// Remaining bytes this allocator is willing to hand out.
fuel: Cell<usize>,
}
unsafe impl Allocator for BoundedAlloc {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let size = layout.size();
if size > self.fuel.get() {
return Err(AllocError);
}
// Delegate to the global allocator; fuel is only burned on success.
match Global.allocate(layout) {
ok @ Ok(_) => {
self.fuel.set(self.fuel.get() - size);
ok
}
err @ Err(_) => err,
}
}
// Deallocation forwards to `Global`; fuel is deliberately not refunded.
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
unsafe { Global.deallocate(ptr, layout) }
}
}
// 500 units of fuel: 50 for the initial allocation (500 -> 450), then a
// realloc of 50 + 150 = 200 more (450 -> 250), as asserted below.
let a = BoundedAlloc { fuel: Cell::new(500) };
let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
assert_eq!(v.alloc.fuel.get(), 450);
v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
assert_eq!(v.alloc.fuel.get(), 250);
}
#[test]
fn reserve_does_not_overallocate() {
{
let mut v: RawVec<u32> = RawVec::new();
|
v.reserve(0, 9);
assert_eq!(9, v.capacity());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 7);
assert_eq!(7, v.capacity());
// 97 is more than double of 7, so `reserve` should work
// like `reserve_exact`.
v.reserve(7, 90);
assert_eq!(97, v.capacity());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 12);
assert_eq!(12, v.capacity());
v.reserve(12, 3);
// 3 is less than half of 12, so `reserve` must grow
// exponentially. At the time of writing this test grow
// factor is 2, so new capacity is 24, however, grow factor
// of 1.5 is OK too. Hence `>= 18` in assert.
assert!(v.capacity() >= 12 + 12 / 2);
}
}
|
// First, `reserve` allocates like `reserve_exact`.
|
random_line_split
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![cfg_attr(feature = "unstable", feature(core_intrinsics))]
#![cfg_attr(feature = "unstable", feature(on_unimplemented))]
#![feature(const_fn)]
#![feature(mpsc_select)]
#![feature(plugin)]
#![feature(string_retain)]
#![feature(use_extern_macros)]
#![deny(unsafe_code)]
#![allow(non_snake_case)]
#![doc = "The script crate contains all matters DOM."]
#![plugin(script_plugins)]
#![cfg_attr(not(feature = "unrooted_must_root_lint"), allow(unknown_lints))]
extern crate app_units;
extern crate audio_video_metadata;
extern crate base64;
#[macro_use]
extern crate bitflags;
extern crate bluetooth_traits;
extern crate byteorder;
extern crate canvas_traits;
extern crate caseless;
extern crate chrono;
extern crate cookie as cookie_rs;
#[macro_use] extern crate cssparser;
#[macro_use] extern crate deny_public_fields;
extern crate devtools_traits;
extern crate dom_struct;
#[macro_use]
extern crate domobject_derive;
extern crate embedder_traits;
extern crate encoding_rs;
#[macro_use] extern crate enum_iterator;
extern crate euclid;
extern crate fnv;
extern crate gleam;
extern crate half;
#[macro_use] extern crate html5ever;
#[macro_use]
extern crate hyper;
extern crate hyper_serde;
extern crate image;
extern crate ipc_channel;
#[macro_use]
extern crate jstraceable_derive;
#[macro_use]
extern crate lazy_static;
extern crate libc;
#[macro_use]
extern crate log;
#[macro_use] extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
extern crate metrics;
#[macro_use]
extern crate mime;
extern crate mime_guess;
extern crate mitochondria;
extern crate mozangle;
#[macro_use]
extern crate mozjs as js;
extern crate msg;
extern crate net_traits;
extern crate num_traits;
extern crate offscreen_gl_context;
extern crate parking_lot;
extern crate phf;
#[macro_use]
extern crate profile_traits;
extern crate ref_filter_map;
extern crate ref_slice;
extern crate regex;
extern crate script_layout_interface;
extern crate script_traits;
extern crate selectors;
extern crate serde;
extern crate serde_bytes;
extern crate servo_allocator;
extern crate servo_arc;
#[macro_use] extern crate servo_atoms;
extern crate servo_config;
extern crate servo_geometry;
extern crate servo_media;
extern crate servo_rand;
extern crate servo_url;
extern crate smallvec;
#[macro_use]
extern crate style;
extern crate style_traits;
extern crate swapper;
extern crate time;
#[cfg(target_os = "linux")]
extern crate tinyfiledialogs;
extern crate unicode_segmentation;
extern crate url;
extern crate utf8;
extern crate uuid;
extern crate webrender_api;
extern crate webvr_traits;
extern crate xml5ever;
#[macro_use]
mod task;
mod body;
pub mod clipboard_provider;
mod devtools;
pub mod document_loader;
#[macro_use]
mod dom;
pub mod fetch;
mod layout_image;
mod mem;
mod microtask;
mod network_listener;
pub mod script_runtime;
#[allow(unsafe_code)]
pub mod script_thread;
mod serviceworker_manager;
mod serviceworkerjob;
mod stylesheet_loader;
mod task_source;
pub mod test;
pub mod textinput;
mod timers;
mod unpremultiplytable;
mod webdriver_handlers;
/// A module with everything layout can use from script.
///
/// Try to keep this small!
///
/// TODO(emilio): A few of the FooHelpers can go away, presumably...
pub mod layout_exports {
pub use dom::bindings::inheritance::{CharacterDataTypeId, ElementTypeId};
pub use dom::bindings::inheritance::{HTMLElementTypeId, NodeTypeId};
pub use dom::bindings::root::LayoutDom;
pub use dom::characterdata::LayoutCharacterDataHelpers;
pub use dom::document::{Document, LayoutDocumentHelpers, PendingRestyle};
pub use dom::element::{Element, LayoutElementHelpers, RawLayoutElementHelpers};
pub use dom::node::NodeFlags;
pub use dom::node::{LayoutNodeHelpers, Node};
pub use dom::text::Text;
}
use dom::bindings::codegen::RegisterBindings;
use dom::bindings::conversions::is_dom_proxy;
use dom::bindings::proxyhandler;
use dom::bindings::utils::is_platform_object;
use js::jsapi::JSObject;
use script_traits::SWManagerSenders;
use serviceworker_manager::ServiceWorkerManager;
#[cfg(target_os = "linux")]
#[allow(unsafe_code)]
fn perform_platform_specific_initialization() {
use std::mem;
// 4096 is default max on many linux systems
const MAX_FILE_LIMIT: libc::rlim_t = 4096;
// Bump up our number of file descriptors to save us from impending doom caused by an onslaught
// of iframes.
unsafe {
let mut rlim: libc::rlimit = mem::uninitialized();
match libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) {
0 => {
if rlim.rlim_cur >= MAX_FILE_LIMIT {
// we have more than enough
return;
}
rlim.rlim_cur = match rlim.rlim_max {
libc::RLIM_INFINITY => MAX_FILE_LIMIT,
_ => {
if rlim.rlim_max < MAX_FILE_LIMIT {
rlim.rlim_max
} else {
MAX_FILE_LIMIT
}
}
};
match libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) {
0 => (),
_ => warn!("Failed to set file count limit"),
};
},
_ => warn!("Failed to get file count limit"),
};
}
}
#[cfg(not(target_os = "linux"))]
fn perform_platform_specific_initialization() {}
pub fn init_service_workers(sw_senders: SWManagerSenders)
|
#[allow(unsafe_code)]
unsafe extern "C" fn is_dom_object(obj: *mut JSObject) -> bool {
!obj.is_null() && (is_platform_object(obj) || is_dom_proxy(obj))
}
#[allow(unsafe_code)]
pub fn init() {
unsafe {
proxyhandler::init();
// Create the global vtables used by the (generated) DOM
// bindings to implement JS proxies.
RegisterBindings::RegisterProxyHandlers();
js::glue::InitializeMemoryReporter(Some(is_dom_object));
}
perform_platform_specific_initialization();
}
|
{
// Spawn the service worker manager passing the constellation sender
ServiceWorkerManager::spawn_manager(sw_senders);
}
|
identifier_body
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![cfg_attr(feature = "unstable", feature(core_intrinsics))]
#![cfg_attr(feature = "unstable", feature(on_unimplemented))]
#![feature(const_fn)]
#![feature(mpsc_select)]
#![feature(plugin)]
#![feature(string_retain)]
#![feature(use_extern_macros)]
#![deny(unsafe_code)]
#![allow(non_snake_case)]
#![doc = "The script crate contains all matters DOM."]
#![plugin(script_plugins)]
#![cfg_attr(not(feature = "unrooted_must_root_lint"), allow(unknown_lints))]
extern crate app_units;
extern crate audio_video_metadata;
extern crate base64;
#[macro_use]
extern crate bitflags;
extern crate bluetooth_traits;
extern crate byteorder;
extern crate canvas_traits;
extern crate caseless;
extern crate chrono;
extern crate cookie as cookie_rs;
#[macro_use] extern crate cssparser;
#[macro_use] extern crate deny_public_fields;
extern crate devtools_traits;
extern crate dom_struct;
#[macro_use]
extern crate domobject_derive;
extern crate embedder_traits;
extern crate encoding_rs;
#[macro_use] extern crate enum_iterator;
extern crate euclid;
extern crate fnv;
extern crate gleam;
extern crate half;
#[macro_use] extern crate html5ever;
#[macro_use]
extern crate hyper;
extern crate hyper_serde;
extern crate image;
extern crate ipc_channel;
#[macro_use]
extern crate jstraceable_derive;
#[macro_use]
extern crate lazy_static;
extern crate libc;
#[macro_use]
extern crate log;
#[macro_use] extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
extern crate metrics;
#[macro_use]
extern crate mime;
extern crate mime_guess;
extern crate mitochondria;
extern crate mozangle;
#[macro_use]
extern crate mozjs as js;
extern crate msg;
extern crate net_traits;
extern crate num_traits;
extern crate offscreen_gl_context;
extern crate parking_lot;
extern crate phf;
#[macro_use]
extern crate profile_traits;
extern crate ref_filter_map;
extern crate ref_slice;
extern crate regex;
extern crate script_layout_interface;
extern crate script_traits;
extern crate selectors;
extern crate serde;
extern crate serde_bytes;
extern crate servo_allocator;
extern crate servo_arc;
#[macro_use] extern crate servo_atoms;
extern crate servo_config;
extern crate servo_geometry;
extern crate servo_media;
extern crate servo_rand;
extern crate servo_url;
extern crate smallvec;
#[macro_use]
extern crate style;
extern crate style_traits;
extern crate swapper;
extern crate time;
#[cfg(target_os = "linux")]
extern crate tinyfiledialogs;
extern crate unicode_segmentation;
extern crate url;
extern crate utf8;
extern crate uuid;
extern crate webrender_api;
extern crate webvr_traits;
extern crate xml5ever;
#[macro_use]
mod task;
mod body;
pub mod clipboard_provider;
mod devtools;
pub mod document_loader;
#[macro_use]
mod dom;
pub mod fetch;
mod layout_image;
mod mem;
mod microtask;
mod network_listener;
pub mod script_runtime;
#[allow(unsafe_code)]
pub mod script_thread;
mod serviceworker_manager;
mod serviceworkerjob;
mod stylesheet_loader;
mod task_source;
pub mod test;
pub mod textinput;
mod timers;
mod unpremultiplytable;
mod webdriver_handlers;
/// A module with everything layout can use from script.
///
/// Try to keep this small!
///
/// TODO(emilio): A few of the FooHelpers can go away, presumably...
pub mod layout_exports {
pub use dom::bindings::inheritance::{CharacterDataTypeId, ElementTypeId};
pub use dom::bindings::inheritance::{HTMLElementTypeId, NodeTypeId};
pub use dom::bindings::root::LayoutDom;
pub use dom::characterdata::LayoutCharacterDataHelpers;
pub use dom::document::{Document, LayoutDocumentHelpers, PendingRestyle};
pub use dom::element::{Element, LayoutElementHelpers, RawLayoutElementHelpers};
pub use dom::node::NodeFlags;
pub use dom::node::{LayoutNodeHelpers, Node};
pub use dom::text::Text;
}
use dom::bindings::codegen::RegisterBindings;
use dom::bindings::conversions::is_dom_proxy;
use dom::bindings::proxyhandler;
use dom::bindings::utils::is_platform_object;
use js::jsapi::JSObject;
use script_traits::SWManagerSenders;
use serviceworker_manager::ServiceWorkerManager;
#[cfg(target_os = "linux")]
#[allow(unsafe_code)]
fn perform_platform_specific_initialization() {
use std::mem;
// 4096 is default max on many linux systems
const MAX_FILE_LIMIT: libc::rlim_t = 4096;
// Bump up our number of file descriptors to save us from impending doom caused by an onslaught
// of iframes.
unsafe {
let mut rlim: libc::rlimit = mem::uninitialized();
match libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) {
0 => {
if rlim.rlim_cur >= MAX_FILE_LIMIT {
// we have more than enough
return;
}
rlim.rlim_cur = match rlim.rlim_max {
libc::RLIM_INFINITY => MAX_FILE_LIMIT,
_ => {
if rlim.rlim_max < MAX_FILE_LIMIT {
rlim.rlim_max
} else {
MAX_FILE_LIMIT
}
}
};
match libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) {
0 => (),
_ => warn!("Failed to set file count limit"),
};
},
_ => warn!("Failed to get file count limit"),
};
}
}
#[cfg(not(target_os = "linux"))]
fn
|
() {}
pub fn init_service_workers(sw_senders: SWManagerSenders) {
// Spawn the service worker manager passing the constellation sender
ServiceWorkerManager::spawn_manager(sw_senders);
}
#[allow(unsafe_code)]
unsafe extern "C" fn is_dom_object(obj: *mut JSObject) -> bool {
!obj.is_null() && (is_platform_object(obj) || is_dom_proxy(obj))
}
#[allow(unsafe_code)]
pub fn init() {
unsafe {
proxyhandler::init();
// Create the global vtables used by the (generated) DOM
// bindings to implement JS proxies.
RegisterBindings::RegisterProxyHandlers();
js::glue::InitializeMemoryReporter(Some(is_dom_object));
}
perform_platform_specific_initialization();
}
|
perform_platform_specific_initialization
|
identifier_name
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![cfg_attr(feature = "unstable", feature(core_intrinsics))]
#![cfg_attr(feature = "unstable", feature(on_unimplemented))]
#![feature(const_fn)]
#![feature(mpsc_select)]
#![feature(plugin)]
#![feature(string_retain)]
#![feature(use_extern_macros)]
#![deny(unsafe_code)]
#![allow(non_snake_case)]
#![doc = "The script crate contains all matters DOM."]
#![plugin(script_plugins)]
#![cfg_attr(not(feature = "unrooted_must_root_lint"), allow(unknown_lints))]
extern crate app_units;
extern crate audio_video_metadata;
extern crate base64;
#[macro_use]
extern crate bitflags;
extern crate bluetooth_traits;
extern crate byteorder;
extern crate canvas_traits;
extern crate caseless;
extern crate chrono;
extern crate cookie as cookie_rs;
#[macro_use] extern crate cssparser;
#[macro_use] extern crate deny_public_fields;
extern crate devtools_traits;
extern crate dom_struct;
#[macro_use]
extern crate domobject_derive;
extern crate embedder_traits;
extern crate encoding_rs;
#[macro_use] extern crate enum_iterator;
extern crate euclid;
extern crate fnv;
extern crate gleam;
extern crate half;
#[macro_use] extern crate html5ever;
#[macro_use]
extern crate hyper;
extern crate hyper_serde;
extern crate image;
extern crate ipc_channel;
#[macro_use]
extern crate jstraceable_derive;
#[macro_use]
extern crate lazy_static;
extern crate libc;
#[macro_use]
extern crate log;
#[macro_use] extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
extern crate metrics;
#[macro_use]
extern crate mime;
extern crate mime_guess;
extern crate mitochondria;
extern crate mozangle;
#[macro_use]
extern crate mozjs as js;
extern crate msg;
extern crate net_traits;
extern crate num_traits;
extern crate offscreen_gl_context;
extern crate parking_lot;
extern crate phf;
#[macro_use]
extern crate profile_traits;
extern crate ref_filter_map;
extern crate ref_slice;
extern crate regex;
extern crate script_layout_interface;
extern crate script_traits;
extern crate selectors;
extern crate serde;
extern crate serde_bytes;
extern crate servo_allocator;
extern crate servo_arc;
#[macro_use] extern crate servo_atoms;
extern crate servo_config;
extern crate servo_geometry;
extern crate servo_media;
extern crate servo_rand;
extern crate servo_url;
extern crate smallvec;
#[macro_use]
extern crate style;
extern crate style_traits;
extern crate swapper;
extern crate time;
#[cfg(target_os = "linux")]
extern crate tinyfiledialogs;
extern crate unicode_segmentation;
extern crate url;
extern crate utf8;
extern crate uuid;
extern crate webrender_api;
extern crate webvr_traits;
extern crate xml5ever;
#[macro_use]
mod task;
mod body;
pub mod clipboard_provider;
mod devtools;
pub mod document_loader;
#[macro_use]
mod dom;
pub mod fetch;
mod layout_image;
mod mem;
mod microtask;
mod network_listener;
pub mod script_runtime;
#[allow(unsafe_code)]
pub mod script_thread;
mod serviceworker_manager;
mod serviceworkerjob;
mod stylesheet_loader;
mod task_source;
pub mod test;
pub mod textinput;
mod timers;
mod unpremultiplytable;
mod webdriver_handlers;
/// A module with everything layout can use from script.
///
/// Try to keep this small!
///
/// TODO(emilio): A few of the FooHelpers can go away, presumably...
pub mod layout_exports {
pub use dom::bindings::inheritance::{CharacterDataTypeId, ElementTypeId};
pub use dom::bindings::inheritance::{HTMLElementTypeId, NodeTypeId};
pub use dom::bindings::root::LayoutDom;
pub use dom::characterdata::LayoutCharacterDataHelpers;
pub use dom::document::{Document, LayoutDocumentHelpers, PendingRestyle};
pub use dom::element::{Element, LayoutElementHelpers, RawLayoutElementHelpers};
pub use dom::node::NodeFlags;
pub use dom::node::{LayoutNodeHelpers, Node};
pub use dom::text::Text;
}
use dom::bindings::codegen::RegisterBindings;
use dom::bindings::conversions::is_dom_proxy;
use dom::bindings::proxyhandler;
use dom::bindings::utils::is_platform_object;
use js::jsapi::JSObject;
use script_traits::SWManagerSenders;
use serviceworker_manager::ServiceWorkerManager;
#[cfg(target_os = "linux")]
#[allow(unsafe_code)]
fn perform_platform_specific_initialization() {
use std::mem;
// 4096 is default max on many linux systems
const MAX_FILE_LIMIT: libc::rlim_t = 4096;
// Bump up our number of file descriptors to save us from impending doom caused by an onslaught
// of iframes.
unsafe {
let mut rlim: libc::rlimit = mem::uninitialized();
match libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) {
|
// we have more than enough
return;
}
rlim.rlim_cur = match rlim.rlim_max {
libc::RLIM_INFINITY => MAX_FILE_LIMIT,
_ => {
if rlim.rlim_max < MAX_FILE_LIMIT {
rlim.rlim_max
} else {
MAX_FILE_LIMIT
}
}
};
match libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) {
0 => (),
_ => warn!("Failed to set file count limit"),
};
},
_ => warn!("Failed to get file count limit"),
};
}
}
#[cfg(not(target_os = "linux"))]
fn perform_platform_specific_initialization() {}
pub fn init_service_workers(sw_senders: SWManagerSenders) {
// Spawn the service worker manager passing the constellation sender
ServiceWorkerManager::spawn_manager(sw_senders);
}
#[allow(unsafe_code)]
unsafe extern "C" fn is_dom_object(obj: *mut JSObject) -> bool {
!obj.is_null() && (is_platform_object(obj) || is_dom_proxy(obj))
}
#[allow(unsafe_code)]
pub fn init() {
unsafe {
proxyhandler::init();
// Create the global vtables used by the (generated) DOM
// bindings to implement JS proxies.
RegisterBindings::RegisterProxyHandlers();
js::glue::InitializeMemoryReporter(Some(is_dom_object));
}
perform_platform_specific_initialization();
}
|
0 => {
if rlim.rlim_cur >= MAX_FILE_LIMIT {
|
random_line_split
|
main.rs
|
#![feature(vec_resize)]
use std::iter::repeat;
use std::collections::VecDeque;
use std::collections::BinaryHeap;
use std::cmp::Ordering;
fn max(a: u32, b: u32) -> u32 { if a > b { a } else { b } }
#[derive(Clone)]
struct Edge {
vertex: u32,
weight: f32,
}
impl Edge {
pub fn new(vertex_: u32, weight_: f32) -> Edge {
Edge{vertex: vertex_, weight: weight_,}
}
}
impl Ord for Edge {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
impl Eq for Edge {}
impl PartialOrd for Edge {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
(self.weight, self.vertex).partial_cmp(&(other.weight, other.vertex))
}
}
impl PartialEq for Edge {
fn eq(&self, other: &Self) -> bool {
(self.weight, &self.vertex) == (other.weight, &other.vertex)
}
}
struct Graph {
nodes: Vec<Vec<Edge>>,
}
impl Graph {
pub fn new() -> Graph { Graph{nodes: Vec::new(),} }
pub fn add_edge(&mut self, src: u32, dst: u32, weight: f32) {
let len = self.nodes.len();
if (max(src, dst)) > len as u32 {
let new_len = (max(src, dst) + 1) as usize;
self.nodes.extend(repeat(Vec::new()).take(new_len - len))
}
self.nodes[src as usize].push(Edge::new(dst, weight));
}
|
pub fn bfs(&self, src: u32) {
let mut queue: VecDeque<u32> = VecDeque::new();
let len = self.nodes.len();
let mut visited = vec!(false ; len);
queue.push_front(src);
while let Some(current) = queue.pop_back() {
if!visited[current as usize] {
println!("current: {}", current);
visited[current as usize] = true;
} else { continue ; }
for n in &self.nodes[current as usize] {
let neighbor: u32 = n.vertex;
queue.push_front(neighbor);
}
}
}
pub fn dfs(&self, src: u32) {
let mut stack: Vec<u32> = Vec::new();
let len = self.nodes.len() as usize;
let mut visited = vec!(false ; len);
stack.push(src);
while let Some(current) = stack.pop() {
if!visited[current as usize] {
println!("current: {}", current);
visited[current as usize] = true;
} else { continue ; }
for n in &self.nodes[current as usize] {
let neighbor: u32 = n.vertex;
stack.push(neighbor);
}
}
}
pub fn dijkstra(&self, src: u32, dst: u32) {
let mut dist: Vec<f32> = Vec::new();
let mut prev: Vec<u32> = Vec::new();
let mut q = BinaryHeap::new();
const MAX_WEIGHT: f32 = std::f32::MAX;
// init dist, prev table
// would only work in rust nightly
dist.resize(self.nodes.len(), MAX_WEIGHT);
prev.resize(self.nodes.len(), 0);
// We're at `start`, with a zero cost
dist[src as usize] = 0.0;
q.push(Edge::new(src, dist[src as usize]));
while let Some(u) = q.pop() {
// loop for all edges connected to
for v in self.nodes[u.vertex as usize].iter() {
let alt: f32 = dist[u.vertex as usize] + v.weight; // accumulate shortest dist from source
// The variable alt is the length of the path from the root node to the neighbor node v
// if it were to go through u. If this path is shorter than the current shortest path
// recorded for v, that current path is replaced with this alt path.
if alt < dist[v.vertex as usize] {
dist[v.vertex as usize] = alt; // keep the shortest dist from src to v
prev[v.vertex as usize] = u.vertex;
q.push(Edge::new(v.vertex, dist[v.vertex as usize])); // Add unvisited v into the Q to be processed
}
}
}
let mut shortest_path: Vec<u32> = Vec::new();
let mut curr: u32 = dst;
shortest_path.push(curr);
while curr!= src {
curr = prev[curr as usize];
shortest_path.push(curr);
}
shortest_path.reverse();
for v in shortest_path.iter() {
println!("current: {}", v);
}
}
}
fn main() {
let mut g1 = Graph::new();
g1.add_edge(0, 1, 1.0);
g1.add_edge(0, 2, 4.0);
g1.add_edge(1, 0, 1.0);
g1.add_edge(1, 2, 2.0);
g1.add_edge(1, 3, 6.0);
g1.add_edge(2, 0, 4.0);
g1.add_edge(2, 1, 2.0);
g1.add_edge(2, 3, 3.0);
g1.add_edge(3, 1, 6.0);
g1.add_edge(3, 2, 3.0);
println!("bfs");
g1.bfs(0);
println!("");
println!("dfs");
g1.dfs(0);
println!("");
println!("dijkstra");
g1.dijkstra(0, 3);
}
|
random_line_split
|
|
main.rs
|
#![feature(vec_resize)]
use std::iter::repeat;
use std::collections::VecDeque;
use std::collections::BinaryHeap;
use std::cmp::Ordering;
fn max(a: u32, b: u32) -> u32 { if a > b { a } else
|
}
#[derive(Clone)]
struct Edge {
vertex: u32,
weight: f32,
}
impl Edge {
pub fn new(vertex_: u32, weight_: f32) -> Edge {
Edge{vertex: vertex_, weight: weight_,}
}
}
impl Ord for Edge {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
impl Eq for Edge {}
impl PartialOrd for Edge {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
(self.weight, self.vertex).partial_cmp(&(other.weight, other.vertex))
}
}
impl PartialEq for Edge {
fn eq(&self, other: &Self) -> bool {
(self.weight, &self.vertex) == (other.weight, &other.vertex)
}
}
struct Graph {
nodes: Vec<Vec<Edge>>,
}
impl Graph {
pub fn new() -> Graph { Graph{nodes: Vec::new(),} }
pub fn add_edge(&mut self, src: u32, dst: u32, weight: f32) {
let len = self.nodes.len();
if (max(src, dst)) > len as u32 {
let new_len = (max(src, dst) + 1) as usize;
self.nodes.extend(repeat(Vec::new()).take(new_len - len))
}
self.nodes[src as usize].push(Edge::new(dst, weight));
}
pub fn bfs(&self, src: u32) {
let mut queue: VecDeque<u32> = VecDeque::new();
let len = self.nodes.len();
let mut visited = vec!(false ; len);
queue.push_front(src);
while let Some(current) = queue.pop_back() {
if!visited[current as usize] {
println!("current: {}", current);
visited[current as usize] = true;
} else { continue ; }
for n in &self.nodes[current as usize] {
let neighbor: u32 = n.vertex;
queue.push_front(neighbor);
}
}
}
pub fn dfs(&self, src: u32) {
let mut stack: Vec<u32> = Vec::new();
let len = self.nodes.len() as usize;
let mut visited = vec!(false ; len);
stack.push(src);
while let Some(current) = stack.pop() {
if!visited[current as usize] {
println!("current: {}", current);
visited[current as usize] = true;
} else { continue ; }
for n in &self.nodes[current as usize] {
let neighbor: u32 = n.vertex;
stack.push(neighbor);
}
}
}
pub fn dijkstra(&self, src: u32, dst: u32) {
let mut dist: Vec<f32> = Vec::new();
let mut prev: Vec<u32> = Vec::new();
let mut q = BinaryHeap::new();
const MAX_WEIGHT: f32 = std::f32::MAX;
// init dist, prev table
// would only work in rust nightly
dist.resize(self.nodes.len(), MAX_WEIGHT);
prev.resize(self.nodes.len(), 0);
// We're at `start`, with a zero cost
dist[src as usize] = 0.0;
q.push(Edge::new(src, dist[src as usize]));
while let Some(u) = q.pop() {
// loop for all edges connected to
for v in self.nodes[u.vertex as usize].iter() {
let alt: f32 = dist[u.vertex as usize] + v.weight; // accumulate shortest dist from source
// The variable alt is the length of the path from the root node to the neighbor node v
// if it were to go through u. If this path is shorter than the current shortest path
// recorded for v, that current path is replaced with this alt path.
if alt < dist[v.vertex as usize] {
dist[v.vertex as usize] = alt; // keep the shortest dist from src to v
prev[v.vertex as usize] = u.vertex;
q.push(Edge::new(v.vertex, dist[v.vertex as usize])); // Add unvisited v into the Q to be processed
}
}
}
let mut shortest_path: Vec<u32> = Vec::new();
let mut curr: u32 = dst;
shortest_path.push(curr);
while curr!= src {
curr = prev[curr as usize];
shortest_path.push(curr);
}
shortest_path.reverse();
for v in shortest_path.iter() {
println!("current: {}", v);
}
}
}
fn main() {
let mut g1 = Graph::new();
g1.add_edge(0, 1, 1.0);
g1.add_edge(0, 2, 4.0);
g1.add_edge(1, 0, 1.0);
g1.add_edge(1, 2, 2.0);
g1.add_edge(1, 3, 6.0);
g1.add_edge(2, 0, 4.0);
g1.add_edge(2, 1, 2.0);
g1.add_edge(2, 3, 3.0);
g1.add_edge(3, 1, 6.0);
g1.add_edge(3, 2, 3.0);
println!("bfs");
g1.bfs(0);
println!("");
println!("dfs");
g1.dfs(0);
println!("");
println!("dijkstra");
g1.dijkstra(0, 3);
}
|
{ b }
|
conditional_block
|
main.rs
|
#![feature(vec_resize)]
use std::iter::repeat;
use std::collections::VecDeque;
use std::collections::BinaryHeap;
use std::cmp::Ordering;
fn max(a: u32, b: u32) -> u32 { if a > b { a } else { b } }
#[derive(Clone)]
struct Edge {
vertex: u32,
weight: f32,
}
impl Edge {
pub fn new(vertex_: u32, weight_: f32) -> Edge {
Edge{vertex: vertex_, weight: weight_,}
}
}
impl Ord for Edge {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
impl Eq for Edge {}
impl PartialOrd for Edge {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
(self.weight, self.vertex).partial_cmp(&(other.weight, other.vertex))
}
}
impl PartialEq for Edge {
fn eq(&self, other: &Self) -> bool
|
}
struct Graph {
nodes: Vec<Vec<Edge>>,
}
impl Graph {
pub fn new() -> Graph { Graph{nodes: Vec::new(),} }
pub fn add_edge(&mut self, src: u32, dst: u32, weight: f32) {
let len = self.nodes.len();
if (max(src, dst)) > len as u32 {
let new_len = (max(src, dst) + 1) as usize;
self.nodes.extend(repeat(Vec::new()).take(new_len - len))
}
self.nodes[src as usize].push(Edge::new(dst, weight));
}
pub fn bfs(&self, src: u32) {
let mut queue: VecDeque<u32> = VecDeque::new();
let len = self.nodes.len();
let mut visited = vec!(false ; len);
queue.push_front(src);
while let Some(current) = queue.pop_back() {
if!visited[current as usize] {
println!("current: {}", current);
visited[current as usize] = true;
} else { continue ; }
for n in &self.nodes[current as usize] {
let neighbor: u32 = n.vertex;
queue.push_front(neighbor);
}
}
}
pub fn dfs(&self, src: u32) {
let mut stack: Vec<u32> = Vec::new();
let len = self.nodes.len() as usize;
let mut visited = vec!(false ; len);
stack.push(src);
while let Some(current) = stack.pop() {
if!visited[current as usize] {
println!("current: {}", current);
visited[current as usize] = true;
} else { continue ; }
for n in &self.nodes[current as usize] {
let neighbor: u32 = n.vertex;
stack.push(neighbor);
}
}
}
pub fn dijkstra(&self, src: u32, dst: u32) {
let mut dist: Vec<f32> = Vec::new();
let mut prev: Vec<u32> = Vec::new();
let mut q = BinaryHeap::new();
const MAX_WEIGHT: f32 = std::f32::MAX;
// init dist, prev table
// would only work in rust nightly
dist.resize(self.nodes.len(), MAX_WEIGHT);
prev.resize(self.nodes.len(), 0);
// We're at `start`, with a zero cost
dist[src as usize] = 0.0;
q.push(Edge::new(src, dist[src as usize]));
while let Some(u) = q.pop() {
// loop for all edges connected to
for v in self.nodes[u.vertex as usize].iter() {
let alt: f32 = dist[u.vertex as usize] + v.weight; // accumulate shortest dist from source
// The variable alt is the length of the path from the root node to the neighbor node v
// if it were to go through u. If this path is shorter than the current shortest path
// recorded for v, that current path is replaced with this alt path.
if alt < dist[v.vertex as usize] {
dist[v.vertex as usize] = alt; // keep the shortest dist from src to v
prev[v.vertex as usize] = u.vertex;
q.push(Edge::new(v.vertex, dist[v.vertex as usize])); // Add unvisited v into the Q to be processed
}
}
}
let mut shortest_path: Vec<u32> = Vec::new();
let mut curr: u32 = dst;
shortest_path.push(curr);
while curr!= src {
curr = prev[curr as usize];
shortest_path.push(curr);
}
shortest_path.reverse();
for v in shortest_path.iter() {
println!("current: {}", v);
}
}
}
fn main() {
let mut g1 = Graph::new();
g1.add_edge(0, 1, 1.0);
g1.add_edge(0, 2, 4.0);
g1.add_edge(1, 0, 1.0);
g1.add_edge(1, 2, 2.0);
g1.add_edge(1, 3, 6.0);
g1.add_edge(2, 0, 4.0);
g1.add_edge(2, 1, 2.0);
g1.add_edge(2, 3, 3.0);
g1.add_edge(3, 1, 6.0);
g1.add_edge(3, 2, 3.0);
println!("bfs");
g1.bfs(0);
println!("");
println!("dfs");
g1.dfs(0);
println!("");
println!("dijkstra");
g1.dijkstra(0, 3);
}
|
{
(self.weight, &self.vertex) == (other.weight, &other.vertex)
}
|
identifier_body
|
main.rs
|
#![feature(vec_resize)]
use std::iter::repeat;
use std::collections::VecDeque;
use std::collections::BinaryHeap;
use std::cmp::Ordering;
fn max(a: u32, b: u32) -> u32 { if a > b { a } else { b } }
#[derive(Clone)]
struct Edge {
vertex: u32,
weight: f32,
}
impl Edge {
pub fn new(vertex_: u32, weight_: f32) -> Edge {
Edge{vertex: vertex_, weight: weight_,}
}
}
impl Ord for Edge {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
impl Eq for Edge {}
impl PartialOrd for Edge {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
(self.weight, self.vertex).partial_cmp(&(other.weight, other.vertex))
}
}
impl PartialEq for Edge {
fn eq(&self, other: &Self) -> bool {
(self.weight, &self.vertex) == (other.weight, &other.vertex)
}
}
struct Graph {
nodes: Vec<Vec<Edge>>,
}
impl Graph {
pub fn new() -> Graph { Graph{nodes: Vec::new(),} }
pub fn add_edge(&mut self, src: u32, dst: u32, weight: f32) {
let len = self.nodes.len();
if (max(src, dst)) > len as u32 {
let new_len = (max(src, dst) + 1) as usize;
self.nodes.extend(repeat(Vec::new()).take(new_len - len))
}
self.nodes[src as usize].push(Edge::new(dst, weight));
}
pub fn bfs(&self, src: u32) {
let mut queue: VecDeque<u32> = VecDeque::new();
let len = self.nodes.len();
let mut visited = vec!(false ; len);
queue.push_front(src);
while let Some(current) = queue.pop_back() {
if!visited[current as usize] {
println!("current: {}", current);
visited[current as usize] = true;
} else { continue ; }
for n in &self.nodes[current as usize] {
let neighbor: u32 = n.vertex;
queue.push_front(neighbor);
}
}
}
pub fn
|
(&self, src: u32) {
let mut stack: Vec<u32> = Vec::new();
let len = self.nodes.len() as usize;
let mut visited = vec!(false ; len);
stack.push(src);
while let Some(current) = stack.pop() {
if!visited[current as usize] {
println!("current: {}", current);
visited[current as usize] = true;
} else { continue ; }
for n in &self.nodes[current as usize] {
let neighbor: u32 = n.vertex;
stack.push(neighbor);
}
}
}
pub fn dijkstra(&self, src: u32, dst: u32) {
let mut dist: Vec<f32> = Vec::new();
let mut prev: Vec<u32> = Vec::new();
let mut q = BinaryHeap::new();
const MAX_WEIGHT: f32 = std::f32::MAX;
// init dist, prev table
// would only work in rust nightly
dist.resize(self.nodes.len(), MAX_WEIGHT);
prev.resize(self.nodes.len(), 0);
// We're at `start`, with a zero cost
dist[src as usize] = 0.0;
q.push(Edge::new(src, dist[src as usize]));
while let Some(u) = q.pop() {
// loop for all edges connected to
for v in self.nodes[u.vertex as usize].iter() {
let alt: f32 = dist[u.vertex as usize] + v.weight; // accumulate shortest dist from source
// The variable alt is the length of the path from the root node to the neighbor node v
// if it were to go through u. If this path is shorter than the current shortest path
// recorded for v, that current path is replaced with this alt path.
if alt < dist[v.vertex as usize] {
dist[v.vertex as usize] = alt; // keep the shortest dist from src to v
prev[v.vertex as usize] = u.vertex;
q.push(Edge::new(v.vertex, dist[v.vertex as usize])); // Add unvisited v into the Q to be processed
}
}
}
let mut shortest_path: Vec<u32> = Vec::new();
let mut curr: u32 = dst;
shortest_path.push(curr);
while curr!= src {
curr = prev[curr as usize];
shortest_path.push(curr);
}
shortest_path.reverse();
for v in shortest_path.iter() {
println!("current: {}", v);
}
}
}
fn main() {
let mut g1 = Graph::new();
g1.add_edge(0, 1, 1.0);
g1.add_edge(0, 2, 4.0);
g1.add_edge(1, 0, 1.0);
g1.add_edge(1, 2, 2.0);
g1.add_edge(1, 3, 6.0);
g1.add_edge(2, 0, 4.0);
g1.add_edge(2, 1, 2.0);
g1.add_edge(2, 3, 3.0);
g1.add_edge(3, 1, 6.0);
g1.add_edge(3, 2, 3.0);
println!("bfs");
g1.bfs(0);
println!("");
println!("dfs");
g1.dfs(0);
println!("");
println!("dijkstra");
g1.dijkstra(0, 3);
}
|
dfs
|
identifier_name
|
condexpr.rs
|
/*
* Copyright (c) 2017 Christoph Heiss
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
use std::env;
use std::path::Path;
use state::State;
use parser::{ExecResult, ToExecResult};
/*
* https://www.gnu.org/software/bash/manual/html_node/Bash-Conditional-Expressions.html
*/
pub fn exec(state: &mut State, args: &[String]) -> ExecResult
|
Some(var) => (!var.value.is_empty()).to_exec_result(),
None => match env::var(&args[1]) {
Ok(var) => (!var.is_empty()).to_exec_result(),
Err(_) => ExecResult::failure(),
},
}
},
"-R" => {
match state.var(&args[1]) {
Some(var) => (!var.value.is_empty() && var.reference).to_exec_result(),
None => ExecResult::failure(),
}
},
"-z" => args[1].is_empty().to_exec_result(),
_ => ExecResult::with_code(2),
}
}
|
{
if args.len() == 1 {
return ExecResult::failure();
}
let args = &args[..args.len()-1];
match args[0].as_ref() {
"-a" => Path::new(&args[1]).exists().to_exec_result(),
"-d" => Path::new(&args[1]).is_dir().to_exec_result(),
"-f" => Path::new(&args[1]).is_file().to_exec_result(),
"-h" => {
match Path::new(&args[1]).symlink_metadata() {
Ok(metadata) => metadata.file_type().is_symlink().to_exec_result(),
Err(_) => ExecResult::failure(),
}
},
"-n" => (!args[1].is_empty()).to_exec_result(),
"-v" => {
match state.var(&args[1]) {
|
identifier_body
|
condexpr.rs
|
/*
* Copyright (c) 2017 Christoph Heiss
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
use std::env;
use std::path::Path;
use state::State;
use parser::{ExecResult, ToExecResult};
/*
* https://www.gnu.org/software/bash/manual/html_node/Bash-Conditional-Expressions.html
*/
pub fn exec(state: &mut State, args: &[String]) -> ExecResult {
if args.len() == 1 {
return ExecResult::failure();
}
let args = &args[..args.len()-1];
match args[0].as_ref() {
"-a" => Path::new(&args[1]).exists().to_exec_result(),
"-d" => Path::new(&args[1]).is_dir().to_exec_result(),
"-f" => Path::new(&args[1]).is_file().to_exec_result(),
"-h" => {
match Path::new(&args[1]).symlink_metadata() {
Ok(metadata) => metadata.file_type().is_symlink().to_exec_result(),
Err(_) => ExecResult::failure(),
}
|
None => match env::var(&args[1]) {
Ok(var) => (!var.is_empty()).to_exec_result(),
Err(_) => ExecResult::failure(),
},
}
},
"-R" => {
match state.var(&args[1]) {
Some(var) => (!var.value.is_empty() && var.reference).to_exec_result(),
None => ExecResult::failure(),
}
},
"-z" => args[1].is_empty().to_exec_result(),
_ => ExecResult::with_code(2),
}
}
|
},
"-n" => (!args[1].is_empty()).to_exec_result(),
"-v" => {
match state.var(&args[1]) {
Some(var) => (!var.value.is_empty()).to_exec_result(),
|
random_line_split
|
condexpr.rs
|
/*
* Copyright (c) 2017 Christoph Heiss
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
use std::env;
use std::path::Path;
use state::State;
use parser::{ExecResult, ToExecResult};
/*
* https://www.gnu.org/software/bash/manual/html_node/Bash-Conditional-Expressions.html
*/
pub fn
|
(state: &mut State, args: &[String]) -> ExecResult {
if args.len() == 1 {
return ExecResult::failure();
}
let args = &args[..args.len()-1];
match args[0].as_ref() {
"-a" => Path::new(&args[1]).exists().to_exec_result(),
"-d" => Path::new(&args[1]).is_dir().to_exec_result(),
"-f" => Path::new(&args[1]).is_file().to_exec_result(),
"-h" => {
match Path::new(&args[1]).symlink_metadata() {
Ok(metadata) => metadata.file_type().is_symlink().to_exec_result(),
Err(_) => ExecResult::failure(),
}
},
"-n" => (!args[1].is_empty()).to_exec_result(),
"-v" => {
match state.var(&args[1]) {
Some(var) => (!var.value.is_empty()).to_exec_result(),
None => match env::var(&args[1]) {
Ok(var) => (!var.is_empty()).to_exec_result(),
Err(_) => ExecResult::failure(),
},
}
},
"-R" => {
match state.var(&args[1]) {
Some(var) => (!var.value.is_empty() && var.reference).to_exec_result(),
None => ExecResult::failure(),
}
},
"-z" => args[1].is_empty().to_exec_result(),
_ => ExecResult::with_code(2),
}
}
|
exec
|
identifier_name
|
mod.rs
|
#![ warn( missing_docs ) ]
#![ warn( missing_debug_implementations ) ]
// #![ feature( type_name_of_val ) ]
// #![ feature( trace_macros ) ]
//!
//! Former - variation of builder pattern.
//!
//! # Sample
//! ```
//! use former::Former;
//!
//! #[derive( Debug, PartialEq, Former )]
//! pub struct Structure1
//! {
//! int_1 : i32,
//! string_1 : String,
//! vec_1 : Vec< i32 >,
//! hashmap_strings_1 : std::collections::HashMap< String, String >,
//! int_optional_1 : core::option::Option< i32 >,
//! string_optional_1 : Option< String >,
//! }
//!
//! fn main()
//! {
//!
//! let struct1 = Structure1::former()
//! .int_1( 13 )
//! .string_1( "Abcd".to_string() )
//! .vec_1().replace( vec![ 1, 3 ] ).end()
//! .hashmap_strings_1().insert( "k1", "v1" ).insert( "k2", "v2" ).end()
//! .string_optional_1( "dir1" )
//! .form();
//! dbg!( &struct1 );
//!
//! // < &struct1 = Structure1 {
//! // < int_1: 13,
//! // < string_1: "Abcd",
//! // < vec_1: [
//! // < 1,
//! // < 3,
//! // < ],
//! // < hashmap_strings_1: {
//! // < "k1": "v1",
//! // < "k2": "v2",
//! // < },
//! // < int_optional_1: None,
//! // < string_optional_1: Some(
//! // < "dir1",
//! // < ),
//! // < }
//!
//! }
//! ```
pub use former_runtime as runtime;
|
pub use former_meta as derive;
pub use derive::Former as Former;
|
random_line_split
|
|
probe.rs
|
use std::os;
use std::io::fs::PathExtensions;
pub struct ProbeResult {
pub cert_file: Option<Path>,
pub cert_dir: Option<Path>,
}
/// Probe the system for the directory in which CA certificates should likely be
/// found.
///
/// This will only search known system locations.
pub fn find_certs_dirs() -> Vec<Path> {
// see http://gagravarr.org/writing/openssl-certs/others.shtml
[
"/var/ssl",
"/usr/share/ssl",
"/usr/local/ssl",
"/usr/local/openssl",
"/usr/local/share",
"/usr/lib/ssl",
"/usr/ssl",
"/etc/openssl",
"/etc/pki/tls",
"/etc/ssl",
].iter().map(|s| Path::new(*s)).filter(|p| {
p.exists()
}).collect()
}
pub fn
|
() {
let ProbeResult { cert_file, cert_dir } = probe();
match cert_file {
Some(path) => put("SSL_CERT_FILE", path),
None => {}
}
match cert_dir {
Some(path) => put("SSL_CERT_DIR", path),
None => {}
}
fn put(var: &str, path: Path) {
// Don't stomp over what anyone else has set
match os::getenv(var) {
Some(..) => {}
None => os::setenv(var, path),
}
}
}
pub fn probe() -> ProbeResult {
let mut result = ProbeResult {
cert_file: os::getenv("SSL_CERT_FILE").map(Path::new),
cert_dir: os::getenv("SSL_CERT_DIR").map(Path::new),
};
for certs_dir in find_certs_dirs().iter() {
// cert.pem looks to be an openssl 1.0.1 thing, while
// certs/ca-certificates.crt appears to be a 0.9.8 thing
try(&mut result.cert_file, certs_dir.join("cert.pem"));
try(&mut result.cert_file, certs_dir.join("certs/ca-certificates.crt"));
try(&mut result.cert_file, certs_dir.join("certs/ca-root-nss.crt"));
try(&mut result.cert_dir, certs_dir.join("certs"));
}
result
}
fn try(dst: &mut Option<Path>, val: Path) {
if dst.is_none() && val.exists() {
*dst = Some(val);
}
}
|
init_ssl_cert_env_vars
|
identifier_name
|
probe.rs
|
use std::os;
use std::io::fs::PathExtensions;
pub struct ProbeResult {
pub cert_file: Option<Path>,
pub cert_dir: Option<Path>,
}
/// Probe the system for the directory in which CA certificates should likely be
/// found.
///
/// This will only search known system locations.
pub fn find_certs_dirs() -> Vec<Path>
|
pub fn init_ssl_cert_env_vars() {
let ProbeResult { cert_file, cert_dir } = probe();
match cert_file {
Some(path) => put("SSL_CERT_FILE", path),
None => {}
}
match cert_dir {
Some(path) => put("SSL_CERT_DIR", path),
None => {}
}
fn put(var: &str, path: Path) {
// Don't stomp over what anyone else has set
match os::getenv(var) {
Some(..) => {}
None => os::setenv(var, path),
}
}
}
pub fn probe() -> ProbeResult {
let mut result = ProbeResult {
cert_file: os::getenv("SSL_CERT_FILE").map(Path::new),
cert_dir: os::getenv("SSL_CERT_DIR").map(Path::new),
};
for certs_dir in find_certs_dirs().iter() {
// cert.pem looks to be an openssl 1.0.1 thing, while
// certs/ca-certificates.crt appears to be a 0.9.8 thing
try(&mut result.cert_file, certs_dir.join("cert.pem"));
try(&mut result.cert_file, certs_dir.join("certs/ca-certificates.crt"));
try(&mut result.cert_file, certs_dir.join("certs/ca-root-nss.crt"));
try(&mut result.cert_dir, certs_dir.join("certs"));
}
result
}
fn try(dst: &mut Option<Path>, val: Path) {
if dst.is_none() && val.exists() {
*dst = Some(val);
}
}
|
{
// see http://gagravarr.org/writing/openssl-certs/others.shtml
[
"/var/ssl",
"/usr/share/ssl",
"/usr/local/ssl",
"/usr/local/openssl",
"/usr/local/share",
"/usr/lib/ssl",
"/usr/ssl",
"/etc/openssl",
"/etc/pki/tls",
"/etc/ssl",
].iter().map(|s| Path::new(*s)).filter(|p| {
p.exists()
}).collect()
}
|
identifier_body
|
probe.rs
|
use std::os;
use std::io::fs::PathExtensions;
pub struct ProbeResult {
pub cert_file: Option<Path>,
pub cert_dir: Option<Path>,
|
/// Probe the system for the directory in which CA certificates should likely be
/// found.
///
/// This will only search known system locations.
pub fn find_certs_dirs() -> Vec<Path> {
// see http://gagravarr.org/writing/openssl-certs/others.shtml
[
"/var/ssl",
"/usr/share/ssl",
"/usr/local/ssl",
"/usr/local/openssl",
"/usr/local/share",
"/usr/lib/ssl",
"/usr/ssl",
"/etc/openssl",
"/etc/pki/tls",
"/etc/ssl",
].iter().map(|s| Path::new(*s)).filter(|p| {
p.exists()
}).collect()
}
pub fn init_ssl_cert_env_vars() {
let ProbeResult { cert_file, cert_dir } = probe();
match cert_file {
Some(path) => put("SSL_CERT_FILE", path),
None => {}
}
match cert_dir {
Some(path) => put("SSL_CERT_DIR", path),
None => {}
}
fn put(var: &str, path: Path) {
// Don't stomp over what anyone else has set
match os::getenv(var) {
Some(..) => {}
None => os::setenv(var, path),
}
}
}
pub fn probe() -> ProbeResult {
let mut result = ProbeResult {
cert_file: os::getenv("SSL_CERT_FILE").map(Path::new),
cert_dir: os::getenv("SSL_CERT_DIR").map(Path::new),
};
for certs_dir in find_certs_dirs().iter() {
// cert.pem looks to be an openssl 1.0.1 thing, while
// certs/ca-certificates.crt appears to be a 0.9.8 thing
try(&mut result.cert_file, certs_dir.join("cert.pem"));
try(&mut result.cert_file, certs_dir.join("certs/ca-certificates.crt"));
try(&mut result.cert_file, certs_dir.join("certs/ca-root-nss.crt"));
try(&mut result.cert_dir, certs_dir.join("certs"));
}
result
}
fn try(dst: &mut Option<Path>, val: Path) {
if dst.is_none() && val.exists() {
*dst = Some(val);
}
}
|
}
|
random_line_split
|
response_parameters.rs
|
use serde::de::{Deserialize, Deserializer, Error};
use crate::types::*;
/// All API responses are from this type. Mostly used internal.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub enum ResponseWrapper<T> {
/// Request was successful.
Success {
/// Response result.
result: T,
},
|
/// Request was unsuccessful.
Error {
/// Human-readable description of the result.
description: String,
/// Contains information about why a request was unsuccessful.
parameters: Option<ResponseParameters>,
},
}
impl<'de, T: Deserialize<'de>> Deserialize<'de> for ResponseWrapper<T> {
fn deserialize<D>(deserializer: D) -> Result<ResponseWrapper<T>, D::Error>
where
D: Deserializer<'de>,
{
let raw: RawResponse<T> = Deserialize::deserialize(deserializer)?;
match (raw.ok, raw.description, raw.result) {
(false, Some(description), None) => Ok(ResponseWrapper::Error {
description: description,
parameters: raw.parameters,
}),
(true, None, Some(result)) => Ok(ResponseWrapper::Success { result: result }),
_ => Err(D::Error::custom("ambiguous response")),
}
}
}
/// Directly mapped telegram API response.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Deserialize)]
pub struct RawResponse<T> {
/// If ‘ok’ equals true, the request was successful.
ok: bool,
/// Human-readable description of the result.
description: Option<String>,
/// Result of the query.
result: Option<T>,
/// Information about why a request was unsuccessful.
parameters: Option<ResponseParameters>,
}
/// Contains information about why a request was unsuccessful.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Deserialize)]
pub struct ResponseParameters {
/// The group has been migrated to a supergroup with the specified identifier.
pub migrate_to_chat_id: Option<Integer>,
/// In case of exceeding flood control, the number of seconds left to wait
/// before the request can be repeated.
pub retry_after: Option<Integer>,
}
|
random_line_split
|
|
response_parameters.rs
|
use serde::de::{Deserialize, Deserializer, Error};
use crate::types::*;
/// All API responses are from this type. Mostly used internal.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub enum ResponseWrapper<T> {
/// Request was successful.
Success {
/// Response result.
result: T,
},
/// Request was unsuccessful.
Error {
/// Human-readable description of the result.
description: String,
/// Contains information about why a request was unsuccessful.
parameters: Option<ResponseParameters>,
},
}
impl<'de, T: Deserialize<'de>> Deserialize<'de> for ResponseWrapper<T> {
fn deserialize<D>(deserializer: D) -> Result<ResponseWrapper<T>, D::Error>
where
D: Deserializer<'de>,
{
let raw: RawResponse<T> = Deserialize::deserialize(deserializer)?;
match (raw.ok, raw.description, raw.result) {
(false, Some(description), None) => Ok(ResponseWrapper::Error {
description: description,
parameters: raw.parameters,
}),
(true, None, Some(result)) => Ok(ResponseWrapper::Success { result: result }),
_ => Err(D::Error::custom("ambiguous response")),
}
}
}
/// Directly mapped telegram API response.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Deserialize)]
pub struct
|
<T> {
/// If ‘ok’ equals true, the request was successful.
ok: bool,
/// Human-readable description of the result.
description: Option<String>,
/// Result of the query.
result: Option<T>,
/// Information about why a request was unsuccessful.
parameters: Option<ResponseParameters>,
}
/// Contains information about why a request was unsuccessful.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Deserialize)]
pub struct ResponseParameters {
/// The group has been migrated to a supergroup with the specified identifier.
pub migrate_to_chat_id: Option<Integer>,
/// In case of exceeding flood control, the number of seconds left to wait
/// before the request can be repeated.
pub retry_after: Option<Integer>,
}
|
RawResponse
|
identifier_name
|
blocks.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Block chunker and rebuilder tests.
use devtools::RandomTempPath;
use error::Error;
use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
use blockchain::BlockChain;
use snapshot::{chunk_blocks, BlockRebuilder, Error as SnapshotError, Progress};
use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
use util::{Mutex, snappy};
use util::kvdb::{Database, DatabaseConfig};
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
fn
|
(amount: u64) {
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
let orig_path = RandomTempPath::create_dir();
let new_path = RandomTempPath::create_dir();
let mut snapshot_path = new_path.as_path().to_owned();
snapshot_path.push("SNAP");
let old_db = Arc::new(Database::open(&db_cfg, orig_path.as_str()).unwrap());
let bc = BlockChain::new(Default::default(), &genesis, old_db.clone());
// build the blockchain.
let mut batch = old_db.transaction();
for _ in 0..amount {
let block = canon_chain.generate(&mut finalizer).unwrap();
bc.insert_block(&mut batch, &block, vec![]);
bc.commit();
}
old_db.write(batch).unwrap();
let best_hash = bc.best_block_hash();
// snapshot it.
let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap());
let block_hashes = chunk_blocks(&bc, best_hash, &writer, &Progress::default()).unwrap();
let manifest = ::snapshot::ManifestData {
state_hashes: Vec::new(),
block_hashes: block_hashes,
state_root: ::util::sha3::SHA3_NULL_RLP,
block_number: amount,
block_hash: best_hash,
};
writer.into_inner().finish(manifest.clone()).unwrap();
// restore it.
let new_db = Arc::new(Database::open(&db_cfg, new_path.as_str()).unwrap());
let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone());
let mut rebuilder = BlockRebuilder::new(new_chain, new_db.clone(), &manifest).unwrap();
let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
let engine = ::engines::NullEngine::new(Default::default(), Default::default());
let flag = AtomicBool::new(true);
for chunk_hash in &reader.manifest().block_hashes {
let compressed = reader.chunk(*chunk_hash).unwrap();
let chunk = snappy::decompress(&compressed).unwrap();
rebuilder.feed(&chunk, &engine, &flag).unwrap();
}
rebuilder.finalize(HashMap::new()).unwrap();
// and test it.
let new_chain = BlockChain::new(Default::default(), &genesis, new_db);
assert_eq!(new_chain.best_block_hash(), best_hash);
}
#[test]
fn chunk_and_restore_500() { chunk_and_restore(500) }
#[test]
fn chunk_and_restore_40k() { chunk_and_restore(40000) }
#[test]
fn checks_flag() {
use ::rlp::{RlpStream, Stream};
use util::H256;
let mut stream = RlpStream::new_list(5);
stream.append(&100u64)
.append(&H256::default())
.append(&(!0u64));
stream.append_empty_data().append_empty_data();
let genesis = {
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
canon_chain.generate(&mut finalizer).unwrap()
};
let chunk = stream.out();
let path = RandomTempPath::create_dir();
let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
let db = Arc::new(Database::open(&db_cfg, path.as_str()).unwrap());
let chain = BlockChain::new(Default::default(), &genesis, db.clone());
let engine = ::engines::NullEngine::new(Default::default(), Default::default());
let manifest = ::snapshot::ManifestData {
state_hashes: Vec::new(),
block_hashes: Vec::new(),
state_root: ::util::sha3::SHA3_NULL_RLP,
block_number: 102,
block_hash: H256::default(),
};
let mut rebuilder = BlockRebuilder::new(chain, db.clone(), &manifest).unwrap();
match rebuilder.feed(&chunk, &engine, &AtomicBool::new(false)) {
Err(Error::Snapshot(SnapshotError::RestorationAborted)) => {}
_ => panic!("Wrong result on abort flag set")
}
}
|
chunk_and_restore
|
identifier_name
|
blocks.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Block chunker and rebuilder tests.
use devtools::RandomTempPath;
use error::Error;
use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
use blockchain::BlockChain;
use snapshot::{chunk_blocks, BlockRebuilder, Error as SnapshotError, Progress};
use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
use util::{Mutex, snappy};
use util::kvdb::{Database, DatabaseConfig};
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
fn chunk_and_restore(amount: u64) {
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
let orig_path = RandomTempPath::create_dir();
let new_path = RandomTempPath::create_dir();
let mut snapshot_path = new_path.as_path().to_owned();
snapshot_path.push("SNAP");
let old_db = Arc::new(Database::open(&db_cfg, orig_path.as_str()).unwrap());
let bc = BlockChain::new(Default::default(), &genesis, old_db.clone());
// build the blockchain.
let mut batch = old_db.transaction();
for _ in 0..amount {
let block = canon_chain.generate(&mut finalizer).unwrap();
bc.insert_block(&mut batch, &block, vec![]);
bc.commit();
}
old_db.write(batch).unwrap();
let best_hash = bc.best_block_hash();
// snapshot it.
let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap());
let block_hashes = chunk_blocks(&bc, best_hash, &writer, &Progress::default()).unwrap();
let manifest = ::snapshot::ManifestData {
state_hashes: Vec::new(),
block_hashes: block_hashes,
state_root: ::util::sha3::SHA3_NULL_RLP,
block_number: amount,
block_hash: best_hash,
};
writer.into_inner().finish(manifest.clone()).unwrap();
// restore it.
let new_db = Arc::new(Database::open(&db_cfg, new_path.as_str()).unwrap());
let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone());
let mut rebuilder = BlockRebuilder::new(new_chain, new_db.clone(), &manifest).unwrap();
let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
let engine = ::engines::NullEngine::new(Default::default(), Default::default());
let flag = AtomicBool::new(true);
for chunk_hash in &reader.manifest().block_hashes {
let compressed = reader.chunk(*chunk_hash).unwrap();
let chunk = snappy::decompress(&compressed).unwrap();
rebuilder.feed(&chunk, &engine, &flag).unwrap();
}
rebuilder.finalize(HashMap::new()).unwrap();
// and test it.
let new_chain = BlockChain::new(Default::default(), &genesis, new_db);
assert_eq!(new_chain.best_block_hash(), best_hash);
}
#[test]
fn chunk_and_restore_500()
|
#[test]
fn chunk_and_restore_40k() { chunk_and_restore(40000) }
#[test]
fn checks_flag() {
use ::rlp::{RlpStream, Stream};
use util::H256;
let mut stream = RlpStream::new_list(5);
stream.append(&100u64)
.append(&H256::default())
.append(&(!0u64));
stream.append_empty_data().append_empty_data();
let genesis = {
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
canon_chain.generate(&mut finalizer).unwrap()
};
let chunk = stream.out();
let path = RandomTempPath::create_dir();
let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
let db = Arc::new(Database::open(&db_cfg, path.as_str()).unwrap());
let chain = BlockChain::new(Default::default(), &genesis, db.clone());
let engine = ::engines::NullEngine::new(Default::default(), Default::default());
let manifest = ::snapshot::ManifestData {
state_hashes: Vec::new(),
block_hashes: Vec::new(),
state_root: ::util::sha3::SHA3_NULL_RLP,
block_number: 102,
block_hash: H256::default(),
};
let mut rebuilder = BlockRebuilder::new(chain, db.clone(), &manifest).unwrap();
match rebuilder.feed(&chunk, &engine, &AtomicBool::new(false)) {
Err(Error::Snapshot(SnapshotError::RestorationAborted)) => {}
_ => panic!("Wrong result on abort flag set")
}
}
|
{ chunk_and_restore(500) }
|
identifier_body
|
blocks.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Block chunker and rebuilder tests.
use devtools::RandomTempPath;
use error::Error;
use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
use blockchain::BlockChain;
use snapshot::{chunk_blocks, BlockRebuilder, Error as SnapshotError, Progress};
use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
use util::{Mutex, snappy};
use util::kvdb::{Database, DatabaseConfig};
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
fn chunk_and_restore(amount: u64) {
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
let orig_path = RandomTempPath::create_dir();
let new_path = RandomTempPath::create_dir();
let mut snapshot_path = new_path.as_path().to_owned();
snapshot_path.push("SNAP");
let old_db = Arc::new(Database::open(&db_cfg, orig_path.as_str()).unwrap());
let bc = BlockChain::new(Default::default(), &genesis, old_db.clone());
// build the blockchain.
let mut batch = old_db.transaction();
for _ in 0..amount {
let block = canon_chain.generate(&mut finalizer).unwrap();
bc.insert_block(&mut batch, &block, vec![]);
bc.commit();
}
old_db.write(batch).unwrap();
let best_hash = bc.best_block_hash();
// snapshot it.
let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap());
let block_hashes = chunk_blocks(&bc, best_hash, &writer, &Progress::default()).unwrap();
let manifest = ::snapshot::ManifestData {
state_hashes: Vec::new(),
block_hashes: block_hashes,
state_root: ::util::sha3::SHA3_NULL_RLP,
block_number: amount,
block_hash: best_hash,
};
writer.into_inner().finish(manifest.clone()).unwrap();
// restore it.
let new_db = Arc::new(Database::open(&db_cfg, new_path.as_str()).unwrap());
let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone());
let mut rebuilder = BlockRebuilder::new(new_chain, new_db.clone(), &manifest).unwrap();
let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
let engine = ::engines::NullEngine::new(Default::default(), Default::default());
let flag = AtomicBool::new(true);
for chunk_hash in &reader.manifest().block_hashes {
let compressed = reader.chunk(*chunk_hash).unwrap();
let chunk = snappy::decompress(&compressed).unwrap();
rebuilder.feed(&chunk, &engine, &flag).unwrap();
}
rebuilder.finalize(HashMap::new()).unwrap();
// and test it.
let new_chain = BlockChain::new(Default::default(), &genesis, new_db);
assert_eq!(new_chain.best_block_hash(), best_hash);
}
#[test]
fn chunk_and_restore_500() { chunk_and_restore(500) }
#[test]
fn chunk_and_restore_40k() { chunk_and_restore(40000) }
|
fn checks_flag() {
use ::rlp::{RlpStream, Stream};
use util::H256;
let mut stream = RlpStream::new_list(5);
stream.append(&100u64)
.append(&H256::default())
.append(&(!0u64));
stream.append_empty_data().append_empty_data();
let genesis = {
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
canon_chain.generate(&mut finalizer).unwrap()
};
let chunk = stream.out();
let path = RandomTempPath::create_dir();
let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
let db = Arc::new(Database::open(&db_cfg, path.as_str()).unwrap());
let chain = BlockChain::new(Default::default(), &genesis, db.clone());
let engine = ::engines::NullEngine::new(Default::default(), Default::default());
let manifest = ::snapshot::ManifestData {
state_hashes: Vec::new(),
block_hashes: Vec::new(),
state_root: ::util::sha3::SHA3_NULL_RLP,
block_number: 102,
block_hash: H256::default(),
};
let mut rebuilder = BlockRebuilder::new(chain, db.clone(), &manifest).unwrap();
match rebuilder.feed(&chunk, &engine, &AtomicBool::new(false)) {
Err(Error::Snapshot(SnapshotError::RestorationAborted)) => {}
_ => panic!("Wrong result on abort flag set")
}
}
|
#[test]
|
random_line_split
|
main.rs
|
/*
Copyright © 2021 Alastair Feille
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at https://mozilla.org/MPL/2.0/.
SPDX-License-Identifier: MPL-2.0
*/
use std::{env,
fs,
io,
io::Write,
path::PathBuf,
process,
process::Command};
use chrono::Local;
mod history;
fn m
|
)
{
let session_time = Local::now();
let mut fallback_mode = false;
ctrlc::set_handler(|| ()).expect("Error setting Ctrl-C handler");
loop
{
let cwd: String = env::current_dir().map(|p| {
// Attempt to canonicalize the path else return "???"
fs::canonicalize(p).unwrap_or("???".into())
.to_string_lossy()
.into()
})
// Attempt to get the current path else return "???"
.unwrap_or("???".into());
// Print prompt
print!("{}{}@{}:{}> ",
if fallback_mode { "(bash) " } else { "" },
whoami::username(),
whoami::hostname(),
cwd);
io::stdout().flush().expect("failed to print prompt");
// Read in line
let mut line = String::new();
io::stdin().read_line(&mut line)
.expect("failed to read from stdin");
let original_line = line.clone();
let line = line.trim();
// Skip empty lines
if line.is_empty()
{
continue;
}
if line == "\u{0014}"
{
// toggle fallback mode
fallback_mode =!fallback_mode;
continue;
}
// Split line into command and arguments
let tokens: Vec<&str> = line.split_whitespace().collect();
let (head, args) = tokens.split_at(1);
if let Some(cmd) = head.get(0)
{
// If the line doesn't start with a space
if!original_line.starts_with(char::is_whitespace)
{
history::log(session_time,
Local::now(),
line.to_string(),
env::current_dir().ok(),
fallback_mode);
}
if cmd.to_string() == "exit"
{
process::exit(0);
}
if cmd.to_string() == "cd"
{
let path_string = args.join(" ");
// if no directory is given
let p = if path_string.trim().is_empty()
{
// change into the home directory
dirs::home_dir().expect("can't get home directory")
}
else
{
// use the given directory
PathBuf::from(path_string)
};
if let Err(e) = env::set_current_dir(&p)
{
eprintln!("mush: cd: {}: {}", p.display(), e);
}
continue;
}
let (cmd, args) = if fallback_mode
{
let aliases =
String::from_utf8_lossy(
&Command::new("bash").args(vec![
"-c",
"source ~/.bash_aliases 2> /dev/null; alias",
])
.output()
.expect("failed to load aliases")
.stdout,
).to_string();
(&"bash",
vec!["-c".to_string(),
format!("shopt -s expand_aliases\n{}\n{}", aliases, line)])
}
else
{
let args: Vec<String> = args.iter().map(|s| s.to_string()).collect();
(cmd, args)
};
match Command::new(cmd).args(args).spawn()
{
Ok(mut child) =>
{
let _ecode = child.wait().expect("failed to wait on child");
},
Err(e) => eprintln!("mush: could not run command {}: {}", cmd, e),
}
}
}
}
|
ain(
|
identifier_name
|
main.rs
|
/*
Copyright © 2021 Alastair Feille
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at https://mozilla.org/MPL/2.0/.
SPDX-License-Identifier: MPL-2.0
*/
use std::{env,
fs,
io,
io::Write,
path::PathBuf,
process,
process::Command};
use chrono::Local;
mod history;
fn main()
{
|
whoami::hostname(),
cwd);
io::stdout().flush().expect("failed to print prompt");
// Read in line
let mut line = String::new();
io::stdin().read_line(&mut line)
.expect("failed to read from stdin");
let original_line = line.clone();
let line = line.trim();
// Skip empty lines
if line.is_empty()
{
continue;
}
if line == "\u{0014}"
{
// toggle fallback mode
fallback_mode =!fallback_mode;
continue;
}
// Split line into command and arguments
let tokens: Vec<&str> = line.split_whitespace().collect();
let (head, args) = tokens.split_at(1);
if let Some(cmd) = head.get(0)
{
// If the line doesn't start with a space
if!original_line.starts_with(char::is_whitespace)
{
history::log(session_time,
Local::now(),
line.to_string(),
env::current_dir().ok(),
fallback_mode);
}
if cmd.to_string() == "exit"
{
process::exit(0);
}
if cmd.to_string() == "cd"
{
let path_string = args.join(" ");
// if no directory is given
let p = if path_string.trim().is_empty()
{
// change into the home directory
dirs::home_dir().expect("can't get home directory")
}
else
{
// use the given directory
PathBuf::from(path_string)
};
if let Err(e) = env::set_current_dir(&p)
{
eprintln!("mush: cd: {}: {}", p.display(), e);
}
continue;
}
let (cmd, args) = if fallback_mode
{
let aliases =
String::from_utf8_lossy(
&Command::new("bash").args(vec![
"-c",
"source ~/.bash_aliases 2> /dev/null; alias",
])
.output()
.expect("failed to load aliases")
.stdout,
).to_string();
(&"bash",
vec!["-c".to_string(),
format!("shopt -s expand_aliases\n{}\n{}", aliases, line)])
}
else
{
let args: Vec<String> = args.iter().map(|s| s.to_string()).collect();
(cmd, args)
};
match Command::new(cmd).args(args).spawn()
{
Ok(mut child) =>
{
let _ecode = child.wait().expect("failed to wait on child");
},
Err(e) => eprintln!("mush: could not run command {}: {}", cmd, e),
}
}
}
}
|
let session_time = Local::now();
let mut fallback_mode = false;
ctrlc::set_handler(|| ()).expect("Error setting Ctrl-C handler");
loop
{
let cwd: String = env::current_dir().map(|p| {
// Attempt to canonicalize the path else return "???"
fs::canonicalize(p).unwrap_or("???".into())
.to_string_lossy()
.into()
})
// Attempt to get the current path else return "???"
.unwrap_or("???".into());
// Print prompt
print!("{}{}@{}:{}> ",
if fallback_mode { "(bash) " } else { "" },
whoami::username(),
|
identifier_body
|
main.rs
|
/*
Copyright © 2021 Alastair Feille
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at https://mozilla.org/MPL/2.0/.
SPDX-License-Identifier: MPL-2.0
*/
use std::{env,
fs,
io,
io::Write,
path::PathBuf,
process,
process::Command};
use chrono::Local;
mod history;
fn main()
{
let session_time = Local::now();
let mut fallback_mode = false;
ctrlc::set_handler(|| ()).expect("Error setting Ctrl-C handler");
loop
{
let cwd: String = env::current_dir().map(|p| {
// Attempt to canonicalize the path else return "???"
fs::canonicalize(p).unwrap_or("???".into())
.to_string_lossy()
.into()
})
// Attempt to get the current path else return "???"
.unwrap_or("???".into());
// Print prompt
print!("{}{}@{}:{}> ",
if fallback_mode { "(bash) " } else { "" },
whoami::username(),
whoami::hostname(),
cwd);
io::stdout().flush().expect("failed to print prompt");
// Read in line
let mut line = String::new();
io::stdin().read_line(&mut line)
.expect("failed to read from stdin");
let original_line = line.clone();
let line = line.trim();
// Skip empty lines
if line.is_empty()
{
continue;
}
if line == "\u{0014}"
{
// toggle fallback mode
fallback_mode =!fallback_mode;
continue;
}
// Split line into command and arguments
let tokens: Vec<&str> = line.split_whitespace().collect();
let (head, args) = tokens.split_at(1);
if let Some(cmd) = head.get(0)
{
// If the line doesn't start with a space
if!original_line.starts_with(char::is_whitespace)
{
history::log(session_time,
Local::now(),
line.to_string(),
env::current_dir().ok(),
fallback_mode);
}
if cmd.to_string() == "exit"
{
process::exit(0);
}
if cmd.to_string() == "cd"
{
let path_string = args.join(" ");
// if no directory is given
let p = if path_string.trim().is_empty()
{
// change into the home directory
dirs::home_dir().expect("can't get home directory")
}
else
{
// use the given directory
PathBuf::from(path_string)
};
if let Err(e) = env::set_current_dir(&p)
{
eprintln!("mush: cd: {}: {}", p.display(), e);
}
continue;
}
let (cmd, args) = if fallback_mode
{
let aliases =
String::from_utf8_lossy(
&Command::new("bash").args(vec![
"-c",
"source ~/.bash_aliases 2> /dev/null; alias",
|
).to_string();
(&"bash",
vec!["-c".to_string(),
format!("shopt -s expand_aliases\n{}\n{}", aliases, line)])
}
else
{
let args: Vec<String> = args.iter().map(|s| s.to_string()).collect();
(cmd, args)
};
match Command::new(cmd).args(args).spawn()
{
Ok(mut child) =>
{
let _ecode = child.wait().expect("failed to wait on child");
},
Err(e) => eprintln!("mush: could not run command {}: {}", cmd, e),
}
}
}
}
|
])
.output()
.expect("failed to load aliases")
.stdout,
|
random_line_split
|
raw.rs
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
use crate::common::*;
use crate::user;
use chrono;
use serde::Deserialize;
use super::DMEntities;
#[derive(Debug, Deserialize)]
pub struct RawDirectMessage {
///Numeric ID for this DM.
pub id: u64,
///UTC timestamp from when this DM was created.
#[serde(deserialize_with = "deserialize_datetime")]
pub created_at: chrono::DateTime<chrono::Utc>,
///The text of the DM.
pub text: String,
///Link, hashtag, and user mention information parsed out of the DM.
pub entities: DMEntities,
///The screen name of the user who sent the DM.
pub sender_screen_name: String,
///The ID of the user who sent the DM.
pub sender_id: u64,
|
///The screen name of the user who received the DM.
pub recipient_screen_name: String,
///The ID of the user who received the DM.
pub recipient_id: u64,
///Full information for the user who received the DM.
pub recipient: Box<user::TwitterUser>,
}
|
///Full information of the user who sent the DM.
pub sender: Box<user::TwitterUser>,
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.