file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
advapi32.rs
|
// Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md>
#![feature(test)]
#![cfg(windows)]
extern crate advapi32;
extern crate test;
use advapi32::*;
use test::black_box as bb;
#[test]
fn functions() {
|
bb(QueryServiceStatusEx);
bb(RegCloseKey);
bb(RegConnectRegistryA);
bb(RegConnectRegistryW);
bb(RegCopyTreeA);
bb(RegCopyTreeW);
bb(RegCreateKeyExA);
bb(RegCreateKeyExW);
bb(RegDeleteKeyA);
bb(RegDeleteKeyExA);
bb(RegDeleteKeyExW);
bb(RegDeleteKeyValueA);
bb(RegDeleteKeyValueW);
bb(RegDeleteKeyW);
bb(RegDeleteTreeA);
bb(RegDeleteTreeW);
bb(RegDeleteValueA);
bb(RegDeleteValueW);
bb(RegDisablePredefinedCache);
bb(RegDisablePredefinedCacheEx);
bb(RegDisableReflectionKey);
bb(RegEnableReflectionKey);
bb(RegEnumKeyExA);
bb(RegEnumKeyExW);
bb(RegEnumValueA);
bb(RegEnumValueW);
bb(RegFlushKey);
bb(RegGetValueA);
bb(RegGetValueW);
bb(RegLoadMUIStringW);
bb(RegNotifyChangeKeyValue);
bb(RegOpenCurrentUser);
bb(RegOpenKeyExA);
bb(RegOpenKeyExW);
bb(RegOpenUserClassesRoot);
bb(RegOverridePredefKey);
bb(RegQueryInfoKeyA);
bb(RegQueryInfoKeyW);
bb(RegQueryMultipleValuesA);
bb(RegQueryMultipleValuesW);
bb(RegQueryReflectionKey);
bb(RegQueryValueExA);
bb(RegQueryValueExW);
bb(RegSetKeyValueA);
bb(RegSetValueExA);
bb(RegSetValueExW);
bb(RegSetKeyValueW);
bb(RegisterServiceCtrlHandlerA);
bb(RegisterServiceCtrlHandlerExA);
bb(RegisterServiceCtrlHandlerExW);
bb(RegisterServiceCtrlHandlerW);
bb(SetServiceStatus);
bb(StartServiceCtrlDispatcherA);
bb(StartServiceCtrlDispatcherW);
}
|
bb(AdjustTokenPrivileges);
bb(CloseServiceHandle);
bb(ControlService);
bb(CreateServiceA);
bb(CreateServiceW);
bb(CryptAcquireContextA);
bb(CryptAcquireContextW);
bb(CryptCreateHash);
bb(CryptDestroyHash);
bb(CryptGetHashParam);
bb(CryptHashData);
bb(CryptReleaseContext);
bb(DeleteService);
bb(OpenProcessToken);
bb(OpenSCManagerA);
bb(OpenSCManagerW);
bb(OpenServiceA);
bb(OpenServiceW);
bb(QueryServiceStatus);
|
identifier_body
|
advapi32.rs
|
// Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md>
#![feature(test)]
#![cfg(windows)]
extern crate advapi32;
extern crate test;
use advapi32::*;
use test::black_box as bb;
#[test]
fn functions() {
bb(AdjustTokenPrivileges);
bb(CloseServiceHandle);
bb(ControlService);
bb(CreateServiceA);
bb(CreateServiceW);
bb(CryptAcquireContextA);
bb(CryptAcquireContextW);
bb(CryptCreateHash);
bb(CryptDestroyHash);
bb(CryptGetHashParam);
bb(CryptHashData);
bb(CryptReleaseContext);
bb(DeleteService);
bb(OpenProcessToken);
bb(OpenSCManagerA);
bb(OpenSCManagerW);
bb(OpenServiceA);
bb(OpenServiceW);
bb(QueryServiceStatus);
bb(QueryServiceStatusEx);
bb(RegCloseKey);
bb(RegConnectRegistryA);
bb(RegConnectRegistryW);
bb(RegCopyTreeA);
bb(RegCopyTreeW);
bb(RegCreateKeyExA);
bb(RegCreateKeyExW);
bb(RegDeleteKeyA);
bb(RegDeleteKeyExA);
bb(RegDeleteKeyExW);
bb(RegDeleteKeyValueA);
bb(RegDeleteKeyValueW);
bb(RegDeleteKeyW);
bb(RegDeleteTreeA);
bb(RegDeleteTreeW);
bb(RegDeleteValueA);
bb(RegDeleteValueW);
bb(RegDisablePredefinedCache);
bb(RegDisablePredefinedCacheEx);
bb(RegDisableReflectionKey);
bb(RegEnableReflectionKey);
bb(RegEnumKeyExA);
bb(RegEnumKeyExW);
bb(RegEnumValueA);
bb(RegEnumValueW);
bb(RegFlushKey);
bb(RegGetValueA);
bb(RegGetValueW);
bb(RegLoadMUIStringW);
bb(RegNotifyChangeKeyValue);
bb(RegOpenCurrentUser);
bb(RegOpenKeyExA);
bb(RegOpenKeyExW);
bb(RegOpenUserClassesRoot);
bb(RegOverridePredefKey);
|
bb(RegQueryMultipleValuesA);
bb(RegQueryMultipleValuesW);
bb(RegQueryReflectionKey);
bb(RegQueryValueExA);
bb(RegQueryValueExW);
bb(RegSetKeyValueA);
bb(RegSetValueExA);
bb(RegSetValueExW);
bb(RegSetKeyValueW);
bb(RegisterServiceCtrlHandlerA);
bb(RegisterServiceCtrlHandlerExA);
bb(RegisterServiceCtrlHandlerExW);
bb(RegisterServiceCtrlHandlerW);
bb(SetServiceStatus);
bb(StartServiceCtrlDispatcherA);
bb(StartServiceCtrlDispatcherW);
}
|
bb(RegQueryInfoKeyA);
bb(RegQueryInfoKeyW);
|
random_line_split
|
basic.rs
|
#![feature(core)]
extern crate cql_ffi_safe;
extern crate cql_ffi;
use cql_ffi_safe::*;
#[derive(Debug,Copy,PartialEq)]
struct Basic {
bln:bool,
flt:f32,
dbl:f64,
i32:i32,
i64:i64,
}
static INSERT_QUERY_CMD:&'static str = "INSERT INTO examples.basic (key, bln, flt, dbl, i32, i64) VALUES (?,?,?,?,?,?);";
static SELECT_QUERY_CMD:&'static str = "SELECT * FROM examples.basic WHERE key =?";
static CREATE_KEYSPACE_CMD:&'static str = "CREATE KEYSPACE IF NOT EXISTS examples WITH replication = { 'class': 'SimpleStrategy','replication_factor': '3' };";
static CREATE_TABLE_CMD:&'static str = "CREATE TABLE IF NOT EXISTS examples.basic (key text, bln boolean, flt float, dbl double, i32 int, i64 bigint, PRIMARY KEY (key));";
static CONTACT_POINTS:&'static str = "127.0.0.1,127.0.0.2,127.0.0.3";
fn execute_query(session: &mut CassSession, query: &str) -> Result<(),CassError> {
let statement = CassStatement::new(query,0);
let mut future = session.execute(statement);
future.wait();
future.error_code()
}
fn insert_into_basic(session:&mut CassSession, key:&str, basic:Basic) -> Result<(),CassError> {
use cql_ffi_safe::CassBindable::*;
let query=INSERT_QUERY_CMD;
let mut statement = CassStatement::new(query, 7);
//FIXME with a macro that automatically does this for an arbitrary struct
try!(statement.bind_all(vec!(
STR(key.to_string()),
BOOL(basic.bln),
F32(basic.flt),
F64(basic.dbl),
I32(basic.i32),
|
future.wait();
try!(future.error_code());
Ok(())
}
fn select_from_basic(session:&mut CassSession, key:&str) -> Result<Basic,CassError> {
let query = SELECT_QUERY_CMD;
let mut statement = CassStatement::new(query, 1);
try!(statement.bind_string(0, key));
let result_iter = session.execute(statement)
.wait()
.get_result()
.unwrap()
.iter();
for row in result_iter {
return Ok(Basic{
//FIXME use FromCol once https://github.com/rust-lang/rust/issues/22037 is fixed
bln:try!(row.get_column(1).get_bool()),
dbl:try!(row.get_column(2).get_double()),
flt:try!(row.get_column(3).get_float()),
i32:try!(row.get_column(4).get_int32()),
i64:try!(row.get_column(5).get_int64())
});
}
panic!("no results");
}
fn main() {
match CassCluster::new().set_contact_points(CONTACT_POINTS) {
Err(err) => panic!("err: {:?}", err),
Ok(cluster) => {
let mut session = CassSession::new();
let input = Basic{bln:true, flt:0.001f32, dbl:0.0002f64, i32:1, i64:2 };
session.connect(cluster).wait();
session.execute(CassStatement::new(CREATE_KEYSPACE_CMD, 0));
match execute_query(&mut session, CREATE_TABLE_CMD) {
Ok(_) => {}
Err(err) => panic!("err: {:?}", err),
}
match insert_into_basic(&mut session,"test", input) {
Ok(response) => println!("insert response {:?}",response),
Err(err) => {panic!("insert err {:?}",err)}
}
match select_from_basic(&mut session,"test") {
Ok(output) => assert!(input == output),
Err(err) => panic!("select err: {:?}", err),
}
session.close().wait();
}
}
}
|
I64(basic.i64)
)));
let mut future = session.execute(statement);
|
random_line_split
|
basic.rs
|
#![feature(core)]
extern crate cql_ffi_safe;
extern crate cql_ffi;
use cql_ffi_safe::*;
#[derive(Debug,Copy,PartialEq)]
struct
|
{
bln:bool,
flt:f32,
dbl:f64,
i32:i32,
i64:i64,
}
static INSERT_QUERY_CMD:&'static str = "INSERT INTO examples.basic (key, bln, flt, dbl, i32, i64) VALUES (?,?,?,?,?,?);";
static SELECT_QUERY_CMD:&'static str = "SELECT * FROM examples.basic WHERE key =?";
static CREATE_KEYSPACE_CMD:&'static str = "CREATE KEYSPACE IF NOT EXISTS examples WITH replication = { 'class': 'SimpleStrategy','replication_factor': '3' };";
static CREATE_TABLE_CMD:&'static str = "CREATE TABLE IF NOT EXISTS examples.basic (key text, bln boolean, flt float, dbl double, i32 int, i64 bigint, PRIMARY KEY (key));";
static CONTACT_POINTS:&'static str = "127.0.0.1,127.0.0.2,127.0.0.3";
fn execute_query(session: &mut CassSession, query: &str) -> Result<(),CassError> {
let statement = CassStatement::new(query,0);
let mut future = session.execute(statement);
future.wait();
future.error_code()
}
fn insert_into_basic(session:&mut CassSession, key:&str, basic:Basic) -> Result<(),CassError> {
use cql_ffi_safe::CassBindable::*;
let query=INSERT_QUERY_CMD;
let mut statement = CassStatement::new(query, 7);
//FIXME with a macro that automatically does this for an arbitrary struct
try!(statement.bind_all(vec!(
STR(key.to_string()),
BOOL(basic.bln),
F32(basic.flt),
F64(basic.dbl),
I32(basic.i32),
I64(basic.i64)
)));
let mut future = session.execute(statement);
future.wait();
try!(future.error_code());
Ok(())
}
fn select_from_basic(session:&mut CassSession, key:&str) -> Result<Basic,CassError> {
let query = SELECT_QUERY_CMD;
let mut statement = CassStatement::new(query, 1);
try!(statement.bind_string(0, key));
let result_iter = session.execute(statement)
.wait()
.get_result()
.unwrap()
.iter();
for row in result_iter {
return Ok(Basic{
//FIXME use FromCol once https://github.com/rust-lang/rust/issues/22037 is fixed
bln:try!(row.get_column(1).get_bool()),
dbl:try!(row.get_column(2).get_double()),
flt:try!(row.get_column(3).get_float()),
i32:try!(row.get_column(4).get_int32()),
i64:try!(row.get_column(5).get_int64())
});
}
panic!("no results");
}
fn main() {
match CassCluster::new().set_contact_points(CONTACT_POINTS) {
Err(err) => panic!("err: {:?}", err),
Ok(cluster) => {
let mut session = CassSession::new();
let input = Basic{bln:true, flt:0.001f32, dbl:0.0002f64, i32:1, i64:2 };
session.connect(cluster).wait();
session.execute(CassStatement::new(CREATE_KEYSPACE_CMD, 0));
match execute_query(&mut session, CREATE_TABLE_CMD) {
Ok(_) => {}
Err(err) => panic!("err: {:?}", err),
}
match insert_into_basic(&mut session,"test", input) {
Ok(response) => println!("insert response {:?}",response),
Err(err) => {panic!("insert err {:?}",err)}
}
match select_from_basic(&mut session,"test") {
Ok(output) => assert!(input == output),
Err(err) => panic!("select err: {:?}", err),
}
session.close().wait();
}
}
}
|
Basic
|
identifier_name
|
basic.rs
|
#![feature(core)]
extern crate cql_ffi_safe;
extern crate cql_ffi;
use cql_ffi_safe::*;
#[derive(Debug,Copy,PartialEq)]
struct Basic {
bln:bool,
flt:f32,
dbl:f64,
i32:i32,
i64:i64,
}
static INSERT_QUERY_CMD:&'static str = "INSERT INTO examples.basic (key, bln, flt, dbl, i32, i64) VALUES (?,?,?,?,?,?);";
static SELECT_QUERY_CMD:&'static str = "SELECT * FROM examples.basic WHERE key =?";
static CREATE_KEYSPACE_CMD:&'static str = "CREATE KEYSPACE IF NOT EXISTS examples WITH replication = { 'class': 'SimpleStrategy','replication_factor': '3' };";
static CREATE_TABLE_CMD:&'static str = "CREATE TABLE IF NOT EXISTS examples.basic (key text, bln boolean, flt float, dbl double, i32 int, i64 bigint, PRIMARY KEY (key));";
static CONTACT_POINTS:&'static str = "127.0.0.1,127.0.0.2,127.0.0.3";
fn execute_query(session: &mut CassSession, query: &str) -> Result<(),CassError> {
let statement = CassStatement::new(query,0);
let mut future = session.execute(statement);
future.wait();
future.error_code()
}
fn insert_into_basic(session:&mut CassSession, key:&str, basic:Basic) -> Result<(),CassError> {
use cql_ffi_safe::CassBindable::*;
let query=INSERT_QUERY_CMD;
let mut statement = CassStatement::new(query, 7);
//FIXME with a macro that automatically does this for an arbitrary struct
try!(statement.bind_all(vec!(
STR(key.to_string()),
BOOL(basic.bln),
F32(basic.flt),
F64(basic.dbl),
I32(basic.i32),
I64(basic.i64)
)));
let mut future = session.execute(statement);
future.wait();
try!(future.error_code());
Ok(())
}
fn select_from_basic(session:&mut CassSession, key:&str) -> Result<Basic,CassError> {
let query = SELECT_QUERY_CMD;
let mut statement = CassStatement::new(query, 1);
try!(statement.bind_string(0, key));
let result_iter = session.execute(statement)
.wait()
.get_result()
.unwrap()
.iter();
for row in result_iter {
return Ok(Basic{
//FIXME use FromCol once https://github.com/rust-lang/rust/issues/22037 is fixed
bln:try!(row.get_column(1).get_bool()),
dbl:try!(row.get_column(2).get_double()),
flt:try!(row.get_column(3).get_float()),
i32:try!(row.get_column(4).get_int32()),
i64:try!(row.get_column(5).get_int64())
});
}
panic!("no results");
}
fn main()
|
}
session.close().wait();
}
}
}
|
{
match CassCluster::new().set_contact_points(CONTACT_POINTS) {
Err(err) => panic!("err: {:?}", err),
Ok(cluster) => {
let mut session = CassSession::new();
let input = Basic{bln:true, flt:0.001f32, dbl:0.0002f64, i32:1, i64:2 };
session.connect(cluster).wait();
session.execute(CassStatement::new(CREATE_KEYSPACE_CMD, 0));
match execute_query(&mut session, CREATE_TABLE_CMD) {
Ok(_) => {}
Err(err) => panic!("err: {:?}", err),
}
match insert_into_basic(&mut session,"test", input) {
Ok(response) => println!("insert response {:?}",response),
Err(err) => {panic!("insert err {:?}",err)}
}
match select_from_basic(&mut session,"test") {
Ok(output) => assert!(input == output),
Err(err) => panic!("select err: {:?}", err),
|
identifier_body
|
buffer.rs
|
use std::default::Default;
#[derive(Default)]
pub struct Cursor {
x: u32,
y: u32,
}
#[derive(Default)]
pub struct Buffer {
x: u32,
y: u32,
contents: Vec<String>,
}
impl Buffer {
pub fn new() -> Buffer {Buffer{x: 0, y: 0, contents: vec![String::new()]}}
pub fn insert(&mut self, character: char) {
self.contents[self.y as usize].insert(self.x as usize, character);
self.move_right();
}
pub fn back_space(&mut self) {
self.contents[self.y as usize].remove(self.x as usize);
self.move_left();
}
pub fn
|
(&self) -> i32 {self.x as i32}
pub fn get_y(&self) -> i32 {self.y as i32}
pub fn get_contents(&self) -> String {self.contents.connect("\n")}
pub fn insert_line(&mut self) {
self.contents.insert(self.y as usize, String::new());
self.move_down();
}
pub fn move_up(&mut self) {
if self.y > 0 {
self.y -= 1;
}
}
pub fn move_down(&mut self) {
let length = self.contents.len().to_string();
self.contents[0].push_str(&length);
if self.y + 1 < self.contents.len() as u32 {
self.y += 1;
}
}
pub fn move_left(&mut self) {
if self.x > 0 {self.x -= 1}
}
pub fn move_right(&mut self) {
if self.x + 1 < self.contents[self.y as usize].len() as u32 {
self.x += 1;
}
}
}
|
get_x
|
identifier_name
|
buffer.rs
|
use std::default::Default;
#[derive(Default)]
pub struct Cursor {
x: u32,
y: u32,
}
#[derive(Default)]
pub struct Buffer {
x: u32,
y: u32,
contents: Vec<String>,
}
|
pub fn insert(&mut self, character: char) {
self.contents[self.y as usize].insert(self.x as usize, character);
self.move_right();
}
pub fn back_space(&mut self) {
self.contents[self.y as usize].remove(self.x as usize);
self.move_left();
}
pub fn get_x(&self) -> i32 {self.x as i32}
pub fn get_y(&self) -> i32 {self.y as i32}
pub fn get_contents(&self) -> String {self.contents.connect("\n")}
pub fn insert_line(&mut self) {
self.contents.insert(self.y as usize, String::new());
self.move_down();
}
pub fn move_up(&mut self) {
if self.y > 0 {
self.y -= 1;
}
}
pub fn move_down(&mut self) {
let length = self.contents.len().to_string();
self.contents[0].push_str(&length);
if self.y + 1 < self.contents.len() as u32 {
self.y += 1;
}
}
pub fn move_left(&mut self) {
if self.x > 0 {self.x -= 1}
}
pub fn move_right(&mut self) {
if self.x + 1 < self.contents[self.y as usize].len() as u32 {
self.x += 1;
}
}
}
|
impl Buffer {
pub fn new() -> Buffer {Buffer{x: 0, y: 0, contents: vec![String::new()]}}
|
random_line_split
|
buffer.rs
|
use std::default::Default;
#[derive(Default)]
pub struct Cursor {
x: u32,
y: u32,
}
#[derive(Default)]
pub struct Buffer {
x: u32,
y: u32,
contents: Vec<String>,
}
impl Buffer {
pub fn new() -> Buffer {Buffer{x: 0, y: 0, contents: vec![String::new()]}}
pub fn insert(&mut self, character: char) {
self.contents[self.y as usize].insert(self.x as usize, character);
self.move_right();
}
pub fn back_space(&mut self) {
self.contents[self.y as usize].remove(self.x as usize);
self.move_left();
}
pub fn get_x(&self) -> i32 {self.x as i32}
pub fn get_y(&self) -> i32 {self.y as i32}
pub fn get_contents(&self) -> String {self.contents.connect("\n")}
pub fn insert_line(&mut self) {
self.contents.insert(self.y as usize, String::new());
self.move_down();
}
pub fn move_up(&mut self) {
if self.y > 0
|
}
pub fn move_down(&mut self) {
let length = self.contents.len().to_string();
self.contents[0].push_str(&length);
if self.y + 1 < self.contents.len() as u32 {
self.y += 1;
}
}
pub fn move_left(&mut self) {
if self.x > 0 {self.x -= 1}
}
pub fn move_right(&mut self) {
if self.x + 1 < self.contents[self.y as usize].len() as u32 {
self.x += 1;
}
}
}
|
{
self.y -= 1;
}
|
conditional_block
|
buffer.rs
|
use std::default::Default;
#[derive(Default)]
pub struct Cursor {
x: u32,
y: u32,
}
#[derive(Default)]
pub struct Buffer {
x: u32,
y: u32,
contents: Vec<String>,
}
impl Buffer {
pub fn new() -> Buffer {Buffer{x: 0, y: 0, contents: vec![String::new()]}}
pub fn insert(&mut self, character: char) {
self.contents[self.y as usize].insert(self.x as usize, character);
self.move_right();
}
pub fn back_space(&mut self) {
self.contents[self.y as usize].remove(self.x as usize);
self.move_left();
}
pub fn get_x(&self) -> i32 {self.x as i32}
pub fn get_y(&self) -> i32 {self.y as i32}
pub fn get_contents(&self) -> String {self.contents.connect("\n")}
pub fn insert_line(&mut self)
|
pub fn move_up(&mut self) {
if self.y > 0 {
self.y -= 1;
}
}
pub fn move_down(&mut self) {
let length = self.contents.len().to_string();
self.contents[0].push_str(&length);
if self.y + 1 < self.contents.len() as u32 {
self.y += 1;
}
}
pub fn move_left(&mut self) {
if self.x > 0 {self.x -= 1}
}
pub fn move_right(&mut self) {
if self.x + 1 < self.contents[self.y as usize].len() as u32 {
self.x += 1;
}
}
}
|
{
self.contents.insert(self.y as usize, String::new());
self.move_down();
}
|
identifier_body
|
cfile.rs
|
use crate::SBError;
use std::fs::File;
use cpp::cpp;
#[cfg(unix)]
use std::os::unix::prelude::*;
#[cfg(windows)]
use std::os::windows::prelude::*;
#[repr(C)]
pub struct FILE;
// The returned FILE takes ownership of file's descriptor.
pub fn cfile_from_file(file: File, write: bool) -> Result<*mut FILE, SBError> {
#[cfg(unix)]
let fd = file.into_raw_fd() as isize;
#[cfg(windows)]
let fd = file.into_raw_handle() as isize;
let mut error = SBError::new();
let cfile = cpp!(unsafe [fd as "intptr_t", write as "bool", mut error as "SBError"] -> *mut FILE as "FILE*" {
FILE* cfile;
#ifdef _WIN32
cfile = fdopen(_open_osfhandle(fd, write? 0 : _O_RDONLY), write? "w" : "r");
#else
cfile = fdopen(fd, write? "w" : "r");
#endif
if (cfile) {
setvbuf(cfile, nullptr, _IOLBF, BUFSIZ);
int x = fileno(cfile);
if (x < 0)
return nullptr;
return cfile;
} else {
error.SetErrorToErrno();
return nullptr;
}
});
if!cfile.is_null()
|
else {
Err(error)
}
}
|
{
Ok(cfile)
}
|
conditional_block
|
cfile.rs
|
use crate::SBError;
use std::fs::File;
use cpp::cpp;
#[cfg(unix)]
use std::os::unix::prelude::*;
#[cfg(windows)]
use std::os::windows::prelude::*;
#[repr(C)]
|
#[cfg(unix)]
let fd = file.into_raw_fd() as isize;
#[cfg(windows)]
let fd = file.into_raw_handle() as isize;
let mut error = SBError::new();
let cfile = cpp!(unsafe [fd as "intptr_t", write as "bool", mut error as "SBError"] -> *mut FILE as "FILE*" {
FILE* cfile;
#ifdef _WIN32
cfile = fdopen(_open_osfhandle(fd, write? 0 : _O_RDONLY), write? "w" : "r");
#else
cfile = fdopen(fd, write? "w" : "r");
#endif
if (cfile) {
setvbuf(cfile, nullptr, _IOLBF, BUFSIZ);
int x = fileno(cfile);
if (x < 0)
return nullptr;
return cfile;
} else {
error.SetErrorToErrno();
return nullptr;
}
});
if!cfile.is_null() {
Ok(cfile)
} else {
Err(error)
}
}
|
pub struct FILE;
// The returned FILE takes ownership of file's descriptor.
pub fn cfile_from_file(file: File, write: bool) -> Result<*mut FILE, SBError> {
|
random_line_split
|
cfile.rs
|
use crate::SBError;
use std::fs::File;
use cpp::cpp;
#[cfg(unix)]
use std::os::unix::prelude::*;
#[cfg(windows)]
use std::os::windows::prelude::*;
#[repr(C)]
pub struct
|
;
// The returned FILE takes ownership of file's descriptor.
pub fn cfile_from_file(file: File, write: bool) -> Result<*mut FILE, SBError> {
#[cfg(unix)]
let fd = file.into_raw_fd() as isize;
#[cfg(windows)]
let fd = file.into_raw_handle() as isize;
let mut error = SBError::new();
let cfile = cpp!(unsafe [fd as "intptr_t", write as "bool", mut error as "SBError"] -> *mut FILE as "FILE*" {
FILE* cfile;
#ifdef _WIN32
cfile = fdopen(_open_osfhandle(fd, write? 0 : _O_RDONLY), write? "w" : "r");
#else
cfile = fdopen(fd, write? "w" : "r");
#endif
if (cfile) {
setvbuf(cfile, nullptr, _IOLBF, BUFSIZ);
int x = fileno(cfile);
if (x < 0)
return nullptr;
return cfile;
} else {
error.SetErrorToErrno();
return nullptr;
}
});
if!cfile.is_null() {
Ok(cfile)
} else {
Err(error)
}
}
|
FILE
|
identifier_name
|
cfile.rs
|
use crate::SBError;
use std::fs::File;
use cpp::cpp;
#[cfg(unix)]
use std::os::unix::prelude::*;
#[cfg(windows)]
use std::os::windows::prelude::*;
#[repr(C)]
pub struct FILE;
// The returned FILE takes ownership of file's descriptor.
pub fn cfile_from_file(file: File, write: bool) -> Result<*mut FILE, SBError>
|
} else {
error.SetErrorToErrno();
return nullptr;
}
});
if!cfile.is_null() {
Ok(cfile)
} else {
Err(error)
}
}
|
{
#[cfg(unix)]
let fd = file.into_raw_fd() as isize;
#[cfg(windows)]
let fd = file.into_raw_handle() as isize;
let mut error = SBError::new();
let cfile = cpp!(unsafe [fd as "intptr_t", write as "bool", mut error as "SBError"] -> *mut FILE as "FILE*" {
FILE* cfile;
#ifdef _WIN32
cfile = fdopen(_open_osfhandle(fd, write ? 0 : _O_RDONLY), write ? "w" : "r");
#else
cfile = fdopen(fd, write ? "w" : "r");
#endif
if (cfile) {
setvbuf(cfile, nullptr, _IOLBF, BUFSIZ);
int x = fileno(cfile);
if (x < 0)
return nullptr;
return cfile;
|
identifier_body
|
object-one-type-two-traits.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Testing creating two vtables with the same self type, but different
// traits.
#![allow(unknown_features)]
#![feature(box_syntax)]
use std::any::Any;
trait Wrap {
fn get(&self) -> int;
fn wrap(self: Box<Self>) -> Box<Any+'static>;
}
impl Wrap for int {
|
fn wrap(self: Box<int>) -> Box<Any+'static> {
self as Box<Any+'static>
}
}
fn is<T:'static>(x: &Any) -> bool {
x.is::<T>()
}
fn main() {
let x = box 22i as Box<Wrap>;
println!("x={}", x.get());
let y = x.wrap();
}
|
fn get(&self) -> int {
*self
}
|
random_line_split
|
object-one-type-two-traits.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Testing creating two vtables with the same self type, but different
// traits.
#![allow(unknown_features)]
#![feature(box_syntax)]
use std::any::Any;
trait Wrap {
fn get(&self) -> int;
fn wrap(self: Box<Self>) -> Box<Any+'static>;
}
impl Wrap for int {
fn get(&self) -> int {
*self
}
fn wrap(self: Box<int>) -> Box<Any+'static> {
self as Box<Any+'static>
}
}
fn is<T:'static>(x: &Any) -> bool
|
fn main() {
let x = box 22i as Box<Wrap>;
println!("x={}", x.get());
let y = x.wrap();
}
|
{
x.is::<T>()
}
|
identifier_body
|
object-one-type-two-traits.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Testing creating two vtables with the same self type, but different
// traits.
#![allow(unknown_features)]
#![feature(box_syntax)]
use std::any::Any;
trait Wrap {
fn get(&self) -> int;
fn wrap(self: Box<Self>) -> Box<Any+'static>;
}
impl Wrap for int {
fn get(&self) -> int {
*self
}
fn wrap(self: Box<int>) -> Box<Any+'static> {
self as Box<Any+'static>
}
}
fn
|
<T:'static>(x: &Any) -> bool {
x.is::<T>()
}
fn main() {
let x = box 22i as Box<Wrap>;
println!("x={}", x.get());
let y = x.wrap();
}
|
is
|
identifier_name
|
changelg.rs
|
extern crate git2;
extern crate regex;
use self::git2::{Repository, Commit};
use self::regex::Regex;
macro_rules! filter_option {
($e:expr) => (match $e { Ok(t) => t, Err(e) => return Some(Err(e)) })
}
macro_rules! option_match {
($e:expr) => (match $e { Ok(t) => t, Err(e) => panic!("err! - {}", e) })
}
// Todo: Return revwalk and defer iterating over comits to another function
pub fn get_commits(args: Vec<String>) {
let repo = option_match!(Repository::open("./"));
let mut revwalk = option_match!(repo.revwalk());
option_match!(
revwalk.push_range(
&commit_range(&args[1][..], &args[2][..])[..]
)
);
let revwalk = revwalk.filter_map(|id| {
let id = option_match!(id);
let commit = filter_option!(repo.find_commit(id));
Some(Ok(commit))
});
for commit in revwalk {
let commit = option_match!(commit);
print_to_stdout(&commit);
}
}
fn commit_range(commit_from: &str, commit_to: &str) -> String {
let mut commit_range = String::from("");
commit_range.push_str(commit_from);
commit_range.push_str("..");
commit_range.push_str(commit_to);
commit_range
}
fn print_to_stdout(commit: &Commit) {
for line in String::from_utf8_lossy(commit.message_bytes()).lines() {
if match_changelog_identifier(line) {
println!("{} by {}", strip_changelog_hashtag(line), commit.author());
};
};
}
fn match_changelog_identifier(line: &str) -> bool {
let re = Regex::new(r"^#changelog.*$").unwrap();
re.is_match(line)
}
fn strip_changelog_hashtag(commit_msg: &str) -> &str {
if commit_msg.to_lowercase().starts_with("#changelog ") {
return &commit_msg[11..].trim_left();
}
commit_msg
}
#[test]
#[should_panic]
fn
|
() {
fn something_bad_happens<T>() -> Result<T, &'static str> {
Err("Fail")
};
match something_bad_happens() {
Ok(t) => t,
Err(e) => panic!("err! - {}", e)
}
}
#[test]
fn it_should_return_result_on_ok_in_option_match() {
fn something_good_happens() -> Result<&'static str, &'static str> {
Ok("Good")
};
let result = match something_good_happens() {
Ok(t) => t,
Err(e) => panic!("err! - {}", e)
};
assert_eq!(result, "Good");
}
#[test]
fn it_should_return_expected_commit_range_string(){
let commit_range = commit_range("377d686351969f27f288dec2fb09d0d5431fcde1", "3763e0e3ff218cbdfbf99c68109a04d666e81abeto");
assert_eq!(commit_range, "377d686351969f27f288dec2fb09d0d5431fcde1..3763e0e3ff218cbdfbf99c68109a04d666e81abeto");
}
#[test]
fn it_should_return_true_when_a_string_is_tagged_changelog_(){
let result = match_changelog_identifier("#changelog Hello World");
assert_eq!(result, true);
}
#[test]
fn it_should_return_false_when_a_string_is_not_tagged_changelog_(){
let result = match_changelog_identifier("Hello World");
assert_eq!(result, false);
}
#[test]
fn it_should_return_message_without_hashtag() {
let result = strip_changelog_hashtag("#changelog This is a test commit message");
assert_eq!(result, "This is a test commit message");
}
#[test]
fn it_should_return_message_without_hashtag_and_surplus_whitespace() {
let result = strip_changelog_hashtag("#changelog This is a test commit message");
assert_eq!(result, "This is a test commit message");
}
#[test]
fn it_should_return_message_without_changes_if_not_changelog() {
let result = strip_changelog_hashtag("This is a test commit message without a changelog hashtag");
assert_eq!(result, "This is a test commit message without a changelog hashtag");
}
|
it_should_panic_on_error_in_option_match
|
identifier_name
|
changelg.rs
|
extern crate git2;
extern crate regex;
use self::git2::{Repository, Commit};
use self::regex::Regex;
macro_rules! filter_option {
($e:expr) => (match $e { Ok(t) => t, Err(e) => return Some(Err(e)) })
|
}
macro_rules! option_match {
($e:expr) => (match $e { Ok(t) => t, Err(e) => panic!("err! - {}", e) })
}
// Todo: Return revwalk and defer iterating over comits to another function
pub fn get_commits(args: Vec<String>) {
let repo = option_match!(Repository::open("./"));
let mut revwalk = option_match!(repo.revwalk());
option_match!(
revwalk.push_range(
&commit_range(&args[1][..], &args[2][..])[..]
)
);
let revwalk = revwalk.filter_map(|id| {
let id = option_match!(id);
let commit = filter_option!(repo.find_commit(id));
Some(Ok(commit))
});
for commit in revwalk {
let commit = option_match!(commit);
print_to_stdout(&commit);
}
}
fn commit_range(commit_from: &str, commit_to: &str) -> String {
let mut commit_range = String::from("");
commit_range.push_str(commit_from);
commit_range.push_str("..");
commit_range.push_str(commit_to);
commit_range
}
fn print_to_stdout(commit: &Commit) {
for line in String::from_utf8_lossy(commit.message_bytes()).lines() {
if match_changelog_identifier(line) {
println!("{} by {}", strip_changelog_hashtag(line), commit.author());
};
};
}
fn match_changelog_identifier(line: &str) -> bool {
let re = Regex::new(r"^#changelog.*$").unwrap();
re.is_match(line)
}
fn strip_changelog_hashtag(commit_msg: &str) -> &str {
if commit_msg.to_lowercase().starts_with("#changelog ") {
return &commit_msg[11..].trim_left();
}
commit_msg
}
#[test]
#[should_panic]
fn it_should_panic_on_error_in_option_match() {
fn something_bad_happens<T>() -> Result<T, &'static str> {
Err("Fail")
};
match something_bad_happens() {
Ok(t) => t,
Err(e) => panic!("err! - {}", e)
}
}
#[test]
fn it_should_return_result_on_ok_in_option_match() {
fn something_good_happens() -> Result<&'static str, &'static str> {
Ok("Good")
};
let result = match something_good_happens() {
Ok(t) => t,
Err(e) => panic!("err! - {}", e)
};
assert_eq!(result, "Good");
}
#[test]
fn it_should_return_expected_commit_range_string(){
let commit_range = commit_range("377d686351969f27f288dec2fb09d0d5431fcde1", "3763e0e3ff218cbdfbf99c68109a04d666e81abeto");
assert_eq!(commit_range, "377d686351969f27f288dec2fb09d0d5431fcde1..3763e0e3ff218cbdfbf99c68109a04d666e81abeto");
}
#[test]
fn it_should_return_true_when_a_string_is_tagged_changelog_(){
let result = match_changelog_identifier("#changelog Hello World");
assert_eq!(result, true);
}
#[test]
fn it_should_return_false_when_a_string_is_not_tagged_changelog_(){
let result = match_changelog_identifier("Hello World");
assert_eq!(result, false);
}
#[test]
fn it_should_return_message_without_hashtag() {
let result = strip_changelog_hashtag("#changelog This is a test commit message");
assert_eq!(result, "This is a test commit message");
}
#[test]
fn it_should_return_message_without_hashtag_and_surplus_whitespace() {
let result = strip_changelog_hashtag("#changelog This is a test commit message");
assert_eq!(result, "This is a test commit message");
}
#[test]
fn it_should_return_message_without_changes_if_not_changelog() {
let result = strip_changelog_hashtag("This is a test commit message without a changelog hashtag");
assert_eq!(result, "This is a test commit message without a changelog hashtag");
}
|
random_line_split
|
|
changelg.rs
|
extern crate git2;
extern crate regex;
use self::git2::{Repository, Commit};
use self::regex::Regex;
macro_rules! filter_option {
($e:expr) => (match $e { Ok(t) => t, Err(e) => return Some(Err(e)) })
}
macro_rules! option_match {
($e:expr) => (match $e { Ok(t) => t, Err(e) => panic!("err! - {}", e) })
}
// Todo: Return revwalk and defer iterating over comits to another function
/// Walks the commit range `args[1]..args[2]` of the repository in the
/// current directory and prints every `#changelog`-tagged message line.
///
/// Panics (via `option_match!`) if the repository cannot be opened, the
/// revwalk cannot be created, the range is invalid, or a commit lookup fails.
/// NOTE(review): assumes `args` has at least 3 elements (program name plus
/// two commit ids) — confirm all callers guarantee this.
pub fn get_commits(args: Vec<String>) {
    let repo = option_match!(Repository::open("./"));
    let mut revwalk = option_match!(repo.revwalk());
    option_match!(
        revwalk.push_range(
            &commit_range(&args[1][..], &args[2][..])[..]
        )
    );
    // Map each walked id to its commit. `filter_option!` converts a lookup
    // failure into `Some(Err(e))`, which the loop below turns into a panic
    // via `option_match!`.
    let revwalk = revwalk.filter_map(|id| {
        let id = option_match!(id);
        let commit = filter_option!(repo.find_commit(id));
        Some(Ok(commit))
    });
    for commit in revwalk {
        let commit = option_match!(commit);
        print_to_stdout(&commit);
    }
}
/// Builds the `<from>..<to>` revision-range string understood by
/// `Revwalk::push_range`.
fn commit_range(commit_from: &str, commit_to: &str) -> String {
    // `format!` sizes and fills the buffer in one step, replacing the
    // previous manual push_str sequence.
    format!("{}..{}", commit_from, commit_to)
}
/// Prints every `#changelog`-tagged line of `commit`'s message to stdout,
/// followed by the commit author.
fn print_to_stdout(commit: &Commit) {
    let message = String::from_utf8_lossy(commit.message_bytes());
    let tagged_lines = message.lines().filter(|l| match_changelog_identifier(l));
    for line in tagged_lines {
        println!("{} by {}", strip_changelog_hashtag(line), commit.author());
    }
}
/// Returns true when `line` carries the `#changelog` tag at its start.
///
/// Equivalent to the previous `^#changelog.*$` regex for the single,
/// newline-free lines the caller produces via `.lines()`, but avoids
/// recompiling a `Regex` on every call (the compile was the dominant cost
/// and the pattern was only ever a prefix test).
fn match_changelog_identifier(line: &str) -> bool {
    line.starts_with("#changelog")
}
/// Strips a leading `#changelog ` tag (ASCII case-insensitive) plus any
/// surplus whitespace after it; untagged messages are returned unchanged.
fn strip_changelog_hashtag(commit_msg: &str) -> &str {
    const TAG: &str = "#changelog ";
    // Byte-wise ASCII comparison replaces the old `to_lowercase()` prefix
    // check, which could mis-slice: Unicode lowercasing can change byte
    // length (e.g. U+212A KELVIN SIGN -> 'k'), so slicing the original at
    // a fixed offset after a lowercased comparison was unsound.
    // `trim_start` replaces the deprecated `trim_left`.
    if commit_msg.len() >= TAG.len()
        && commit_msg.is_char_boundary(TAG.len())
        && commit_msg[..TAG.len()].eq_ignore_ascii_case(TAG)
    {
        return commit_msg[TAG.len()..].trim_start();
    }
    commit_msg
}
#[test]
#[should_panic]
fn it_should_panic_on_error_in_option_match() {
    // Inline fixture: an operation that always fails.
    fn something_bad_happens<T>() -> Result<T, &'static str> {
        Err("Fail")
    }
    // The Err arm must panic, satisfying #[should_panic]; the message
    // format mirrors the `option_match!` macro.
    if let Err(e) = something_bad_happens::<()>() {
        panic!("err! - {}", e)
    }
}
#[test]
fn it_should_return_result_on_ok_in_option_match() {
    fn something_good_happens() -> Result<&'static str, &'static str> {
        Ok("Good")
    }
    // Expect the Ok payload; an Err here would be a test bug.
    match something_good_happens() {
        Ok(value) => assert_eq!(value, "Good"),
        Err(e) => panic!("err! - {}", e),
    }
}
#[test]
fn it_should_return_expected_commit_range_string()
|
#[test]
fn it_should_return_true_when_a_string_is_tagged_changelog_(){
    let tagged = match_changelog_identifier("#changelog Hello World");
    assert!(tagged, "expected the #changelog tag to be recognised");
}
#[test]
fn it_should_return_false_when_a_string_is_not_tagged_changelog_(){
    // A plain message must not be classified as a changelog entry.
    assert_eq!(false, match_changelog_identifier("Hello World"));
}
#[test]
fn it_should_return_message_without_hashtag() {
    assert_eq!(
        strip_changelog_hashtag("#changelog This is a test commit message"),
        "This is a test commit message"
    );
}
#[test]
fn it_should_return_message_without_hashtag_and_surplus_whitespace() {
    // Whitespace beyond the single tag separator must also be removed.
    assert_eq!(
        strip_changelog_hashtag("#changelog    This is a test commit message"),
        "This is a test commit message"
    );
}
#[test]
fn it_should_return_message_without_changes_if_not_changelog() {
    let untouched = "This is a test commit message without a changelog hashtag";
    assert_eq!(strip_changelog_hashtag(untouched), untouched);
}
|
{
let commit_range = commit_range("377d686351969f27f288dec2fb09d0d5431fcde1", "3763e0e3ff218cbdfbf99c68109a04d666e81abeto");
assert_eq!(commit_range, "377d686351969f27f288dec2fb09d0d5431fcde1..3763e0e3ff218cbdfbf99c68109a04d666e81abeto");
}
|
identifier_body
|
vec-tail-matching.rs
|
// NOTE(review): pre-1.0 Rust — `~str` is the obsolete owned-string type
// (modern `String`); this fragment will not compile on any modern rustc.
struct Foo {
    string: ~str
}
pub fn main()
|
}
_ => {
unreachable!();
}
}
}
_ => {
unreachable!();
}
}
}
|
{
let x = ~[
Foo { string: ~"foo" },
Foo { string: ~"bar" },
Foo { string: ~"baz" }
];
match x {
[ref first, ..tail] => {
assert!(first.string == ~"foo");
assert_eq!(tail.len(), 2);
assert!(tail[0].string == ~"bar");
assert!(tail[1].string == ~"baz");
match tail {
[Foo { _ }, _, Foo { _ }, .. _tail] => {
unreachable!();
}
[Foo { string: ref a }, Foo { string: ref b }] => {
assert_eq!("bar", a.slice(0, a.len()));
assert_eq!("baz", b.slice(0, b.len()));
|
identifier_body
|
vec-tail-matching.rs
|
struct Foo {
string: ~str
}
pub fn main() {
let x = ~[
Foo { string: ~"foo" },
Foo { string: ~"bar" },
Foo { string: ~"baz" }
];
match x {
[ref first,..tail] => {
assert!(first.string == ~"foo");
assert_eq!(tail.len(), 2);
assert!(tail[0].string == ~"bar");
assert!(tail[1].string == ~"baz");
match tail {
[Foo { _ }, _, Foo { _ },.. _tail] => {
unreachable!();
}
[Foo { string: ref a }, Foo { string: ref b }] => {
assert_eq!("bar", a.slice(0, a.len()));
assert_eq!("baz", b.slice(0, b.len()));
}
_ => {
|
unreachable!();
}
}
}
_ => {
unreachable!();
}
}
}
|
random_line_split
|
|
vec-tail-matching.rs
|
struct
|
{
string: ~str
}
pub fn main() {
let x = ~[
Foo { string: ~"foo" },
Foo { string: ~"bar" },
Foo { string: ~"baz" }
];
match x {
[ref first,..tail] => {
assert!(first.string == ~"foo");
assert_eq!(tail.len(), 2);
assert!(tail[0].string == ~"bar");
assert!(tail[1].string == ~"baz");
match tail {
[Foo { _ }, _, Foo { _ },.. _tail] => {
unreachable!();
}
[Foo { string: ref a }, Foo { string: ref b }] => {
assert_eq!("bar", a.slice(0, a.len()));
assert_eq!("baz", b.slice(0, b.len()));
}
_ => {
unreachable!();
}
}
}
_ => {
unreachable!();
}
}
}
|
Foo
|
identifier_name
|
lib.rs
|
use std::iter::DoubleEndedIterator;
use std::ops::{Index, IndexMut};
pub struct VecMultiMap<T>(Vec<Vec<T>>);
/// Adapter exposing multimap-style operations over a borrowed `Vec<Vec<T>>`,
/// growing the outer vector on demand so any index is addressable.
pub struct VecMultiMapAdapter<'a, T>(pub &'a mut Vec<Vec<T>>);
impl<'a, T> VecMultiMapAdapter<'a, T> {
    /// Grows the outer vector with empty inner vectors until `index` is valid.
    fn pad_to(&mut self, index: usize) {
        if index >= self.0.len() {
            self.0.resize_with(index + 1, Vec::new)
        }
    }
    /// Appends `value` to the bucket at `index`, padding as needed.
    pub fn push_to(&mut self, index: usize, value: T) {
        self.pad_to(index);
        self.0[index].push(value);
    }
    /// Returns the bucket at `index`, padding as needed (never panics).
    pub fn get(&mut self, index: usize) -> &Vec<T> {
        self.pad_to(index);
        // `pad_to` guarantees `index < self.0.len()`, so safe indexing is
        // provably in-bounds; the previous `unsafe { get_unchecked }` bought
        // nothing and carried no SAFETY justification.
        &self.0[index]
    }
    /// Returns the bucket at `index` without padding, or `None` if absent.
    pub fn get_or_fail(&self, index: usize) -> Option<&Vec<T>> {
        self.0.get(index)
    }
    /// Returns a mutable reference to the bucket at `index`, padding as needed.
    pub fn get_mut(&mut self, index: usize) -> &mut Vec<T> {
        self.pad_to(index);
        // In-bounds for the same reason as `get`.
        &mut self.0[index]
    }
}
impl<T> VecMultiMap<T> {
pub fn new() -> Self {
VecMultiMap(Vec::new())
}
fn pad_to(&mut self, index: usize) {
if index >= self.0.len() {
self.0.resize_with(index + 1, Vec::new)
}
}
pub fn push_to(&mut self, index: usize, value: T)
|
pub fn get(&mut self, index: usize) -> &Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked(index) }
}
pub fn get_or_fail(&self, index: usize) -> Option<&Vec<T>> {
self.0.get(index)
}
pub fn get_mut(&mut self, index: usize) -> &mut Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked_mut(index) }
}
pub fn into_vec_with_size(mut self, size: usize) -> Vec<Vec<T>> {
self.0.resize_with(size, Vec::new);
self.0
}
}
impl<'a, T> From<&'a mut Vec<Vec<T>>> for VecMultiMapAdapter<'a, T> {
fn from(v: &'a mut Vec<Vec<T>>) -> Self {
Self(v)
}
}
impl<T> Into<Vec<Vec<T>>> for VecMultiMap<T> {
fn into(self) -> Vec<Vec<T>> {
// find greatest index with non-empty vector
if let Some((last_filled_index, _)) = self.0.iter().enumerate().rfind(|(_, v)| v.len() > 0)
{
self.into_vec_with_size(last_filled_index + 1)
} else {
Vec::new()
}
}
}
impl<'a, T> Index<usize> for VecMultiMapAdapter<'a, T> {
type Output = Vec<T>;
fn index(&self, index: usize) -> &Self::Output {
self.get_or_fail(index)
.expect("immutable index out of bounds")
}
}
impl<'a, T> IndexMut<usize> for VecMultiMapAdapter<'a, T> {
fn index_mut(&mut self, index: usize) -> &mut <Self as Index<usize>>::Output {
self.get_mut(index)
}
}
|
{
self.pad_to(index);
self.0[index].push(value);
}
|
identifier_body
|
lib.rs
|
use std::iter::DoubleEndedIterator;
use std::ops::{Index, IndexMut};
pub struct VecMultiMap<T>(Vec<Vec<T>>);
pub struct VecMultiMapAdapter<'a, T>(pub &'a mut Vec<Vec<T>>);
impl<'a, T> VecMultiMapAdapter<'a, T> {
fn pad_to(&mut self, index: usize) {
if index >= self.0.len() {
self.0.resize_with(index + 1, Vec::new)
}
}
pub fn push_to(&mut self, index: usize, value: T) {
self.pad_to(index);
self.0[index].push(value);
}
|
}
pub fn get_or_fail(&self, index: usize) -> Option<&Vec<T>> {
self.0.get(index)
}
pub fn get_mut(&mut self, index: usize) -> &mut Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked_mut(index) }
}
}
impl<T> VecMultiMap<T> {
pub fn new() -> Self {
VecMultiMap(Vec::new())
}
fn pad_to(&mut self, index: usize) {
if index >= self.0.len() {
self.0.resize_with(index + 1, Vec::new)
}
}
pub fn push_to(&mut self, index: usize, value: T) {
self.pad_to(index);
self.0[index].push(value);
}
pub fn get(&mut self, index: usize) -> &Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked(index) }
}
pub fn get_or_fail(&self, index: usize) -> Option<&Vec<T>> {
self.0.get(index)
}
pub fn get_mut(&mut self, index: usize) -> &mut Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked_mut(index) }
}
pub fn into_vec_with_size(mut self, size: usize) -> Vec<Vec<T>> {
self.0.resize_with(size, Vec::new);
self.0
}
}
impl<'a, T> From<&'a mut Vec<Vec<T>>> for VecMultiMapAdapter<'a, T> {
fn from(v: &'a mut Vec<Vec<T>>) -> Self {
Self(v)
}
}
impl<T> Into<Vec<Vec<T>>> for VecMultiMap<T> {
    /// Converts into the underlying storage, truncated just past the last
    /// non-empty bucket; an all-empty map becomes an empty `Vec`.
    fn into(self) -> Vec<Vec<T>> {
        // `rposition` finds the greatest index whose bucket is non-empty;
        // `!is_empty()` is the idiomatic form of `len() > 0` (clippy len_zero).
        match self.0.iter().rposition(|v| !v.is_empty()) {
            Some(last_filled_index) => self.into_vec_with_size(last_filled_index + 1),
            None => Vec::new(),
        }
    }
}
impl<'a, T> Index<usize> for VecMultiMapAdapter<'a, T> {
    type Output = Vec<T>;
    // Immutable indexing cannot pad the storage, so an out-of-range index
    // panics (unlike `get`, which grows the map).
    fn index(&self, index: usize) -> &Self::Output {
        self.get_or_fail(index)
            .expect("immutable index out of bounds")
    }
}
impl<'a, T> IndexMut<usize> for VecMultiMapAdapter<'a, T> {
    // Mutable indexing delegates to `get_mut`, which pads first, so this
    // never panics.
    fn index_mut(&mut self, index: usize) -> &mut <Self as Index<usize>>::Output {
        self.get_mut(index)
    }
}
|
pub fn get(&mut self, index: usize) -> &Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked(index) }
|
random_line_split
|
lib.rs
|
use std::iter::DoubleEndedIterator;
use std::ops::{Index, IndexMut};
pub struct VecMultiMap<T>(Vec<Vec<T>>);
pub struct VecMultiMapAdapter<'a, T>(pub &'a mut Vec<Vec<T>>);
impl<'a, T> VecMultiMapAdapter<'a, T> {
fn pad_to(&mut self, index: usize) {
if index >= self.0.len() {
self.0.resize_with(index + 1, Vec::new)
}
}
pub fn push_to(&mut self, index: usize, value: T) {
self.pad_to(index);
self.0[index].push(value);
}
pub fn get(&mut self, index: usize) -> &Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked(index) }
}
pub fn get_or_fail(&self, index: usize) -> Option<&Vec<T>> {
self.0.get(index)
}
pub fn get_mut(&mut self, index: usize) -> &mut Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked_mut(index) }
}
}
impl<T> VecMultiMap<T> {
pub fn new() -> Self {
VecMultiMap(Vec::new())
}
fn pad_to(&mut self, index: usize) {
if index >= self.0.len()
|
}
pub fn push_to(&mut self, index: usize, value: T) {
self.pad_to(index);
self.0[index].push(value);
}
pub fn get(&mut self, index: usize) -> &Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked(index) }
}
pub fn get_or_fail(&self, index: usize) -> Option<&Vec<T>> {
self.0.get(index)
}
pub fn get_mut(&mut self, index: usize) -> &mut Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked_mut(index) }
}
pub fn into_vec_with_size(mut self, size: usize) -> Vec<Vec<T>> {
self.0.resize_with(size, Vec::new);
self.0
}
}
impl<'a, T> From<&'a mut Vec<Vec<T>>> for VecMultiMapAdapter<'a, T> {
fn from(v: &'a mut Vec<Vec<T>>) -> Self {
Self(v)
}
}
impl<T> Into<Vec<Vec<T>>> for VecMultiMap<T> {
fn into(self) -> Vec<Vec<T>> {
// find greatest index with non-empty vector
if let Some((last_filled_index, _)) = self.0.iter().enumerate().rfind(|(_, v)| v.len() > 0)
{
self.into_vec_with_size(last_filled_index + 1)
} else {
Vec::new()
}
}
}
impl<'a, T> Index<usize> for VecMultiMapAdapter<'a, T> {
type Output = Vec<T>;
fn index(&self, index: usize) -> &Self::Output {
self.get_or_fail(index)
.expect("immutable index out of bounds")
}
}
impl<'a, T> IndexMut<usize> for VecMultiMapAdapter<'a, T> {
fn index_mut(&mut self, index: usize) -> &mut <Self as Index<usize>>::Output {
self.get_mut(index)
}
}
|
{
self.0.resize_with(index + 1, Vec::new)
}
|
conditional_block
|
lib.rs
|
use std::iter::DoubleEndedIterator;
use std::ops::{Index, IndexMut};
pub struct VecMultiMap<T>(Vec<Vec<T>>);
pub struct VecMultiMapAdapter<'a, T>(pub &'a mut Vec<Vec<T>>);
impl<'a, T> VecMultiMapAdapter<'a, T> {
fn pad_to(&mut self, index: usize) {
if index >= self.0.len() {
self.0.resize_with(index + 1, Vec::new)
}
}
pub fn push_to(&mut self, index: usize, value: T) {
self.pad_to(index);
self.0[index].push(value);
}
pub fn get(&mut self, index: usize) -> &Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked(index) }
}
pub fn get_or_fail(&self, index: usize) -> Option<&Vec<T>> {
self.0.get(index)
}
pub fn get_mut(&mut self, index: usize) -> &mut Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked_mut(index) }
}
}
impl<T> VecMultiMap<T> {
pub fn new() -> Self {
VecMultiMap(Vec::new())
}
fn pad_to(&mut self, index: usize) {
if index >= self.0.len() {
self.0.resize_with(index + 1, Vec::new)
}
}
pub fn push_to(&mut self, index: usize, value: T) {
self.pad_to(index);
self.0[index].push(value);
}
pub fn get(&mut self, index: usize) -> &Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked(index) }
}
pub fn get_or_fail(&self, index: usize) -> Option<&Vec<T>> {
self.0.get(index)
}
pub fn get_mut(&mut self, index: usize) -> &mut Vec<T> {
self.pad_to(index);
unsafe { self.0.get_unchecked_mut(index) }
}
pub fn into_vec_with_size(mut self, size: usize) -> Vec<Vec<T>> {
self.0.resize_with(size, Vec::new);
self.0
}
}
impl<'a, T> From<&'a mut Vec<Vec<T>>> for VecMultiMapAdapter<'a, T> {
fn
|
(v: &'a mut Vec<Vec<T>>) -> Self {
Self(v)
}
}
impl<T> Into<Vec<Vec<T>>> for VecMultiMap<T> {
fn into(self) -> Vec<Vec<T>> {
// find greatest index with non-empty vector
if let Some((last_filled_index, _)) = self.0.iter().enumerate().rfind(|(_, v)| v.len() > 0)
{
self.into_vec_with_size(last_filled_index + 1)
} else {
Vec::new()
}
}
}
impl<'a, T> Index<usize> for VecMultiMapAdapter<'a, T> {
type Output = Vec<T>;
fn index(&self, index: usize) -> &Self::Output {
self.get_or_fail(index)
.expect("immutable index out of bounds")
}
}
impl<'a, T> IndexMut<usize> for VecMultiMapAdapter<'a, T> {
fn index_mut(&mut self, index: usize) -> &mut <Self as Index<usize>>::Output {
self.get_mut(index)
}
}
|
from
|
identifier_name
|
main.rs
|
extern crate clap;
extern crate csv;
extern crate xmler;
use clap::{App, Arg};
use std::collections::HashSet;
use std::process;
fn main() {
let matches = App::new("xmler")
.version("0.1.0")
.author("Daniel Gregoire <[email protected]>")
.about("TBD")
.arg(
Arg::with_name("glob_pattern")
.value_name("GLOB_PATTERN")
.required(true)
.index(1)
.help("Glob pattern which limits which XML files to analyze"),
)
.arg(
Arg::with_name("ouptut_file")
.value_name("OUTPUT_FILE")
.required(false)
.help(
"Output file to which a CSV version of the report will be written",
),
)
.get_matches();
// Required by options ^^^, so by here it will have a value.
let glob_pattern = matches.value_of("glob_pattern").unwrap();
let mut report = xmler::fresh_report();
println!("Processing all files that match your glob pattern...");
xmler::process(glob_pattern, &mut report);
println!(
"
Report:
* {} Page URLs",
report.page_urls.len()
);
let page_urls_set: HashSet<String> = report.page_urls.clone().into_iter().collect();
println!(
"
* {} Unique Page URLs",
page_urls_set.len()
);
println!(
"
* {} files were consulted",
report.files.len()
);
println!(
"
* XML prefixes encountered: {:?}",
report.prefixes
);
// Persist report to CSV if output file-name specified
if let Some(file_path) = matches.value_of("output_file") {
println!("Writing output to CSV {}", file_path);
match csv::Writer::from_path(file_path) {
Ok(mut writer) => {
for url in page_urls_set {
if let Err(e) = writer.serialize(xmler::indexable_entry(url)) {
eprintln!("Error writing entry to output file {}: {:?}", file_path, e);
process::exit(1);
}
}
if let Err(e) = writer.flush() {
eprintln!(
"Error flushing writer while writing to {}: {:?}",
file_path,
e
);
process::exit(1);
}
|
Err(e) => {
eprintln!("Error creating writer for {}: {:?}", file_path, e);
process::exit(1);
}
}
}
}
|
}
|
random_line_split
|
main.rs
|
extern crate clap;
extern crate csv;
extern crate xmler;
use clap::{App, Arg};
use std::collections::HashSet;
use std::process;
fn main()
|
.get_matches();
// Required by options ^^^, so by here it will have a value.
let glob_pattern = matches.value_of("glob_pattern").unwrap();
let mut report = xmler::fresh_report();
println!("Processing all files that match your glob pattern...");
xmler::process(glob_pattern, &mut report);
println!(
"
Report:
* {} Page URLs",
report.page_urls.len()
);
let page_urls_set: HashSet<String> = report.page_urls.clone().into_iter().collect();
println!(
"
* {} Unique Page URLs",
page_urls_set.len()
);
println!(
"
* {} files were consulted",
report.files.len()
);
println!(
"
* XML prefixes encountered: {:?}",
report.prefixes
);
// Persist report to CSV if output file-name specified
if let Some(file_path) = matches.value_of("output_file") {
println!("Writing output to CSV {}", file_path);
match csv::Writer::from_path(file_path) {
Ok(mut writer) => {
for url in page_urls_set {
if let Err(e) = writer.serialize(xmler::indexable_entry(url)) {
eprintln!("Error writing entry to output file {}: {:?}", file_path, e);
process::exit(1);
}
}
if let Err(e) = writer.flush() {
eprintln!(
"Error flushing writer while writing to {}: {:?}",
file_path,
e
);
process::exit(1);
}
}
Err(e) => {
eprintln!("Error creating writer for {}: {:?}", file_path, e);
process::exit(1);
}
}
}
}
|
{
let matches = App::new("xmler")
.version("0.1.0")
.author("Daniel Gregoire <[email protected]>")
.about("TBD")
.arg(
Arg::with_name("glob_pattern")
.value_name("GLOB_PATTERN")
.required(true)
.index(1)
.help("Glob pattern which limits which XML files to analyze"),
)
.arg(
Arg::with_name("ouptut_file")
.value_name("OUTPUT_FILE")
.required(false)
.help(
"Output file to which a CSV version of the report will be written",
),
)
|
identifier_body
|
main.rs
|
extern crate clap;
extern crate csv;
extern crate xmler;
use clap::{App, Arg};
use std::collections::HashSet;
use std::process;
fn
|
() {
let matches = App::new("xmler")
.version("0.1.0")
.author("Daniel Gregoire <[email protected]>")
.about("TBD")
.arg(
Arg::with_name("glob_pattern")
.value_name("GLOB_PATTERN")
.required(true)
.index(1)
.help("Glob pattern which limits which XML files to analyze"),
)
.arg(
Arg::with_name("ouptut_file")
.value_name("OUTPUT_FILE")
.required(false)
.help(
"Output file to which a CSV version of the report will be written",
),
)
.get_matches();
// Required by options ^^^, so by here it will have a value.
let glob_pattern = matches.value_of("glob_pattern").unwrap();
let mut report = xmler::fresh_report();
println!("Processing all files that match your glob pattern...");
xmler::process(glob_pattern, &mut report);
println!(
"
Report:
* {} Page URLs",
report.page_urls.len()
);
let page_urls_set: HashSet<String> = report.page_urls.clone().into_iter().collect();
println!(
"
* {} Unique Page URLs",
page_urls_set.len()
);
println!(
"
* {} files were consulted",
report.files.len()
);
println!(
"
* XML prefixes encountered: {:?}",
report.prefixes
);
// Persist report to CSV if output file-name specified
if let Some(file_path) = matches.value_of("output_file") {
println!("Writing output to CSV {}", file_path);
match csv::Writer::from_path(file_path) {
Ok(mut writer) => {
for url in page_urls_set {
if let Err(e) = writer.serialize(xmler::indexable_entry(url)) {
eprintln!("Error writing entry to output file {}: {:?}", file_path, e);
process::exit(1);
}
}
if let Err(e) = writer.flush() {
eprintln!(
"Error flushing writer while writing to {}: {:?}",
file_path,
e
);
process::exit(1);
}
}
Err(e) => {
eprintln!("Error creating writer for {}: {:?}", file_path, e);
process::exit(1);
}
}
}
}
|
main
|
identifier_name
|
common.rs
|
use lexer;
use parser;
use codegen;
use std::io::{stderr, Write};
extern crate regex;
use self::regex::Regex;
extern crate ansi_term;
use self::ansi_term::Colour;
use CODEGEN;
// parse -> codegen -> write llvm bitcode to output file
pub fn run_file<'a>(filename: &'a str) {
// parser::Parser::new(&mut lexer).run(&mut nodes);
|
// DEBUG: println!("\nllvm-ir test output:");
unsafe {
let mut nodes = Vec::new();
let mut lexer = lexer::Lexer::new(filename.to_string());
let mut parser = parser::Parser::new(&mut lexer);
loop {
match parser.read_toplevel(&mut nodes) {
Err(parser::Error::EOF) => break,
Err(_) => continue,
_ => {}
}
match CODEGEN.lock().unwrap().run(&nodes) {
Ok(_) => {}
// TODO: implement err handler for codegen
Err(codegen::Error::MsgWithPos(msg, pos)) => {
writeln!(
&mut stderr(),
"{}: {} {}: {}",
parser.lexer.get_filename(),
Colour::Red.bold().paint("error:"),
pos.line,
msg
).unwrap();
writeln!(
&mut stderr(),
"{}",
parser.lexer.get_surrounding_code_with_err_point(pos.pos)
).unwrap();
println!(
"{} error{} generated.",
parser.err_counts + 1,
if parser.err_counts + 1 > 1 { "s" } else { "" }
);
::std::process::exit(-1);
}
_ => panic!("this is a bug. fix soon"),
}
nodes.clear();
}
parser.show_total_errors();
let output_file_name = Regex::new(r"\..*$").unwrap().replace_all(filename, ".bc");
CODEGEN
.lock()
.unwrap()
.write_llvm_bitcode_to_file(output_file_name.to_string().as_str());
}
}
|
// DEBUG: for node in &ast {
// DEBUG: node.show();
// DEBUG: }
|
random_line_split
|
common.rs
|
use lexer;
use parser;
use codegen;
use std::io::{stderr, Write};
extern crate regex;
use self::regex::Regex;
extern crate ansi_term;
use self::ansi_term::Colour;
use CODEGEN;
// parse -> codegen -> write llvm bitcode to output file
pub fn run_file<'a>(filename: &'a str)
|
Ok(_) => {}
// TODO: implement err handler for codegen
Err(codegen::Error::MsgWithPos(msg, pos)) => {
writeln!(
&mut stderr(),
"{}: {} {}: {}",
parser.lexer.get_filename(),
Colour::Red.bold().paint("error:"),
pos.line,
msg
).unwrap();
writeln!(
&mut stderr(),
"{}",
parser.lexer.get_surrounding_code_with_err_point(pos.pos)
).unwrap();
println!(
"{} error{} generated.",
parser.err_counts + 1,
if parser.err_counts + 1 > 1 { "s" } else { "" }
);
::std::process::exit(-1);
}
_ => panic!("this is a bug. fix soon"),
}
nodes.clear();
}
parser.show_total_errors();
let output_file_name = Regex::new(r"\..*$").unwrap().replace_all(filename, ".bc");
CODEGEN
.lock()
.unwrap()
.write_llvm_bitcode_to_file(output_file_name.to_string().as_str());
}
}
|
{
// parser::Parser::new(&mut lexer).run(&mut nodes);
// DEBUG: for node in &ast {
// DEBUG: node.show();
// DEBUG: }
// DEBUG: println!("\nllvm-ir test output:");
unsafe {
let mut nodes = Vec::new();
let mut lexer = lexer::Lexer::new(filename.to_string());
let mut parser = parser::Parser::new(&mut lexer);
loop {
match parser.read_toplevel(&mut nodes) {
Err(parser::Error::EOF) => break,
Err(_) => continue,
_ => {}
}
match CODEGEN.lock().unwrap().run(&nodes) {
|
identifier_body
|
common.rs
|
use lexer;
use parser;
use codegen;
use std::io::{stderr, Write};
extern crate regex;
use self::regex::Regex;
extern crate ansi_term;
use self::ansi_term::Colour;
use CODEGEN;
// parse -> codegen -> write llvm bitcode to output file
pub fn
|
<'a>(filename: &'a str) {
// parser::Parser::new(&mut lexer).run(&mut nodes);
// DEBUG: for node in &ast {
// DEBUG: node.show();
// DEBUG: }
// DEBUG: println!("\nllvm-ir test output:");
unsafe {
let mut nodes = Vec::new();
let mut lexer = lexer::Lexer::new(filename.to_string());
let mut parser = parser::Parser::new(&mut lexer);
loop {
match parser.read_toplevel(&mut nodes) {
Err(parser::Error::EOF) => break,
Err(_) => continue,
_ => {}
}
match CODEGEN.lock().unwrap().run(&nodes) {
Ok(_) => {}
// TODO: implement err handler for codegen
Err(codegen::Error::MsgWithPos(msg, pos)) => {
writeln!(
&mut stderr(),
"{}: {} {}: {}",
parser.lexer.get_filename(),
Colour::Red.bold().paint("error:"),
pos.line,
msg
).unwrap();
writeln!(
&mut stderr(),
"{}",
parser.lexer.get_surrounding_code_with_err_point(pos.pos)
).unwrap();
println!(
"{} error{} generated.",
parser.err_counts + 1,
if parser.err_counts + 1 > 1 { "s" } else { "" }
);
::std::process::exit(-1);
}
_ => panic!("this is a bug. fix soon"),
}
nodes.clear();
}
parser.show_total_errors();
let output_file_name = Regex::new(r"\..*$").unwrap().replace_all(filename, ".bc");
CODEGEN
.lock()
.unwrap()
.write_llvm_bitcode_to_file(output_file_name.to_string().as_str());
}
}
|
run_file
|
identifier_name
|
common.rs
|
use lexer;
use parser;
use codegen;
use std::io::{stderr, Write};
extern crate regex;
use self::regex::Regex;
extern crate ansi_term;
use self::ansi_term::Colour;
use CODEGEN;
// parse -> codegen -> write llvm bitcode to output file
pub fn run_file<'a>(filename: &'a str) {
// parser::Parser::new(&mut lexer).run(&mut nodes);
// DEBUG: for node in &ast {
// DEBUG: node.show();
// DEBUG: }
// DEBUG: println!("\nllvm-ir test output:");
unsafe {
let mut nodes = Vec::new();
let mut lexer = lexer::Lexer::new(filename.to_string());
let mut parser = parser::Parser::new(&mut lexer);
loop {
match parser.read_toplevel(&mut nodes) {
Err(parser::Error::EOF) => break,
Err(_) => continue,
_ => {}
}
match CODEGEN.lock().unwrap().run(&nodes) {
Ok(_) =>
|
// TODO: implement err handler for codegen
Err(codegen::Error::MsgWithPos(msg, pos)) => {
writeln!(
&mut stderr(),
"{}: {} {}: {}",
parser.lexer.get_filename(),
Colour::Red.bold().paint("error:"),
pos.line,
msg
).unwrap();
writeln!(
&mut stderr(),
"{}",
parser.lexer.get_surrounding_code_with_err_point(pos.pos)
).unwrap();
println!(
"{} error{} generated.",
parser.err_counts + 1,
if parser.err_counts + 1 > 1 { "s" } else { "" }
);
::std::process::exit(-1);
}
_ => panic!("this is a bug. fix soon"),
}
nodes.clear();
}
parser.show_total_errors();
let output_file_name = Regex::new(r"\..*$").unwrap().replace_all(filename, ".bc");
CODEGEN
.lock()
.unwrap()
.write_llvm_bitcode_to_file(output_file_name.to_string().as_str());
}
}
|
{}
|
conditional_block
|
children.rs
|
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicBool, Ordering};
use std::{thread, time};
use nix::sys::signal;
use nix::unistd::getpgid;
use nix::unistd::Pid;
use tokio::process::{Child, Command};
// We keep this relatively low to start to encourage interactivity. If users end up needing longer graceful
// shutdown periods, we can expose it as a per-process setting.
const GRACEFUL_SHUTDOWN_MAX_WAIT_TIME: time::Duration = time::Duration::from_secs(3);
const GRACEFUL_SHUTDOWN_POLL_TIME: time::Duration = time::Duration::from_millis(50);
/// A child process running in its own PGID, with a drop implementation that will kill that
/// PGID.
///
/// TODO: If this API is useful, we should consider extending it to parented Nailgun processes
/// and to all local execution in general. It could also be adjusted for sending other posix
/// signals in sequence for https://github.com/pantsbuild/pants/issues/13230.
pub struct ManagedChild {
  child: Child,
  // Set once the process group has been confirmed exited (or SIGKILLed),
  // so `Drop` can skip the graceful-shutdown path.
  killed: AtomicBool,
}
impl ManagedChild {
  /// Spawns `command` in a fresh session (its own PGID) so the whole
  /// process group can later be signalled as one unit.
  ///
  /// Returns an error string if the process cannot be spawned.
  pub fn spawn(mut command: Command) -> Result<Self, String> {
    // Set `kill_on_drop` to encourage `tokio` to `wait` the process via its own "reaping"
    // mechanism:
    // see https://docs.rs/tokio/1.14.0/tokio/process/struct.Command.html#method.kill_on_drop
    command.kill_on_drop(true);
    // Adjust the Command to create its own PGID as it starts, to make it safe to kill the PGID
    // later.
    unsafe {
      command.pre_exec(|| {
        nix::unistd::setsid().map(|_pgid| ()).map_err(|e| {
          std::io::Error::new(
            std::io::ErrorKind::Other,
            format!("Could not create new pgid: {}", e),
          )
        })
      });
    };
    // Then spawn.
    let child = command
      .spawn()
      .map_err(|e| format!("Error executing interactive process: {}", e))?;
    Ok(Self {
      child,
      killed: AtomicBool::new(false),
    })
  }
  /// Looks up the process group id of the child, or errors if the child has
  /// no PID (already reaped) or the pgid lookup fails.
  fn get_pgid(&self) -> Result<Pid, String> {
    let pid = self.id().ok_or_else(|| "Process had no PID.".to_owned())?;
    let pgid = getpgid(Some(Pid::from_raw(pid as i32)))
      .map_err(|e| format!("Could not get process group id of child process: {}", e))?;
    Ok(pgid)
  }
  /// Send a signal to the child process group.
  fn signal_pg<T: Into<Option<signal::Signal>>>(&mut self, signal: T) -> Result<(), String> {
    let pgid = self.get_pgid()?;
    // the negative PGID will signal the entire process group.
    signal::kill(Pid::from_raw(-pgid.as_raw()), signal)
      .map_err(|e| format!("Failed to interrupt child process group: {}", e))?;
    Ok(())
  }
  /// Check if the child has exited.
  ///
  /// This returns true if the child has exited with any return code, or false
  /// if the child has not yet exited. An error indicated a system error checking
  /// the result of the child process, and does not necessarily indicate that
  /// has exited or not.
  fn check_child_has_exited(&mut self) -> Result<bool, String> {
    // `try_wait` is non-blocking: Some(status) means exited.
    self
      .child
      .try_wait()
      .map(|o| o.is_some())
      .map_err(|e| e.to_string())
  }
  /// Synchronously wait for the child to exit.
  ///
  /// This method will repeatedly poll the child process until it exits, an error occurrs
  /// or the timeout is reached.
  ///
  /// A return value of Ok(true) indicates that the child has terminated, Ok(false) indicates
  /// that we reached the max_wait_duration while waiting for the child to terminate.
  ///
  /// This method *will* block the current thread but will do so for a bounded amount of time.
  fn wait_for_child_exit_sync(
    &mut self,
    max_wait_duration: time::Duration,
  ) -> Result<bool, String> {
    // Poll at GRACEFUL_SHUTDOWN_POLL_TIME granularity until the deadline.
    let deadline = time::Instant::now() + max_wait_duration;
    while time::Instant::now() <= deadline {
      if self.check_child_has_exited()? {
        return Ok(true);
      }
      thread::sleep(GRACEFUL_SHUTDOWN_POLL_TIME);
    }
    // if we get here we have timed-out
    Ok(false)
  }
  /// Attempt to gracefully shutdown the process.
  ///
  /// This will send a SIGINT to the process and give it a chance to shutdown gracefully. If the
  /// process does not respond to the SIGINT within a fixed interval, a SIGKILL will be sent.
  ///
  /// This method *will* block the current thread but will do so for a bounded amount of time.
  pub fn graceful_shutdown_sync(&mut self) -> Result<(), String> {
    self.signal_pg(signal::Signal::SIGINT)?;
    match self.wait_for_child_exit_sync(GRACEFUL_SHUTDOWN_MAX_WAIT_TIME) {
      Ok(true) => {
        // process was gracefully shutdown
        self.killed.store(true, Ordering::SeqCst);
        Ok(())
      }
      Ok(false) => {
        // we timed out waiting for the child to exit, so we need to kill it.
        log::warn!(
          "Timed out waiting for graceful shutdown of process group. Will try SIGKILL instead."
        );
        self.kill_pgid()
      }
      Err(e) => {
        log::warn!("An error occurred while waiting for graceful shutdown of process group ({}). Will try SIGKILL instead.", e);
        self.kill_pgid()
      }
    }
  }
  /// Kill the process's unique PGID or return an error if we don't have a PID or cannot kill.
  fn kill_pgid(&mut self) -> Result<(), String> {
    self.signal_pg(signal::Signal::SIGKILL)?;
    self.killed.store(true, Ordering::SeqCst);
    Ok(())
  }
}
impl Deref for ManagedChild {
type Target = Child;
fn deref(&self) -> &Child
|
}
impl DerefMut for ManagedChild {
fn deref_mut(&mut self) -> &mut Child {
&mut self.child
}
}
/// Implements drop by killing the process group.
impl Drop for ManagedChild {
  fn drop(&mut self) {
    // Best-effort: if no shutdown has succeeded yet, attempt the graceful
    // SIGINT-then-SIGKILL sequence and ignore any error — Drop cannot fail.
    let already_killed = self.killed.load(Ordering::SeqCst);
    if !already_killed {
      let _ = self.graceful_shutdown_sync();
    }
  }
}
|
{
&self.child
}
|
identifier_body
|
children.rs
|
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicBool, Ordering};
use std::{thread, time};
use nix::sys::signal;
use nix::unistd::getpgid;
use nix::unistd::Pid;
use tokio::process::{Child, Command};
// We keep this relatively low to start to encourage interactivity. If users end up needing longer graceful
// shutdown periods, we can expose it as a per-process setting.
const GRACEFUL_SHUTDOWN_MAX_WAIT_TIME: time::Duration = time::Duration::from_secs(3);
const GRACEFUL_SHUTDOWN_POLL_TIME: time::Duration = time::Duration::from_millis(50);
/// A child process running in its own PGID, with a drop implementation that will kill that
/// PGID.
///
/// TODO: If this API is useful, we should consider extending it to parented Nailgun processes
/// and to all local execution in general. It could also be adjusted for sending other posix
/// signals in sequence for https://github.com/pantsbuild/pants/issues/13230.
pub struct ManagedChild {
child: Child,
killed: AtomicBool,
}
impl ManagedChild {
pub fn spawn(mut command: Command) -> Result<Self, String> {
// Set `kill_on_drop` to encourage `tokio` to `wait` the process via its own "reaping"
// mechanism:
// see https://docs.rs/tokio/1.14.0/tokio/process/struct.Command.html#method.kill_on_drop
command.kill_on_drop(true);
// Adjust the Command to create its own PGID as it starts, to make it safe to kill the PGID
// later.
unsafe {
command.pre_exec(|| {
nix::unistd::setsid().map(|_pgid| ()).map_err(|e| {
std::io::Error::new(
std::io::ErrorKind::Other,
format!("Could not create new pgid: {}", e),
)
})
});
};
// Then spawn.
let child = command
.spawn()
.map_err(|e| format!("Error executing interactive process: {}", e))?;
|
fn get_pgid(&self) -> Result<Pid, String> {
let pid = self.id().ok_or_else(|| "Process had no PID.".to_owned())?;
let pgid = getpgid(Some(Pid::from_raw(pid as i32)))
.map_err(|e| format!("Could not get process group id of child process: {}", e))?;
Ok(pgid)
}
/// Send a signal to the child process group.
fn signal_pg<T: Into<Option<signal::Signal>>>(&mut self, signal: T) -> Result<(), String> {
let pgid = self.get_pgid()?;
// the negative PGID will signal the entire process group.
signal::kill(Pid::from_raw(-pgid.as_raw()), signal)
.map_err(|e| format!("Failed to interrupt child process group: {}", e))?;
Ok(())
}
/// Check if the child has exited.
///
/// This returns true if the child has exited with any return code, or false
/// if the child has not yet exited. An error indicated a system error checking
/// the result of the child process, and does not necessarily indicate that
/// has exited or not.
fn check_child_has_exited(&mut self) -> Result<bool, String> {
self
.child
.try_wait()
.map(|o| o.is_some())
.map_err(|e| e.to_string())
}
/// Synchronously wait for the child to exit.
///
/// This method will repeatedly poll the child process until it exits, an error occurrs
/// or the timeout is reached.
///
/// A return value of Ok(true) indicates that the child has terminated, Ok(false) indicates
/// that we reached the max_wait_duration while waiting for the child to terminate.
///
/// This method *will* block the current thread but will do so for a bounded amount of time.
fn wait_for_child_exit_sync(
&mut self,
max_wait_duration: time::Duration,
) -> Result<bool, String> {
let deadline = time::Instant::now() + max_wait_duration;
while time::Instant::now() <= deadline {
if self.check_child_has_exited()? {
return Ok(true);
}
thread::sleep(GRACEFUL_SHUTDOWN_POLL_TIME);
}
// if we get here we have timed-out
Ok(false)
}
/// Attempt to gracefully shutdown the process.
///
/// This will send a SIGINT to the process and give it a chance to shutdown gracefully. If the
/// process does not respond to the SIGINT within a fixed interval, a SIGKILL will be sent.
///
/// This method *will* block the current thread but will do so for a bounded amount of time.
pub fn graceful_shutdown_sync(&mut self) -> Result<(), String> {
self.signal_pg(signal::Signal::SIGINT)?;
match self.wait_for_child_exit_sync(GRACEFUL_SHUTDOWN_MAX_WAIT_TIME) {
Ok(true) => {
// process was gracefully shutdown
self.killed.store(true, Ordering::SeqCst);
Ok(())
}
Ok(false) => {
// we timed out waiting for the child to exit, so we need to kill it.
log::warn!(
"Timed out waiting for graceful shutdown of process group. Will try SIGKILL instead."
);
self.kill_pgid()
}
Err(e) => {
log::warn!("An error occurred while waiting for graceful shutdown of process group ({}). Will try SIGKILL instead.", e);
self.kill_pgid()
}
}
}
/// Kill the process's unique PGID or return an error if we don't have a PID or cannot kill.
fn kill_pgid(&mut self) -> Result<(), String> {
self.signal_pg(signal::Signal::SIGKILL)?;
self.killed.store(true, Ordering::SeqCst);
Ok(())
}
}
impl Deref for ManagedChild {
type Target = Child;
fn deref(&self) -> &Child {
&self.child
}
}
impl DerefMut for ManagedChild {
fn deref_mut(&mut self) -> &mut Child {
&mut self.child
}
}
/// Implements drop by killing the process group.
impl Drop for ManagedChild {
fn drop(&mut self) {
if!self.killed.load(Ordering::SeqCst) {
let _ = self.graceful_shutdown_sync();
}
}
}
|
Ok(Self {
child,
killed: AtomicBool::new(false),
})
}
|
random_line_split
|
children.rs
|
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicBool, Ordering};
use std::{thread, time};
use nix::sys::signal;
use nix::unistd::getpgid;
use nix::unistd::Pid;
use tokio::process::{Child, Command};
// We keep this relatively low to start to encourage interactivity. If users end up needing longer graceful
// shutdown periods, we can expose it as a per-process setting.
const GRACEFUL_SHUTDOWN_MAX_WAIT_TIME: time::Duration = time::Duration::from_secs(3);
const GRACEFUL_SHUTDOWN_POLL_TIME: time::Duration = time::Duration::from_millis(50);
/// A child process running in its own PGID, with a drop implementation that will kill that
/// PGID.
///
/// TODO: If this API is useful, we should consider extending it to parented Nailgun processes
/// and to all local execution in general. It could also be adjusted for sending other posix
/// signals in sequence for https://github.com/pantsbuild/pants/issues/13230.
pub struct ManagedChild {
child: Child,
killed: AtomicBool,
}
impl ManagedChild {
pub fn spawn(mut command: Command) -> Result<Self, String> {
// Set `kill_on_drop` to encourage `tokio` to `wait` the process via its own "reaping"
// mechanism:
// see https://docs.rs/tokio/1.14.0/tokio/process/struct.Command.html#method.kill_on_drop
command.kill_on_drop(true);
// Adjust the Command to create its own PGID as it starts, to make it safe to kill the PGID
// later.
unsafe {
command.pre_exec(|| {
nix::unistd::setsid().map(|_pgid| ()).map_err(|e| {
std::io::Error::new(
std::io::ErrorKind::Other,
format!("Could not create new pgid: {}", e),
)
})
});
};
// Then spawn.
let child = command
.spawn()
.map_err(|e| format!("Error executing interactive process: {}", e))?;
Ok(Self {
child,
killed: AtomicBool::new(false),
})
}
fn get_pgid(&self) -> Result<Pid, String> {
let pid = self.id().ok_or_else(|| "Process had no PID.".to_owned())?;
let pgid = getpgid(Some(Pid::from_raw(pid as i32)))
.map_err(|e| format!("Could not get process group id of child process: {}", e))?;
Ok(pgid)
}
/// Send a signal to the child process group.
fn signal_pg<T: Into<Option<signal::Signal>>>(&mut self, signal: T) -> Result<(), String> {
let pgid = self.get_pgid()?;
// the negative PGID will signal the entire process group.
signal::kill(Pid::from_raw(-pgid.as_raw()), signal)
.map_err(|e| format!("Failed to interrupt child process group: {}", e))?;
Ok(())
}
/// Check if the child has exited.
///
/// This returns true if the child has exited with any return code, or false
/// if the child has not yet exited. An error indicated a system error checking
/// the result of the child process, and does not necessarily indicate that
/// has exited or not.
fn check_child_has_exited(&mut self) -> Result<bool, String> {
self
.child
.try_wait()
.map(|o| o.is_some())
.map_err(|e| e.to_string())
}
/// Synchronously wait for the child to exit.
///
/// This method will repeatedly poll the child process until it exits, an error occurrs
/// or the timeout is reached.
///
/// A return value of Ok(true) indicates that the child has terminated, Ok(false) indicates
/// that we reached the max_wait_duration while waiting for the child to terminate.
///
/// This method *will* block the current thread but will do so for a bounded amount of time.
fn wait_for_child_exit_sync(
&mut self,
max_wait_duration: time::Duration,
) -> Result<bool, String> {
let deadline = time::Instant::now() + max_wait_duration;
while time::Instant::now() <= deadline {
if self.check_child_has_exited()? {
return Ok(true);
}
thread::sleep(GRACEFUL_SHUTDOWN_POLL_TIME);
}
// if we get here we have timed-out
Ok(false)
}
/// Attempt to gracefully shutdown the process.
///
/// This will send a SIGINT to the process and give it a chance to shutdown gracefully. If the
/// process does not respond to the SIGINT within a fixed interval, a SIGKILL will be sent.
///
/// This method *will* block the current thread but will do so for a bounded amount of time.
pub fn graceful_shutdown_sync(&mut self) -> Result<(), String> {
self.signal_pg(signal::Signal::SIGINT)?;
match self.wait_for_child_exit_sync(GRACEFUL_SHUTDOWN_MAX_WAIT_TIME) {
Ok(true) => {
// process was gracefully shutdown
self.killed.store(true, Ordering::SeqCst);
Ok(())
}
Ok(false) => {
// we timed out waiting for the child to exit, so we need to kill it.
log::warn!(
"Timed out waiting for graceful shutdown of process group. Will try SIGKILL instead."
);
self.kill_pgid()
}
Err(e) => {
log::warn!("An error occurred while waiting for graceful shutdown of process group ({}). Will try SIGKILL instead.", e);
self.kill_pgid()
}
}
}
/// Kill the process's unique PGID or return an error if we don't have a PID or cannot kill.
fn kill_pgid(&mut self) -> Result<(), String> {
self.signal_pg(signal::Signal::SIGKILL)?;
self.killed.store(true, Ordering::SeqCst);
Ok(())
}
}
impl Deref for ManagedChild {
type Target = Child;
fn
|
(&self) -> &Child {
&self.child
}
}
impl DerefMut for ManagedChild {
fn deref_mut(&mut self) -> &mut Child {
&mut self.child
}
}
/// Implements drop by killing the process group.
impl Drop for ManagedChild {
fn drop(&mut self) {
if!self.killed.load(Ordering::SeqCst) {
let _ = self.graceful_shutdown_sync();
}
}
}
|
deref
|
identifier_name
|
children.rs
|
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicBool, Ordering};
use std::{thread, time};
use nix::sys::signal;
use nix::unistd::getpgid;
use nix::unistd::Pid;
use tokio::process::{Child, Command};
// We keep this relatively low to start to encourage interactivity. If users end up needing longer graceful
// shutdown periods, we can expose it as a per-process setting.
const GRACEFUL_SHUTDOWN_MAX_WAIT_TIME: time::Duration = time::Duration::from_secs(3);
const GRACEFUL_SHUTDOWN_POLL_TIME: time::Duration = time::Duration::from_millis(50);
/// A child process running in its own PGID, with a drop implementation that will kill that
/// PGID.
///
/// TODO: If this API is useful, we should consider extending it to parented Nailgun processes
/// and to all local execution in general. It could also be adjusted for sending other posix
/// signals in sequence for https://github.com/pantsbuild/pants/issues/13230.
pub struct ManagedChild {
child: Child,
killed: AtomicBool,
}
impl ManagedChild {
pub fn spawn(mut command: Command) -> Result<Self, String> {
// Set `kill_on_drop` to encourage `tokio` to `wait` the process via its own "reaping"
// mechanism:
// see https://docs.rs/tokio/1.14.0/tokio/process/struct.Command.html#method.kill_on_drop
command.kill_on_drop(true);
// Adjust the Command to create its own PGID as it starts, to make it safe to kill the PGID
// later.
unsafe {
command.pre_exec(|| {
nix::unistd::setsid().map(|_pgid| ()).map_err(|e| {
std::io::Error::new(
std::io::ErrorKind::Other,
format!("Could not create new pgid: {}", e),
)
})
});
};
// Then spawn.
let child = command
.spawn()
.map_err(|e| format!("Error executing interactive process: {}", e))?;
Ok(Self {
child,
killed: AtomicBool::new(false),
})
}
fn get_pgid(&self) -> Result<Pid, String> {
let pid = self.id().ok_or_else(|| "Process had no PID.".to_owned())?;
let pgid = getpgid(Some(Pid::from_raw(pid as i32)))
.map_err(|e| format!("Could not get process group id of child process: {}", e))?;
Ok(pgid)
}
/// Send a signal to the child process group.
fn signal_pg<T: Into<Option<signal::Signal>>>(&mut self, signal: T) -> Result<(), String> {
let pgid = self.get_pgid()?;
// the negative PGID will signal the entire process group.
signal::kill(Pid::from_raw(-pgid.as_raw()), signal)
.map_err(|e| format!("Failed to interrupt child process group: {}", e))?;
Ok(())
}
/// Check if the child has exited.
///
/// This returns true if the child has exited with any return code, or false
/// if the child has not yet exited. An error indicated a system error checking
/// the result of the child process, and does not necessarily indicate that
/// has exited or not.
fn check_child_has_exited(&mut self) -> Result<bool, String> {
self
.child
.try_wait()
.map(|o| o.is_some())
.map_err(|e| e.to_string())
}
/// Synchronously wait for the child to exit.
///
/// This method will repeatedly poll the child process until it exits, an error occurrs
/// or the timeout is reached.
///
/// A return value of Ok(true) indicates that the child has terminated, Ok(false) indicates
/// that we reached the max_wait_duration while waiting for the child to terminate.
///
/// This method *will* block the current thread but will do so for a bounded amount of time.
fn wait_for_child_exit_sync(
&mut self,
max_wait_duration: time::Duration,
) -> Result<bool, String> {
let deadline = time::Instant::now() + max_wait_duration;
while time::Instant::now() <= deadline {
if self.check_child_has_exited()? {
return Ok(true);
}
thread::sleep(GRACEFUL_SHUTDOWN_POLL_TIME);
}
// if we get here we have timed-out
Ok(false)
}
/// Attempt to gracefully shutdown the process.
///
/// This will send a SIGINT to the process and give it a chance to shutdown gracefully. If the
/// process does not respond to the SIGINT within a fixed interval, a SIGKILL will be sent.
///
/// This method *will* block the current thread but will do so for a bounded amount of time.
pub fn graceful_shutdown_sync(&mut self) -> Result<(), String> {
self.signal_pg(signal::Signal::SIGINT)?;
match self.wait_for_child_exit_sync(GRACEFUL_SHUTDOWN_MAX_WAIT_TIME) {
Ok(true) => {
// process was gracefully shutdown
self.killed.store(true, Ordering::SeqCst);
Ok(())
}
Ok(false) =>
|
Err(e) => {
log::warn!("An error occurred while waiting for graceful shutdown of process group ({}). Will try SIGKILL instead.", e);
self.kill_pgid()
}
}
}
/// Kill the process's unique PGID or return an error if we don't have a PID or cannot kill.
fn kill_pgid(&mut self) -> Result<(), String> {
self.signal_pg(signal::Signal::SIGKILL)?;
self.killed.store(true, Ordering::SeqCst);
Ok(())
}
}
impl Deref for ManagedChild {
type Target = Child;
fn deref(&self) -> &Child {
&self.child
}
}
impl DerefMut for ManagedChild {
fn deref_mut(&mut self) -> &mut Child {
&mut self.child
}
}
/// Implements drop by killing the process group.
impl Drop for ManagedChild {
fn drop(&mut self) {
if!self.killed.load(Ordering::SeqCst) {
let _ = self.graceful_shutdown_sync();
}
}
}
|
{
// we timed out waiting for the child to exit, so we need to kill it.
log::warn!(
"Timed out waiting for graceful shutdown of process group. Will try SIGKILL instead."
);
self.kill_pgid()
}
|
conditional_block
|
autogen.rs
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Thrift compiler auto-generated support.
//!
//!
//! Types and functions used internally by the Thrift compiler's Rust plugin
//! to implement required functionality. Users should never have to use code
//! in this module directly.
use ::protocol::{TInputProtocol, TOutputProtocol};
/// Specifies the minimum functionality an auto-generated client should provide
/// to communicate with a Thrift server.
pub trait TThriftClient {
/// Returns the input protocol used to read serialized Thrift messages
/// from the Thrift server.
fn i_prot_mut(&mut self) -> &mut TInputProtocol;
/// Returns the output protocol used to write serialized Thrift messages
/// to the Thrift server.
fn o_prot_mut(&mut self) -> &mut TOutputProtocol;
/// Returns the sequence number of the last message written to the Thrift
|
/// simply because the Thrift protocol encodes sequence numbers as `i32` on
/// the wire.
fn sequence_number(&self) -> i32; // FIXME: consider returning a u32
/// Increments the sequence number, indicating that a message with that
/// number has been sent to the Thrift server.
fn increment_sequence_number(&mut self) -> i32;
}
|
/// server. Returns `0` if no messages have been written. Sequence
/// numbers should *never* be negative, and this method returns an `i32`
|
random_line_split
|
inline.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support for inlining external documentation into the current AST.
use syntax::ast;
use syntax::ast_util;
use syntax::attr::AttrMetaMethods;
use rustc::metadata::csearch;
use rustc::metadata::decoder;
use rustc::middle::def;
use rustc::middle::ty;
use rustc::middle::subst;
use rustc::middle::stability;
use core::DocContext;
use doctree;
use clean;
use super::Clean;
/// Attempt to inline the definition of a local node id into this AST.
///
/// This function will fetch the definition of the id specified, and if it is
/// from another crate it will attempt to inline the documentation from the
/// other crate into this crate.
///
/// This is primarily used for `pub use` statements which are, in general,
/// implementation details. Inlining the documentation should help provide a
/// better experience when reading the documentation in this use case.
///
/// The returned value is `None` if the `id` could not be inlined, and `Some`
/// of a vector of items if it was successfully expanded.
pub fn try_inline(cx: &DocContext, id: ast::NodeId, into: Option<ast::Ident>)
-> Option<Vec<clean::Item>> {
let tcx = match cx.tcx_opt() {
Some(tcx) => tcx,
None => return None,
};
let def = match tcx.def_map.borrow().get(&id) {
Some(d) => d.full_def(),
None => return None,
};
let did = def.def_id();
if ast_util::is_local(did) { return None }
try_inline_def(cx, tcx, def).map(|vec| {
vec.into_iter().map(|mut item| {
match into {
Some(into) if item.name.is_some() => {
item.name = Some(into.clean(cx));
}
_ => {}
}
item
}).collect()
})
}
fn try_inline_def(cx: &DocContext, tcx: &ty::ctxt,
def: def::Def) -> Option<Vec<clean::Item>> {
let mut ret = Vec::new();
let did = def.def_id();
let inner = match def {
def::DefTrait(did) => {
record_extern_fqn(cx, did, clean::TypeTrait);
clean::TraitItem(build_external_trait(cx, tcx, did))
}
def::DefFn(did, false) => {
// If this function is a tuple struct constructor, we just skip it
record_extern_fqn(cx, did, clean::TypeFunction);
clean::FunctionItem(build_external_function(cx, tcx, did))
}
def::DefStruct(did) => {
record_extern_fqn(cx, did, clean::TypeStruct);
ret.extend(build_impls(cx, tcx, did).into_iter());
clean::StructItem(build_struct(cx, tcx, did))
}
def::DefTy(did, false) => {
record_extern_fqn(cx, did, clean::TypeTypedef);
ret.extend(build_impls(cx, tcx, did).into_iter());
build_type(cx, tcx, did)
}
def::DefTy(did, true) => {
record_extern_fqn(cx, did, clean::TypeEnum);
ret.extend(build_impls(cx, tcx, did).into_iter());
build_type(cx, tcx, did)
}
// Assume that the enum type is reexported next to the variant, and
// variants don't show up in documentation specially.
def::DefVariant(..) => return Some(Vec::new()),
def::DefMod(did) => {
record_extern_fqn(cx, did, clean::TypeModule);
clean::ModuleItem(build_module(cx, tcx, did))
}
def::DefStatic(did, mtbl) => {
record_extern_fqn(cx, did, clean::TypeStatic);
clean::StaticItem(build_static(cx, tcx, did, mtbl))
}
def::DefConst(did) => {
record_extern_fqn(cx, did, clean::TypeConst);
clean::ConstantItem(build_const(cx, tcx, did))
}
_ => return None,
};
let fqn = csearch::get_item_path(tcx, did);
cx.inlined.borrow_mut().as_mut().unwrap().insert(did);
ret.push(clean::Item {
source: clean::Span::empty(),
name: Some(fqn.last().unwrap().to_string()),
attrs: load_attrs(cx, tcx, did),
inner: inner,
visibility: Some(ast::Public),
stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
Some(ret)
}
pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Attribute> {
let attrs = csearch::get_item_attrs(&tcx.sess.cstore, did);
attrs.into_iter().map(|a| a.clean(cx)).collect()
}
/// Record an external fully qualified name in the external_paths cache.
///
/// These names are used later on by HTML rendering to generate things like
/// source links back to the original item.
pub fn record_extern_fqn(cx: &DocContext, did: ast::DefId, kind: clean::TypeKind) {
match cx.tcx_opt() {
Some(tcx) => {
let fqn = csearch::get_item_path(tcx, did);
let fqn = fqn.into_iter().map(|i| i.to_string()).collect();
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
}
None => {}
}
}
pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Trait {
let def = ty::lookup_trait_def(tcx, did);
let trait_items = ty::trait_items(tcx, did).clean(cx);
let predicates = ty::lookup_predicates(tcx, did);
clean::Trait {
unsafety: def.unsafety,
generics: (&def.generics, &predicates, subst::TypeSpace).clean(cx),
items: trait_items,
bounds: vec![], // supertraits can be found in the list of predicates
}
}
fn build_external_function(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Function {
let t = ty::lookup_item_type(tcx, did);
let (decl, style) = match t.ty.sty {
ty::ty_bare_fn(_, ref f) => ((did, &f.sig).clean(cx), f.unsafety),
_ => panic!("bad function"),
};
let predicates = ty::lookup_predicates(tcx, did);
clean::Function {
decl: decl,
generics: (&t.generics, &predicates, subst::FnSpace).clean(cx),
unsafety: style,
}
}
fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
use syntax::parse::token::special_idents::unnamed_field;
let t = ty::lookup_item_type(tcx, did);
let predicates = ty::lookup_predicates(tcx, did);
let fields = ty::lookup_struct_fields(tcx, did);
clean::Struct {
struct_type: match &*fields {
[] => doctree::Unit,
[ref f] if f.name == unnamed_field.name => doctree::Newtype,
[ref f,..] if f.name == unnamed_field.name => doctree::Tuple,
_ => doctree::Plain,
},
generics: (&t.generics, &predicates, subst::TypeSpace).clean(cx),
fields: fields.clean(cx),
fields_stripped: false,
}
}
fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum {
let t = ty::lookup_item_type(tcx, did);
let predicates = ty::lookup_predicates(tcx, did);
match t.ty.sty {
ty::ty_enum(edid, _) if!csearch::is_typedef(&tcx.sess.cstore, did) => {
return clean::EnumItem(clean::Enum {
generics: (&t.generics, &predicates, subst::TypeSpace).clean(cx),
variants_stripped: false,
variants: ty::enum_variants(tcx, edid).clean(cx),
})
}
_ => {}
}
clean::TypedefItem(clean::Typedef {
type_: t.ty.clean(cx),
generics: (&t.generics, &predicates, subst::TypeSpace).clean(cx),
})
}
fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Item> {
ty::populate_implementations_for_type_if_necessary(tcx, did);
let mut impls = Vec::new();
match tcx.inherent_impls.borrow().get(&did) {
None => {}
Some(i) => {
impls.extend(i.iter().map(|&did| { build_impl(cx, tcx, did) }));
}
}
// If this is the first time we've inlined something from this crate, then
// we inline *all* impls from the crate into this crate. Note that there's
// currently no way for us to filter this based on type, and we likely need
// many impls for a variety of reasons.
//
// Primarily, the impls will be used to populate the documentation for this
// type being inlined, but impls can also be used when generating
// documentation for primitives (no way to find those specifically).
if cx.populated_crate_impls.borrow_mut().insert(did.krate) {
csearch::each_top_level_item_of_crate(&tcx.sess.cstore,
did.krate,
|def, _, _| {
populate_impls(cx, tcx, def, &mut impls)
});
fn populate_impls(cx: &DocContext, tcx: &ty::ctxt,
def: decoder::DefLike,
impls: &mut Vec<Option<clean::Item>>) {
match def {
decoder::DlImpl(did) => impls.push(build_impl(cx, tcx, did)),
decoder::DlDef(def::DefMod(did)) => {
csearch::each_child_of_item(&tcx.sess.cstore,
did,
|def, _, _| {
populate_impls(cx, tcx, def, impls)
})
}
_ => {}
}
}
}
impls.into_iter().filter_map(|a| a).collect()
}
fn build_impl(cx: &DocContext,
tcx: &ty::ctxt,
did: ast::DefId) -> Option<clean::Item> {
if!cx.inlined.borrow_mut().as_mut().unwrap().insert(did) {
return None
}
let attrs = load_attrs(cx, tcx, did);
let associated_trait = csearch::get_impl_trait(tcx, did);
if let Some(ref t) = associated_trait {
// If this is an impl for a #[doc(hidden)] trait, be sure to not inline
let trait_attrs = load_attrs(cx, tcx, t.def_id);
if trait_attrs.iter().any(|a| is_doc_hidden(a)) {
return None
}
}
// If this is a defaulted impl, then bail out early here
if csearch::is_default_impl(&tcx.sess.cstore, did) {
return Some(clean::Item {
inner: clean::DefaultImplItem(clean::DefaultImpl {
// FIXME: this should be decoded
unsafety: ast::Unsafety::Normal,
trait_: match associated_trait.as_ref().unwrap().clean(cx) {
clean::TraitBound(polyt, _) => polyt.trait_,
clean::RegionBound(..) => unreachable!(),
},
}),
source: clean::Span::empty(),
name: None,
attrs: attrs,
visibility: Some(ast::Inherited),
stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
}
let predicates = ty::lookup_predicates(tcx, did);
let trait_items = csearch::get_impl_items(&tcx.sess.cstore, did)
.iter()
.filter_map(|did| {
let did = did.def_id();
let impl_item = ty::impl_or_trait_item(tcx, did);
match impl_item {
ty::MethodTraitItem(method) => {
if method.vis!= ast::Public && associated_trait.is_none() {
return None
}
if method.provided_source.is_some() {
return None
}
let mut item = method.clean(cx);
item.inner = match item.inner.clone() {
clean::TyMethodItem(clean::TyMethod {
unsafety, decl, self_, generics, abi
}) => {
clean::MethodItem(clean::Method {
unsafety: unsafety,
decl: decl,
self_: self_,
generics: generics,
abi: abi
})
}
_ => panic!("not a tymethod"),
};
Some(item)
}
ty::TypeTraitItem(ref assoc_ty) => {
let did = assoc_ty.def_id;
let type_scheme = ty::lookup_item_type(tcx, did);
let predicates = ty::lookup_predicates(tcx, did);
// Not sure the choice of ParamSpace actually matters here, because an
// associated type won't have generics on the LHS
let typedef = (type_scheme, predicates, subst::ParamSpace::TypeSpace).clean(cx);
Some(clean::Item {
name: Some(assoc_ty.name.clean(cx)),
inner: clean::TypedefItem(typedef),
source: clean::Span::empty(),
attrs: vec![],
visibility: None,
stability: stability::lookup(tcx, did).clean(cx),
def_id: did
})
}
}
}).collect();
let polarity = csearch::get_impl_polarity(tcx, did);
let ty = ty::lookup_item_type(tcx, did);
return Some(clean::Item {
inner: clean::ImplItem(clean::Impl {
unsafety: ast::Unsafety::Normal, // FIXME: this should be decoded
derived: clean::detect_derived(&attrs),
trait_: associated_trait.clean(cx).map(|bound| {
match bound {
clean::TraitBound(polyt, _) => polyt.trait_,
clean::RegionBound(..) => unreachable!(),
}
}),
for_: ty.ty.clean(cx),
generics: (&ty.generics, &predicates, subst::TypeSpace).clean(cx),
items: trait_items,
polarity: polarity.map(|p| { p.clean(cx) }),
}),
source: clean::Span::empty(),
name: None,
attrs: attrs,
visibility: Some(ast::Inherited),
stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
fn is_doc_hidden(a: &clean::Attribute) -> bool {
match *a {
clean::List(ref name, ref inner) if *name == "doc" => {
inner.iter().any(|a| {
match *a {
clean::Word(ref s) => *s == "hidden",
_ => false,
}
})
}
_ => false
}
}
}
fn build_module(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Module {
let mut items = Vec::new();
fill_in(cx, tcx, did, &mut items);
return clean::Module {
items: items,
is_crate: false,
};
// FIXME: this doesn't handle reexports inside the module itself.
// Should they be handled?
fn fill_in(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId,
items: &mut Vec<clean::Item>) {
csearch::each_child_of_item(&tcx.sess.cstore, did, |def, _, vis| {
match def {
decoder::DlDef(def::DefForeignMod(did)) => {
fill_in(cx, tcx, did, items);
}
decoder::DlDef(def) if vis == ast::Public => {
match try_inline_def(cx, tcx, def) {
Some(i) => items.extend(i.into_iter()),
None => {}
}
}
decoder::DlDef(..) => {}
// All impls were inlined above
decoder::DlImpl(..) => {}
decoder::DlField => panic!("unimplemented field"),
}
});
}
}
fn build_const(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Constant {
use rustc::middle::const_eval;
use syntax::print::pprust;
let expr = const_eval::lookup_const_by_id(tcx, did).unwrap_or_else(|| {
panic!("expected lookup_const_by_id to succeed for {:?}", did);
});
debug!("converting constant expr {:?} to snippet", expr);
let sn = pprust::expr_to_string(expr);
debug!("got snippet {}", sn);
clean::Constant {
type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
expr: sn
}
}
fn
|
(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
mutable: bool) -> clean::Static {
clean::Static {
type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
mutability: if mutable {clean::Mutable} else {clean::Immutable},
expr: "\n\n\n".to_string(), // trigger the "[definition]" links
}
}
|
build_static
|
identifier_name
|
inline.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support for inlining external documentation into the current AST.
use syntax::ast;
use syntax::ast_util;
use syntax::attr::AttrMetaMethods;
use rustc::metadata::csearch;
use rustc::metadata::decoder;
use rustc::middle::def;
use rustc::middle::ty;
use rustc::middle::subst;
use rustc::middle::stability;
use core::DocContext;
use doctree;
use clean;
use super::Clean;
/// Attempt to inline the definition of a local node id into this AST.
///
/// This function will fetch the definition of the id specified, and if it is
/// from another crate it will attempt to inline the documentation from the
/// other crate into this crate.
///
/// This is primarily used for `pub use` statements which are, in general,
/// implementation details. Inlining the documentation should help provide a
/// better experience when reading the documentation in this use case.
///
/// The returned value is `None` if the `id` could not be inlined, and `Some`
/// of a vector of items if it was successfully expanded.
pub fn try_inline(cx: &DocContext, id: ast::NodeId, into: Option<ast::Ident>)
-> Option<Vec<clean::Item>> {
let tcx = match cx.tcx_opt() {
Some(tcx) => tcx,
None => return None,
};
let def = match tcx.def_map.borrow().get(&id) {
Some(d) => d.full_def(),
None => return None,
};
let did = def.def_id();
if ast_util::is_local(did) { return None }
try_inline_def(cx, tcx, def).map(|vec| {
vec.into_iter().map(|mut item| {
match into {
Some(into) if item.name.is_some() => {
item.name = Some(into.clean(cx));
}
_ => {}
}
item
}).collect()
})
}
/// Inline one resolved cross-crate definition `def`.
///
/// Returns `None` for definition kinds that cannot be inlined, and `Some`
/// with the rebuilt item (plus any impls pulled in alongside structs and
/// type definitions) otherwise.
fn try_inline_def(cx: &DocContext, tcx: &ty::ctxt,
                  def: def::Def) -> Option<Vec<clean::Item>> {
    let mut ret = Vec::new();
    let did = def.def_id();
    let inner = match def {
        def::DefTrait(did) => {
            record_extern_fqn(cx, did, clean::TypeTrait);
            clean::TraitItem(build_external_trait(cx, tcx, did))
        }
        // NOTE(review): the `false` pattern already excludes tuple-struct
        // constructor fns — those fall through to the `_` arm below.
        def::DefFn(did, false) => {
            // If this function is a tuple struct constructor, we just skip it
            record_extern_fqn(cx, did, clean::TypeFunction);
            clean::FunctionItem(build_external_function(cx, tcx, did))
        }
        def::DefStruct(did) => {
            record_extern_fqn(cx, did, clean::TypeStruct);
            // Impls are inlined together with the type they apply to.
            ret.extend(build_impls(cx, tcx, did).into_iter());
            clean::StructItem(build_struct(cx, tcx, did))
        }
        def::DefTy(did, false) => {
            record_extern_fqn(cx, did, clean::TypeTypedef);
            ret.extend(build_impls(cx, tcx, did).into_iter());
            build_type(cx, tcx, did)
        }
        def::DefTy(did, true) => {
            record_extern_fqn(cx, did, clean::TypeEnum);
            ret.extend(build_impls(cx, tcx, did).into_iter());
            build_type(cx, tcx, did)
        }
        // Assume that the enum type is reexported next to the variant, and
        // variants don't show up in documentation specially.
        def::DefVariant(..) => return Some(Vec::new()),
        def::DefMod(did) => {
            record_extern_fqn(cx, did, clean::TypeModule);
            clean::ModuleItem(build_module(cx, tcx, did))
        }
        def::DefStatic(did, mtbl) => {
            record_extern_fqn(cx, did, clean::TypeStatic);
            clean::StaticItem(build_static(cx, tcx, did, mtbl))
        }
        def::DefConst(did) => {
            record_extern_fqn(cx, did, clean::TypeConst);
            clean::ConstantItem(build_const(cx, tcx, did))
        }
        _ => return None,
    };
    let fqn = csearch::get_item_path(tcx, did);
    // Mark the id as inlined so later passes don't duplicate it.
    cx.inlined.borrow_mut().as_mut().unwrap().insert(did);
    ret.push(clean::Item {
        source: clean::Span::empty(),
        // The item's name is the last segment of its full path.
        name: Some(fqn.last().unwrap().to_string()),
        attrs: load_attrs(cx, tcx, did),
        inner: inner,
        visibility: Some(ast::Public),
        stability: stability::lookup(tcx, did).clean(cx),
        def_id: did,
    });
    Some(ret)
}
/// Load the attributes of the external item `did` from crate metadata and
/// convert each one into its cleaned documentation form.
pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt,
                  did: ast::DefId) -> Vec<clean::Attribute> {
    csearch::get_item_attrs(&tcx.sess.cstore, did)
        .into_iter()
        .map(|attr| attr.clean(cx))
        .collect()
}
/// Record an external fully qualified name in the external_paths cache.
///
/// These names are used later on by HTML rendering to generate things like
/// source links back to the original item. Does nothing when no type
/// context is available.
pub fn record_extern_fqn(cx: &DocContext, did: ast::DefId, kind: clean::TypeKind) {
    // `if let` replaces the previous `match` with an empty `None` arm.
    if let Some(tcx) = cx.tcx_opt() {
        let fqn = csearch::get_item_path(tcx, did);
        let fqn = fqn.into_iter().map(|i| i.to_string()).collect();
        cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
    }
}
/// Reconstruct a `clean::Trait` for the external trait `did` from the type
/// context's metadata.
pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
                            did: ast::DefId) -> clean::Trait {
    let trait_def = ty::lookup_trait_def(tcx, did);
    let predicates = ty::lookup_predicates(tcx, did);
    let items = ty::trait_items(tcx, did).clean(cx);
    clean::Trait {
        unsafety: trait_def.unsafety,
        generics: (&trait_def.generics, &predicates, subst::TypeSpace).clean(cx),
        items: items,
        // Supertraits can be found in the list of predicates, so no
        // explicit bounds are recorded here.
        bounds: vec![],
    }
}
/// Reconstruct a `clean::Function` for the external function `did`.
///
/// Panics when the looked-up item type is not a bare function type.
fn build_external_function(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Function {
    let scheme = ty::lookup_item_type(tcx, did);
    let (decl, unsafety) = match scheme.ty.sty {
        ty::ty_bare_fn(_, ref f) => ((did, &f.sig).clean(cx), f.unsafety),
        _ => panic!("bad function"),
    };
    let predicates = ty::lookup_predicates(tcx, did);
    clean::Function {
        decl: decl,
        generics: (&scheme.generics, &predicates, subst::FnSpace).clean(cx),
        unsafety: unsafety,
    }
}
/// Reconstruct a `clean::Struct` for the external struct `did`, classifying
/// it as a unit, newtype, tuple, or plain struct by the shape of its fields.
fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
    use syntax::parse::token::special_idents::unnamed_field;

    let scheme = ty::lookup_item_type(tcx, did);
    let predicates = ty::lookup_predicates(tcx, did);
    let fields = ty::lookup_struct_fields(tcx, did);
    // An unnamed first field marks a tuple-like struct; exactly one such
    // field is a newtype. The single-element arm must precede the tuple arm.
    let struct_type = match &*fields {
        [] => doctree::Unit,
        [ref f] if f.name == unnamed_field.name => doctree::Newtype,
        [ref f, ..] if f.name == unnamed_field.name => doctree::Tuple,
        _ => doctree::Plain,
    };
    clean::Struct {
        struct_type: struct_type,
        generics: (&scheme.generics, &predicates, subst::TypeSpace).clean(cx),
        fields: fields.clean(cx),
        fields_stripped: false,
    }
}
/// Reconstruct the item for an external type definition: a genuine enum is
/// rebuilt as an `EnumItem`, everything else becomes a `TypedefItem`.
fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum {
    let scheme = ty::lookup_item_type(tcx, did);
    let predicates = ty::lookup_predicates(tcx, did);
    if let ty::ty_enum(edid, _) = scheme.ty.sty {
        // `is_typedef` filters out typedefs whose target merely happens to
        // be an enum; those are rendered as typedefs below.
        if !csearch::is_typedef(&tcx.sess.cstore, did) {
            return clean::EnumItem(clean::Enum {
                generics: (&scheme.generics, &predicates, subst::TypeSpace).clean(cx),
                variants_stripped: false,
                variants: ty::enum_variants(tcx, edid).clean(cx),
            });
        }
    }
    clean::TypedefItem(clean::Typedef {
        type_: scheme.ty.clean(cx),
        generics: (&scheme.generics, &predicates, subst::TypeSpace).clean(cx),
    })
}
/// Collect the impls that should be inlined alongside the type `did`.
///
/// `build_impl` returns `Option`; the `None` entries (already-inlined or
/// hidden impls) are filtered out at the end.
fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
               did: ast::DefId) -> Vec<clean::Item> {
    ty::populate_implementations_for_type_if_necessary(tcx, did);
    let mut impls = Vec::new();

    // Inherent impls recorded for this type.
    match tcx.inherent_impls.borrow().get(&did) {
        None => {}
        Some(i) => {
            impls.extend(i.iter().map(|&did| { build_impl(cx, tcx, did) }));
        }
    }

    // If this is the first time we've inlined something from this crate, then
    // we inline *all* impls from the crate into this crate. Note that there's
    // currently no way for us to filter this based on type, and we likely need
    // many impls for a variety of reasons.
    //
    // Primarily, the impls will be used to populate the documentation for this
    // type being inlined, but impls can also be used when generating
    // documentation for primitives (no way to find those specifically).
    if cx.populated_crate_impls.borrow_mut().insert(did.krate) {
        csearch::each_top_level_item_of_crate(&tcx.sess.cstore,
                                              did.krate,
                                              |def, _, _| {
            populate_impls(cx, tcx, def, &mut impls)
        });

        // Walk a metadata item, recursing into modules, pushing every impl
        // encountered onto `impls`.
        fn populate_impls(cx: &DocContext, tcx: &ty::ctxt,
                          def: decoder::DefLike,
                          impls: &mut Vec<Option<clean::Item>>) {
            match def {
                decoder::DlImpl(did) => impls.push(build_impl(cx, tcx, did)),
                decoder::DlDef(def::DefMod(did)) => {
                    csearch::each_child_of_item(&tcx.sess.cstore,
                                                did,
                                                |def, _, _| {
                        populate_impls(cx, tcx, def, impls)
                    })
                }
                _ => {}
            }
        }
    }

    impls.into_iter().filter_map(|a| a).collect()
}
/// Inline a single impl, returning `None` when the impl was already
/// inlined or belongs to a `#[doc(hidden)]` trait.
fn build_impl(cx: &DocContext,
              tcx: &ty::ctxt,
              did: ast::DefId) -> Option<clean::Item> {
    // `insert` returns false when the id was already present, i.e. this
    // impl has been inlined before.
    if !cx.inlined.borrow_mut().as_mut().unwrap().insert(did) {
        return None
    }

    let attrs = load_attrs(cx, tcx, did);
    let associated_trait = csearch::get_impl_trait(tcx, did);
    if let Some(ref t) = associated_trait {
        // If this is an impl for a #[doc(hidden)] trait, be sure to not inline
        let trait_attrs = load_attrs(cx, tcx, t.def_id);
        if trait_attrs.iter().any(|a| is_doc_hidden(a)) {
            return None
        }
    }

    // If this is a defaulted impl, then bail out early here
    if csearch::is_default_impl(&tcx.sess.cstore, did) {
        return Some(clean::Item {
            inner: clean::DefaultImplItem(clean::DefaultImpl {
                // FIXME: this should be decoded
                unsafety: ast::Unsafety::Normal,
                trait_: match associated_trait.as_ref().unwrap().clean(cx) {
                    clean::TraitBound(polyt, _) => polyt.trait_,
                    clean::RegionBound(..) => unreachable!(),
                },
            }),
            source: clean::Span::empty(),
            name: None,
            attrs: attrs,
            visibility: Some(ast::Inherited),
            stability: stability::lookup(tcx, did).clean(cx),
            def_id: did,
        });
    }

    let predicates = ty::lookup_predicates(tcx, did);
    let trait_items = csearch::get_impl_items(&tcx.sess.cstore, did)
            .iter()
            .filter_map(|did| {
        let did = did.def_id();
        let impl_item = ty::impl_or_trait_item(tcx, did);
        match impl_item {
            ty::MethodTraitItem(method) => {
                // Skip private inherent methods, and provided trait methods
                // (the latter are documented on the trait itself).
                if method.vis != ast::Public && associated_trait.is_none() {
                    return None
                }
                if method.provided_source.is_some() {
                    return None
                }
                let mut item = method.clean(cx);
                // Metadata cleans to a `TyMethodItem`; re-tag it as a
                // `MethodItem` since impl members have bodies.
                item.inner = match item.inner.clone() {
                    clean::TyMethodItem(clean::TyMethod {
                        unsafety, decl, self_, generics, abi
                    }) => {
                        clean::MethodItem(clean::Method {
                            unsafety: unsafety,
                            decl: decl,
                            self_: self_,
                            generics: generics,
                            abi: abi
                        })
                    }
                    _ => panic!("not a tymethod"),
                };
                Some(item)
            }
            ty::TypeTraitItem(ref assoc_ty) => {
                let did = assoc_ty.def_id;
                let type_scheme = ty::lookup_item_type(tcx, did);
                let predicates = ty::lookup_predicates(tcx, did);
                // Not sure the choice of ParamSpace actually matters here, because an
                // associated type won't have generics on the LHS
                let typedef = (type_scheme, predicates, subst::ParamSpace::TypeSpace).clean(cx);
                Some(clean::Item {
                    name: Some(assoc_ty.name.clean(cx)),
                    inner: clean::TypedefItem(typedef),
                    source: clean::Span::empty(),
                    attrs: vec![],
                    visibility: None,
                    stability: stability::lookup(tcx, did).clean(cx),
                    def_id: did
                })
            }
        }
    }).collect();
    let polarity = csearch::get_impl_polarity(tcx, did);
    let ty = ty::lookup_item_type(tcx, did);
    return Some(clean::Item {
        inner: clean::ImplItem(clean::Impl {
            unsafety: ast::Unsafety::Normal, // FIXME: this should be decoded
            derived: clean::detect_derived(&attrs),
            trait_: associated_trait.clean(cx).map(|bound| {
                match bound {
                    clean::TraitBound(polyt, _) => polyt.trait_,
                    clean::RegionBound(..) => unreachable!(),
                }
            }),
            for_: ty.ty.clean(cx),
            generics: (&ty.generics, &predicates, subst::TypeSpace).clean(cx),
            items: trait_items,
            polarity: polarity.map(|p| { p.clean(cx) }),
        }),
        source: clean::Span::empty(),
        name: None,
        attrs: attrs,
        visibility: Some(ast::Inherited),
        stability: stability::lookup(tcx, did).clean(cx),
        def_id: did,
    });

    // Does attribute `a` spell `#[doc(hidden)]`?
    fn is_doc_hidden(a: &clean::Attribute) -> bool {
        match *a {
            clean::List(ref name, ref inner) if *name == "doc" => {
                inner.iter().any(|a| {
                    match *a {
                        clean::Word(ref s) => *s == "hidden",
                        _ => false,
                    }
                })
            }
            _ => false
        }
    }
}
/// Rebuild an external module by inlining each of its public children.
fn build_module(cx: &DocContext, tcx: &ty::ctxt,
                did: ast::DefId) -> clean::Module {
    let mut items = Vec::new();
    fill_in(cx, tcx, did, &mut items);
    return clean::Module {
        items: items,
        is_crate: false,
    };

    // FIXME: this doesn't handle reexports inside the module itself.
    //        Should they be handled?
    fn fill_in(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId,
               items: &mut Vec<clean::Item>) {
        csearch::each_child_of_item(&tcx.sess.cstore, did, |def, _, vis| {
            match def {
                // Recurse straight into foreign mods: their contents are
                // handled individually.
                decoder::DlDef(def::DefForeignMod(did)) => {
                    fill_in(cx, tcx, did, items);
                }
                // Only public children are inlined.
                decoder::DlDef(def) if vis == ast::Public => {
                    match try_inline_def(cx, tcx, def) {
                        Some(i) => items.extend(i.into_iter()),
                        None => {}
                    }
                }
                decoder::DlDef(..) => {}
                // All impls were inlined above
                decoder::DlImpl(..) => {}
                decoder::DlField => panic!("unimplemented field"),
            }
        });
    }
}
fn build_const(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Constant {
use rustc::middle::const_eval;
use syntax::print::pprust;
let expr = const_eval::lookup_const_by_id(tcx, did).unwrap_or_else(|| {
panic!("expected lookup_const_by_id to succeed for {:?}", did);
});
debug!("converting constant expr {:?} to snippet", expr);
let sn = pprust::expr_to_string(expr);
debug!("got snippet {}", sn);
clean::Constant {
type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
expr: sn
|
did: ast::DefId,
mutable: bool) -> clean::Static {
clean::Static {
type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
mutability: if mutable {clean::Mutable} else {clean::Immutable},
expr: "\n\n\n".to_string(), // trigger the "[definition]" links
}
}
|
}
}
fn build_static(cx: &DocContext, tcx: &ty::ctxt,
|
random_line_split
|
inline.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support for inlining external documentation into the current AST.
use syntax::ast;
use syntax::ast_util;
use syntax::attr::AttrMetaMethods;
use rustc::metadata::csearch;
use rustc::metadata::decoder;
use rustc::middle::def;
use rustc::middle::ty;
use rustc::middle::subst;
use rustc::middle::stability;
use core::DocContext;
use doctree;
use clean;
use super::Clean;
/// Attempt to inline the definition of a local node id into this AST.
///
/// This function will fetch the definition of the id specified, and if it is
/// from another crate it will attempt to inline the documentation from the
/// other crate into this crate.
///
/// This is primarily used for `pub use` statements which are, in general,
/// implementation details. Inlining the documentation should help provide a
/// better experience when reading the documentation in this use case.
///
/// The returned value is `None` if the `id` could not be inlined, and `Some`
/// of a vector of items if it was successfully expanded.
pub fn try_inline(cx: &DocContext, id: ast::NodeId, into: Option<ast::Ident>)
                  -> Option<Vec<clean::Item>> {
    // No type context means no metadata to inline from.
    let tcx = match cx.tcx_opt() {
        Some(tcx) => tcx,
        None => return None,
    };
    // Resolve the node id to a definition; unresolved ids cannot be inlined.
    let def = match tcx.def_map.borrow().get(&id) {
        Some(d) => d.full_def(),
        None => return None,
    };
    // Only cross-crate definitions are inlined; local items are documented
    // directly from their AST.
    if ast_util::is_local(def.def_id()) {
        return None;
    }
    let items = match try_inline_def(cx, tcx, def) {
        Some(items) => items,
        None => return None,
    };
    // Rename each named item to the identifier the `use` bound it to.
    Some(items.into_iter().map(|mut item| {
        if item.name.is_some() {
            if let Some(ident) = into {
                item.name = Some(ident.clean(cx));
            }
        }
        item
    }).collect())
}
/// Inline one resolved cross-crate definition `def`.
///
/// Returns `None` for definition kinds that cannot be inlined, and `Some`
/// with the rebuilt item (plus any impls pulled in alongside structs and
/// type definitions) otherwise.
fn try_inline_def(cx: &DocContext, tcx: &ty::ctxt,
                  def: def::Def) -> Option<Vec<clean::Item>> {
    let mut ret = Vec::new();
    let did = def.def_id();
    let inner = match def {
        def::DefTrait(did) => {
            record_extern_fqn(cx, did, clean::TypeTrait);
            clean::TraitItem(build_external_trait(cx, tcx, did))
        }
        // NOTE(review): the `false` pattern already excludes tuple-struct
        // constructor fns — those fall through to the `_` arm below.
        def::DefFn(did, false) => {
            // If this function is a tuple struct constructor, we just skip it
            record_extern_fqn(cx, did, clean::TypeFunction);
            clean::FunctionItem(build_external_function(cx, tcx, did))
        }
        def::DefStruct(did) => {
            record_extern_fqn(cx, did, clean::TypeStruct);
            // Impls are inlined together with the type they apply to.
            ret.extend(build_impls(cx, tcx, did).into_iter());
            clean::StructItem(build_struct(cx, tcx, did))
        }
        def::DefTy(did, false) => {
            record_extern_fqn(cx, did, clean::TypeTypedef);
            ret.extend(build_impls(cx, tcx, did).into_iter());
            build_type(cx, tcx, did)
        }
        def::DefTy(did, true) => {
            record_extern_fqn(cx, did, clean::TypeEnum);
            ret.extend(build_impls(cx, tcx, did).into_iter());
            build_type(cx, tcx, did)
        }
        // Assume that the enum type is reexported next to the variant, and
        // variants don't show up in documentation specially.
        def::DefVariant(..) => return Some(Vec::new()),
        def::DefMod(did) => {
            record_extern_fqn(cx, did, clean::TypeModule);
            clean::ModuleItem(build_module(cx, tcx, did))
        }
        def::DefStatic(did, mtbl) => {
            record_extern_fqn(cx, did, clean::TypeStatic);
            clean::StaticItem(build_static(cx, tcx, did, mtbl))
        }
        def::DefConst(did) => {
            record_extern_fqn(cx, did, clean::TypeConst);
            clean::ConstantItem(build_const(cx, tcx, did))
        }
        _ => return None,
    };
    let fqn = csearch::get_item_path(tcx, did);
    // Mark the id as inlined so later passes don't duplicate it.
    cx.inlined.borrow_mut().as_mut().unwrap().insert(did);
    ret.push(clean::Item {
        source: clean::Span::empty(),
        // The item's name is the last segment of its full path.
        name: Some(fqn.last().unwrap().to_string()),
        attrs: load_attrs(cx, tcx, did),
        inner: inner,
        visibility: Some(ast::Public),
        stability: stability::lookup(tcx, did).clean(cx),
        def_id: did,
    });
    Some(ret)
}
/// Load the attributes of the external item `did` from crate metadata and
/// convert each one into its cleaned documentation form.
pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt,
                  did: ast::DefId) -> Vec<clean::Attribute> {
    csearch::get_item_attrs(&tcx.sess.cstore, did)
        .into_iter()
        .map(|attr| attr.clean(cx))
        .collect()
}
/// Record an external fully qualified name in the external_paths cache.
///
/// These names are used later on by HTML rendering to generate things like
/// source links back to the original item. Does nothing when no type
/// context is available.
pub fn record_extern_fqn(cx: &DocContext, did: ast::DefId, kind: clean::TypeKind) {
    // `if let` replaces the previous `match` with an empty `None` arm.
    if let Some(tcx) = cx.tcx_opt() {
        let fqn = csearch::get_item_path(tcx, did);
        let fqn = fqn.into_iter().map(|i| i.to_string()).collect();
        cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
    }
}
pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Trait
|
/// Reconstruct a `clean::Function` for the external function `did`.
///
/// Panics when the looked-up item type is not a bare function type.
fn build_external_function(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Function {
    let scheme = ty::lookup_item_type(tcx, did);
    let (decl, unsafety) = match scheme.ty.sty {
        ty::ty_bare_fn(_, ref f) => ((did, &f.sig).clean(cx), f.unsafety),
        _ => panic!("bad function"),
    };
    let predicates = ty::lookup_predicates(tcx, did);
    clean::Function {
        decl: decl,
        generics: (&scheme.generics, &predicates, subst::FnSpace).clean(cx),
        unsafety: unsafety,
    }
}
/// Reconstruct a `clean::Struct` for the external struct `did`, classifying
/// it as a unit, newtype, tuple, or plain struct by the shape of its fields.
fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
    use syntax::parse::token::special_idents::unnamed_field;

    let scheme = ty::lookup_item_type(tcx, did);
    let predicates = ty::lookup_predicates(tcx, did);
    let fields = ty::lookup_struct_fields(tcx, did);
    // An unnamed first field marks a tuple-like struct; exactly one such
    // field is a newtype. The single-element arm must precede the tuple arm.
    let struct_type = match &*fields {
        [] => doctree::Unit,
        [ref f] if f.name == unnamed_field.name => doctree::Newtype,
        [ref f, ..] if f.name == unnamed_field.name => doctree::Tuple,
        _ => doctree::Plain,
    };
    clean::Struct {
        struct_type: struct_type,
        generics: (&scheme.generics, &predicates, subst::TypeSpace).clean(cx),
        fields: fields.clean(cx),
        fields_stripped: false,
    }
}
/// Reconstruct the item for an external type definition: a genuine enum is
/// rebuilt as an `EnumItem`, everything else becomes a `TypedefItem`.
fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum {
    let scheme = ty::lookup_item_type(tcx, did);
    let predicates = ty::lookup_predicates(tcx, did);
    if let ty::ty_enum(edid, _) = scheme.ty.sty {
        // `is_typedef` filters out typedefs whose target merely happens to
        // be an enum; those are rendered as typedefs below.
        if !csearch::is_typedef(&tcx.sess.cstore, did) {
            return clean::EnumItem(clean::Enum {
                generics: (&scheme.generics, &predicates, subst::TypeSpace).clean(cx),
                variants_stripped: false,
                variants: ty::enum_variants(tcx, edid).clean(cx),
            });
        }
    }
    clean::TypedefItem(clean::Typedef {
        type_: scheme.ty.clean(cx),
        generics: (&scheme.generics, &predicates, subst::TypeSpace).clean(cx),
    })
}
/// Collect the impls that should be inlined alongside the type `did`.
///
/// `build_impl` returns `Option`; the `None` entries (already-inlined or
/// hidden impls) are filtered out at the end.
fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
               did: ast::DefId) -> Vec<clean::Item> {
    ty::populate_implementations_for_type_if_necessary(tcx, did);
    let mut impls = Vec::new();

    // Inherent impls recorded for this type.
    match tcx.inherent_impls.borrow().get(&did) {
        None => {}
        Some(i) => {
            impls.extend(i.iter().map(|&did| { build_impl(cx, tcx, did) }));
        }
    }

    // If this is the first time we've inlined something from this crate, then
    // we inline *all* impls from the crate into this crate. Note that there's
    // currently no way for us to filter this based on type, and we likely need
    // many impls for a variety of reasons.
    //
    // Primarily, the impls will be used to populate the documentation for this
    // type being inlined, but impls can also be used when generating
    // documentation for primitives (no way to find those specifically).
    if cx.populated_crate_impls.borrow_mut().insert(did.krate) {
        csearch::each_top_level_item_of_crate(&tcx.sess.cstore,
                                              did.krate,
                                              |def, _, _| {
            populate_impls(cx, tcx, def, &mut impls)
        });

        // Walk a metadata item, recursing into modules, pushing every impl
        // encountered onto `impls`.
        fn populate_impls(cx: &DocContext, tcx: &ty::ctxt,
                          def: decoder::DefLike,
                          impls: &mut Vec<Option<clean::Item>>) {
            match def {
                decoder::DlImpl(did) => impls.push(build_impl(cx, tcx, did)),
                decoder::DlDef(def::DefMod(did)) => {
                    csearch::each_child_of_item(&tcx.sess.cstore,
                                                did,
                                                |def, _, _| {
                        populate_impls(cx, tcx, def, impls)
                    })
                }
                _ => {}
            }
        }
    }

    impls.into_iter().filter_map(|a| a).collect()
}
/// Inline a single impl, returning `None` when the impl was already
/// inlined or belongs to a `#[doc(hidden)]` trait.
fn build_impl(cx: &DocContext,
              tcx: &ty::ctxt,
              did: ast::DefId) -> Option<clean::Item> {
    // `insert` returns false when the id was already present, i.e. this
    // impl has been inlined before.
    if !cx.inlined.borrow_mut().as_mut().unwrap().insert(did) {
        return None
    }

    let attrs = load_attrs(cx, tcx, did);
    let associated_trait = csearch::get_impl_trait(tcx, did);
    if let Some(ref t) = associated_trait {
        // If this is an impl for a #[doc(hidden)] trait, be sure to not inline
        let trait_attrs = load_attrs(cx, tcx, t.def_id);
        if trait_attrs.iter().any(|a| is_doc_hidden(a)) {
            return None
        }
    }

    // If this is a defaulted impl, then bail out early here
    if csearch::is_default_impl(&tcx.sess.cstore, did) {
        return Some(clean::Item {
            inner: clean::DefaultImplItem(clean::DefaultImpl {
                // FIXME: this should be decoded
                unsafety: ast::Unsafety::Normal,
                trait_: match associated_trait.as_ref().unwrap().clean(cx) {
                    clean::TraitBound(polyt, _) => polyt.trait_,
                    clean::RegionBound(..) => unreachable!(),
                },
            }),
            source: clean::Span::empty(),
            name: None,
            attrs: attrs,
            visibility: Some(ast::Inherited),
            stability: stability::lookup(tcx, did).clean(cx),
            def_id: did,
        });
    }

    let predicates = ty::lookup_predicates(tcx, did);
    let trait_items = csearch::get_impl_items(&tcx.sess.cstore, did)
            .iter()
            .filter_map(|did| {
        let did = did.def_id();
        let impl_item = ty::impl_or_trait_item(tcx, did);
        match impl_item {
            ty::MethodTraitItem(method) => {
                // Skip private inherent methods, and provided trait methods
                // (the latter are documented on the trait itself).
                if method.vis != ast::Public && associated_trait.is_none() {
                    return None
                }
                if method.provided_source.is_some() {
                    return None
                }
                let mut item = method.clean(cx);
                // Metadata cleans to a `TyMethodItem`; re-tag it as a
                // `MethodItem` since impl members have bodies.
                item.inner = match item.inner.clone() {
                    clean::TyMethodItem(clean::TyMethod {
                        unsafety, decl, self_, generics, abi
                    }) => {
                        clean::MethodItem(clean::Method {
                            unsafety: unsafety,
                            decl: decl,
                            self_: self_,
                            generics: generics,
                            abi: abi
                        })
                    }
                    _ => panic!("not a tymethod"),
                };
                Some(item)
            }
            ty::TypeTraitItem(ref assoc_ty) => {
                let did = assoc_ty.def_id;
                let type_scheme = ty::lookup_item_type(tcx, did);
                let predicates = ty::lookup_predicates(tcx, did);
                // Not sure the choice of ParamSpace actually matters here, because an
                // associated type won't have generics on the LHS
                let typedef = (type_scheme, predicates, subst::ParamSpace::TypeSpace).clean(cx);
                Some(clean::Item {
                    name: Some(assoc_ty.name.clean(cx)),
                    inner: clean::TypedefItem(typedef),
                    source: clean::Span::empty(),
                    attrs: vec![],
                    visibility: None,
                    stability: stability::lookup(tcx, did).clean(cx),
                    def_id: did
                })
            }
        }
    }).collect();
    let polarity = csearch::get_impl_polarity(tcx, did);
    let ty = ty::lookup_item_type(tcx, did);
    return Some(clean::Item {
        inner: clean::ImplItem(clean::Impl {
            unsafety: ast::Unsafety::Normal, // FIXME: this should be decoded
            derived: clean::detect_derived(&attrs),
            trait_: associated_trait.clean(cx).map(|bound| {
                match bound {
                    clean::TraitBound(polyt, _) => polyt.trait_,
                    clean::RegionBound(..) => unreachable!(),
                }
            }),
            for_: ty.ty.clean(cx),
            generics: (&ty.generics, &predicates, subst::TypeSpace).clean(cx),
            items: trait_items,
            polarity: polarity.map(|p| { p.clean(cx) }),
        }),
        source: clean::Span::empty(),
        name: None,
        attrs: attrs,
        visibility: Some(ast::Inherited),
        stability: stability::lookup(tcx, did).clean(cx),
        def_id: did,
    });

    // Does attribute `a` spell `#[doc(hidden)]`?
    fn is_doc_hidden(a: &clean::Attribute) -> bool {
        match *a {
            clean::List(ref name, ref inner) if *name == "doc" => {
                inner.iter().any(|a| {
                    match *a {
                        clean::Word(ref s) => *s == "hidden",
                        _ => false,
                    }
                })
            }
            _ => false
        }
    }
}
/// Rebuild an external module by inlining each of its public children.
fn build_module(cx: &DocContext, tcx: &ty::ctxt,
                did: ast::DefId) -> clean::Module {
    let mut items = Vec::new();
    fill_in(cx, tcx, did, &mut items);
    return clean::Module {
        items: items,
        is_crate: false,
    };

    // FIXME: this doesn't handle reexports inside the module itself.
    //        Should they be handled?
    fn fill_in(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId,
               items: &mut Vec<clean::Item>) {
        csearch::each_child_of_item(&tcx.sess.cstore, did, |def, _, vis| {
            match def {
                // Recurse straight into foreign mods: their contents are
                // handled individually.
                decoder::DlDef(def::DefForeignMod(did)) => {
                    fill_in(cx, tcx, did, items);
                }
                // Only public children are inlined.
                decoder::DlDef(def) if vis == ast::Public => {
                    match try_inline_def(cx, tcx, def) {
                        Some(i) => items.extend(i.into_iter()),
                        None => {}
                    }
                }
                decoder::DlDef(..) => {}
                // All impls were inlined above
                decoder::DlImpl(..) => {}
                decoder::DlField => panic!("unimplemented field"),
            }
        });
    }
}
/// Reconstruct a `clean::Constant` for the external constant `did` by
/// pretty-printing its stored constant expression.
///
/// Panics when metadata has no constant expression for `did`.
fn build_const(cx: &DocContext, tcx: &ty::ctxt,
               did: ast::DefId) -> clean::Constant {
    use rustc::middle::const_eval;
    use syntax::print::pprust;

    let expr = match const_eval::lookup_const_by_id(tcx, did) {
        Some(expr) => expr,
        None => panic!("expected lookup_const_by_id to succeed for {:?}", did),
    };
    debug!("converting constant expr {:?} to snippet", expr);
    let snippet = pprust::expr_to_string(expr);
    debug!("got snippet {}", snippet);
    clean::Constant {
        type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
        expr: snippet,
    }
}
/// Reconstruct a `clean::Static` for the external static `did`.
fn build_static(cx: &DocContext, tcx: &ty::ctxt,
                did: ast::DefId,
                mutable: bool) -> clean::Static {
    let mutability = if mutable { clean::Mutable } else { clean::Immutable };
    clean::Static {
        type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
        mutability: mutability,
        // Non-empty placeholder body so rendering emits "[definition]" links.
        expr: "\n\n\n".to_string(),
    }
}
|
{
let def = ty::lookup_trait_def(tcx, did);
let trait_items = ty::trait_items(tcx, did).clean(cx);
let predicates = ty::lookup_predicates(tcx, did);
clean::Trait {
unsafety: def.unsafety,
generics: (&def.generics, &predicates, subst::TypeSpace).clean(cx),
items: trait_items,
bounds: vec![], // supertraits can be found in the list of predicates
}
}
|
identifier_body
|
login.rs
|
use std::io::{Read, Write};
use std::io;
use std::fs::File;
use std::error::Error;
use hyper;
use hyper::net::Fresh;
use hyper::server::{Server, Request, Response};
use hyper::uri::RequestUri;
use hyper::Url;
use hyper::header::{Headers,ContentType,Location,SetCookie};
use hyper::header;
use cookie::Cookie;
use mime::Mime;
use url::SchemeData::Relative;
use rustc_serialize::json;
use rustc_serialize::json::ToJson;
use std::path::Path;
use std::fs::PathExt;
use conduit_mime_types::Types;
use std::str::FromStr;
use std::env;
use websocket::{client,Client,stream,Message,Sender,Receiver};
use std::thread;
use cbor;
use cbor::ToCbor;
use value::Value;
use server::Event;
use relation::Change;
// The (increasingly misnamed) login server is responsible for:
// * handling authentication via authrocket
// * serving static files for the editor and clients
// TODO needs review
/// Session payload decoded from the AuthRocket sessions API response.
// Field names mirror the JSON keys expected by `rustc_serialize`.
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct Session {
    client: String,
    id: String,
    user_id: String,
    object: String,    // presumably the API's object-type tag — confirm
    created_at: f64,   // timestamps arrive as JSON numbers
    expires_at: f64,
    ip: String,
    user: User,        // the authenticated user attached to this session
}
/// User record embedded in an AuthRocket session.
// Field names mirror the JSON keys expected by `rustc_serialize`;
// `Option` fields may be absent or null in the payload.
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct User {
    custom: Custom,
    id: String,
    realm_id: String,
    username: String,
    state: String,
    user_type: String,
    reference: Option<String>,
    name: String,
    email: String,
    object: String,
    last_login_at: f64,
    last_login_on: f64,
    created_at: f64,
    first_name: Option<String>,
    last_name: Option<String>,
    credentials: Vec<Credential>,
    membership_count: f64,
}
/// Placeholder for AuthRocket's free-form `custom` user data; no fields
/// are currently decoded from it.
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct Custom;
/// A single credential entry attached to an AuthRocket user.
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct Credential {
    id: String,
    credential_type: String,
    object: String,
}
pub fn run()
|
/// Read the entire contents of `filename` into a byte vector.
///
/// Panics if the file cannot be opened or read.
fn read_file_bytes(filename: &str) -> Vec<u8> {
    let mut buffer = Vec::new();
    File::open(filename)
        .unwrap()
        .read_to_end(&mut buffer)
        .unwrap();
    buffer
}
/// Return true when `path` names an existing regular file.
///
/// `Path::is_file` already returns `false` for paths that do not exist,
/// so the former extra `exists()` conjunct was redundant.
fn file_exists(path: &str) -> bool {
    Path::new(path).is_file()
}
/// Serve the requested file from the local `../ui` tree when it exists,
/// otherwise fall back to `default_file` with its asset URLs rewritten to
/// absolute `/app/` paths. Content-Type is derived from the extension.
fn serve_local_or_file(mut res: Response<Fresh>, path: &Vec<String>, default_file: &str) -> io::Result<()> {
    let mime_types = Types::new().unwrap();
    // Drop the leading path segment and resolve the rest under ../ui.
    let local_path = path[1..].iter().fold("../ui".to_owned(), |end, cur| end + "/" + cur);
    let file;
    let file_path: &str;
    if file_exists(&local_path) {
        file = read_file_bytes(&local_path);
        file_path = &local_path;
    } else {
        let content = read_file_bytes(default_file);
        //@HACK, @TODO: absolutize the html file so that it looks in the right place
        //this allows us to use file:/// locally, while still doing the right thing
        //when we're hooked up to the server
        let mut str = String::from_utf8(content).unwrap();
        str = str.replace("href=\"", "href=\"/app/");
        str = str.replace("src=\"", "src=\"/app/");
        // Undo the rewrite for absolute http(s) URLs caught by the above.
        str = str.replace("/app/http", "http");
        file = str.as_bytes().to_owned();
        file_path = default_file;
    }
    let mime: Mime = Mime::from_str(mime_types.mime_for_path(Path::new(&file_path))).unwrap();
    res.headers_mut().set(ContentType(mime));
    // Stream the body: start the response, write all bytes, finish it.
    let mut res = try!(res.start());
    try!(res.write_all(&file));
    try!(res.end());
    Ok(())
}
/// Extract the value of the "userid" cookie from an optional cookie
/// header, if one is present.
pub fn get_user_id(cookies: Option<&header::Cookie>) -> Option<String> {
    let cookies = match cookies {
        Some(cookies) => cookies,
        None => return None,
    };
    cookies.iter()
           .find(|cookie| cookie.name == "userid")
           .map(|cookie| cookie.value.clone())
}
/// Handle one HTTP request: serves the app/editor UI, performs AuthRocket
/// token authentication for `login.html`, and handles `logout.html`.
// NOTE(review): the AuthRocket account/API-key/realm values below are
// hard-coded secrets — confirm whether they should move to configuration.
fn login(req: Request, mut res: Response<Fresh>) {
    // Don't use a port if we're running in local mode
    let mut port = ":8000";
    for argument in env::args() {
        match &*argument {
            "local" => {
                port = "";
            },
            _ => continue,
        }
    }
    match (&req.method.clone(), &req.uri.clone()) {
        (&hyper::Get, &RequestUri::AbsolutePath(ref relative_path)) => {
            // Form a url from the path
            let absolute_path = "http://localhost".to_string() + relative_path;
            let url = Url::parse(&*absolute_path).unwrap();
            // Get the file name
            let scheme_data = &url.scheme_data;
            let requested_file = match scheme_data {
                &Relative(ref rsd) => rsd.path[0].clone(),
                _ => panic!("Expected relative path"),
            };
            let path_info = match scheme_data {
                &Relative(ref rsd) => rsd,
                _ => panic!("Expected relative path"),
            };
            // Parse the query string
            let query_pairs = &url.query_pairs();
            // Handle login
            match &*requested_file {
                "app.html" | "app" => {
                    let result = serve_local_or_file(res, &path_info.path, "../ui/app.html");
                    if let Err(error) = result {
                        println!("Warning: serve error {:?}", error);
                    }
                },
                "editor.html" | "editor" => {
                    let result = serve_local_or_file(res, &path_info.path, "../ui/editor.html");
                    if let Err(error) = result {
                        println!("Warning: serve error {:?}", error);
                    }
                },
                "login.html" => {
                    println!("Authenticating User");
                    let referer = format!("{}",req.headers.get::<hyper::header::Referer>().unwrap());
                    let referer_url = Url::parse(&*referer).unwrap();
                    let pairs = query_pairs.clone().unwrap();
                    // Expect at least (page, _) followed by ("token", token).
                    match &pairs[..] {
                        [(_, ref page), (ref token_type, ref token),..] if token_type.clone() == "token".to_string() => {
                            // We have a login token, now to authenticate
                            let client = hyper::client::Client::new();
                            let api_call = "https://api-e1.authrocket.com/v1/sessions/".to_string() + token;
                            let req = client.request(hyper::method::Method::Get,&*api_call);
                            // Set the appropriate headers for authorocket
                            let mut headers = Headers::new();
                            let json: Mime = "application/json".parse().unwrap();
                            let content_type = ContentType(json);
                            headers.set_raw("X-Authrocket-Account",vec!["org_0vC7wPw9XphPGQnSqYB6bz".to_string().into_bytes()]);
                            headers.set_raw("X-Authrocket-Api-Key",vec!["key_jtnCRWxQvDD0p5HATR9RBIe4WnxnwV6pWNzwmZQLnSZ".to_string().into_bytes()]);
                            headers.set_raw("X-Authrocket-Realm",vec!["rl_0vC7wd03CqwhpK7kT8fvAc".to_string().into_bytes()]);
                            headers.set_raw("Accept",vec!["application/json".to_string().into_bytes()]);
                            headers.set(content_type);
                            // Send the request and receive a response with session data
                            let mut client_res = req.headers(headers).send().unwrap();
                            match client_res.status_raw() {
                                &hyper::http::RawStatus(200,_) => {
                                    let mut body = String::new();
                                    client_res.read_to_string(&mut body).unwrap();
                                    let session_data: Session = json::decode(&body).unwrap();
                                    println!("Welcome to Eve, {:?}!",session_data.user.username);
                                    println!("Login Successful. Redirecting to user area.");
                                    // Connect to the Eve runtime and add the user to the eveusers table
                                    let ws_result = open_websocket("ws://0.0.0.0:2794");
                                    match ws_result {
                                        // If things went okay, redirect to the Eve UI
                                        Ok(mut sender) => {
                                            // Form the response
                                            *res.status_mut() = hyper::status::StatusCode::PermanentRedirect;
                                            // Form the response headers
                                            let mut headers = Headers::new();
                                            let redirect_url = referer_url.scheme.clone() + "://" + referer_url.domain().unwrap().clone() + port + "/" + page;
                                            let location = Location(redirect_url);
                                            let user_cookie = Cookie::new("userid".to_string(),session_data.user.id.clone());
                                            let cookies = SetCookie(vec![user_cookie]);
                                            headers.set(location);
                                            headers.set(cookies);
                                            *res.headers_mut() = headers;
                                            // Create eveusers table and insert the new user
                                            let change = ("eveusers".to_owned(),
                                                          Change{
                                                              fields: vec!["eveusers: id".to_owned(), "eveusers: username".to_owned()],
                                                              insert: vec![vec![
                                                                  Value::String(session_data.user.id.clone()),
                                                                  Value::String(session_data.user.username.clone())
                                                              ]],
                                                              remove: vec![],
                                                          });
                                            let event = Event{changes: vec![change], commands: vec![]};
                                            send_event(event, &mut sender);
                                            let _ = sender.send_message(Message::Close(None));
                                        }
                                        // Otherwise, throw an error... maybe redirect to a special page.
                                        Err(e) => {
                                            println!("ERROR: Had trouble connecting to the Eve runtime: {}. Is the server running?",e);
                                            *res.status_mut() = hyper::status::StatusCode::NotFound;
                                            panic!("Oh no!");
                                            //serve_file("404.html",res);
                                        }
                                    }
                                    println!("Login complete.");
                                }
                                _ => {
                                    println!("ERROR: Could not authenticate user with token {}",token);
                                    *res.status_mut() = hyper::status::StatusCode::Forbidden;
                                    panic!("Oh no!");
                                    //serve_file("404.html",res);
                                }
                            };
                        },
                        _ => panic!("Oh no!"), //serve_file("404.html",res),
                    }
                },
                "logout.html" => {
                    // NOTE(review): logout only logs the user id — no cookie
                    // is cleared and no response body is written; confirm intent.
                    println!("Logging out...");
                    let user_id = get_user_id(req.headers.get::<hyper::header::Cookie>());
                    println!("{:?}",user_id);
                },
                "favicon.ico" => (),
                other => panic!("Cannot serve {}",other), //serve_file(&*requested_file,res),
            };
        }
        _ => panic!("Oh no!"),
    }
}
/// Open a websocket connection to `url_string` and spawn a background
/// thread that drains incoming messages (exiting on close or error).
/// Returns the sending half on success, or a formatted error string.
pub fn open_websocket(url_string: &str) -> Result<client::sender::Sender<stream::WebSocketStream>,String> {
    //let mut context = SslContext::new(SslMethod::Tlsv1).unwrap();
    //let _ = context.set_certificate_file(&(Path::new("server.crt")), X509FileType::PEM);
    //let _ = context.set_private_key_file(&(Path::new("server.key")), X509FileType::PEM);
    let url = Url::parse(url_string).unwrap();
    println!("Connecting to {}", url);
    let request = match Client::connect(url) {
        Ok(t) => t,
        Err(e) => {
            return Err(format!("{}", e).to_string());
        }
    };
    let response = match request.send() {
        Ok(t) => t,
        Err(e) => {
            return Err(format!("{}", e).to_string());
        }
    };
    // Validate the websocket handshake before splitting the connection.
    match response.validate() {
        Ok(_) => println!("Response valid. Start sending/receiving..."),
        Err(e) => {
            return Err(format!("{}", e).to_string());
        }
    };
    let (sender, mut receiver) = response.begin().split();
    // Receiver thread: text messages are currently ignored; a Close
    // message or a receive error terminates the thread.
    thread::spawn(move || {
        for message in receiver.incoming_messages() {
            let message = match message {
                Ok(m) => m,
                Err(_) => return,
            };
            match message {
                Message::Text(_) => {
                    //let json = Json::from_str(&text).unwrap();
                    //let event: Event = FromJson::from_json(&json);
                },
                Message::Close(_) => {
                    println!("Received close message");
                    return;
                }
                _ => println!("Unknown message: {:?}", message)
            }
        }
    });
    Ok(sender)
}
pub fn send_event(event: Event, sender: &mut client::sender::Sender<stream::WebSocketStream>) {
let mut e = cbor::Encoder::from_memory();
let json = event.to_json();
let cbor = json.to_cbor();
e.encode(vec![cbor]).unwrap();
sender.send_message(Message::Binary(e.into_bytes())).unwrap();
}
|
{
// TODO high thread-count is a workaround for https://github.com/hyperium/hyper/issues/368
Server::http("0.0.0.0:8080").unwrap().handle_threads(login, 100).unwrap();
}
|
identifier_body
|
login.rs
|
use std::io::{Read, Write};
use std::io;
use std::fs::File;
use std::error::Error;
use hyper;
use hyper::net::Fresh;
use hyper::server::{Server, Request, Response};
use hyper::uri::RequestUri;
use hyper::Url;
use hyper::header::{Headers,ContentType,Location,SetCookie};
use hyper::header;
use cookie::Cookie;
use mime::Mime;
use url::SchemeData::Relative;
use rustc_serialize::json;
use rustc_serialize::json::ToJson;
use std::path::Path;
use std::fs::PathExt;
use conduit_mime_types::Types;
use std::str::FromStr;
use std::env;
use websocket::{client,Client,stream,Message,Sender,Receiver};
use std::thread;
use cbor;
use cbor::ToCbor;
use value::Value;
use server::Event;
use relation::Change;
// The (increasingly misnamed) login server is responsible for:
// * handling authentication via authrocket
// * serving static files for the editor and clients
// TODO needs review
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct Session {
client: String,
id: String,
user_id: String,
object: String,
created_at: f64,
expires_at: f64,
ip: String,
user: User,
}
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct User {
custom: Custom,
id: String,
realm_id: String,
username: String,
state: String,
user_type: String,
reference: Option<String>,
name: String,
email: String,
object: String,
last_login_at: f64,
last_login_on: f64,
created_at: f64,
first_name: Option<String>,
last_name: Option<String>,
credentials: Vec<Credential>,
membership_count: f64,
}
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct Custom;
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct Credential {
id: String,
credential_type: String,
object: String,
}
pub fn run() {
// TODO high thread-count is a workaround for https://github.com/hyperium/hyper/issues/368
Server::http("0.0.0.0:8080").unwrap().handle_threads(login, 100).unwrap();
}
fn read_file_bytes(filename: &str) -> Vec<u8> {
let mut file = File::open(&filename).unwrap();
let mut contents: Vec<u8> = Vec::new();
file.read_to_end(&mut contents).unwrap();
contents
}
fn file_exists(path: &str) -> bool {
let path_obj = Path::new(path);
path_obj.is_file() && path_obj.exists()
}
fn serve_local_or_file(mut res: Response<Fresh>, path: &Vec<String>, default_file: &str) -> io::Result<()> {
let mime_types = Types::new().unwrap();
let local_path = path[1..].iter().fold("../ui".to_owned(), |end, cur| end + "/" + cur);
let file;
let file_path: &str;
if file_exists(&local_path) {
file = read_file_bytes(&local_path);
file_path = &local_path;
} else {
let content = read_file_bytes(default_file);
//@HACK, @TODO: absolutize the html file so that it looks in the right place
//this allows us to use file:/// locally, while still doing the right thing
//when we're hooked up to the server
let mut str = String::from_utf8(content).unwrap();
str = str.replace("href=\"", "href=\"/app/");
str = str.replace("src=\"", "src=\"/app/");
str = str.replace("/app/http", "http");
file = str.as_bytes().to_owned();
file_path = default_file;
}
let mime: Mime = Mime::from_str(mime_types.mime_for_path(Path::new(&file_path))).unwrap();
res.headers_mut().set(ContentType(mime));
let mut res = try!(res.start());
try!(res.write_all(&file));
try!(res.end());
Ok(())
}
pub fn get_user_id(cookies: Option<&header::Cookie>) -> Option<String> {
match cookies {
Some(cookies) => {
match cookies.iter().find(|cookie| cookie.name == "userid") {
Some(user_id) => Some(user_id.value.clone()),
None => None,
}
},
None => None,
}
}
fn login(req: Request, mut res: Response<Fresh>) {
// Don't use a port if we're running in local mode
let mut port = ":8000";
for argument in env::args() {
match &*argument {
"local" => {
port = "";
},
_ => continue,
}
}
match (&req.method.clone(), &req.uri.clone()) {
(&hyper::Get, &RequestUri::AbsolutePath(ref relative_path)) => {
// Form a url from the path
let absolute_path = "http://localhost".to_string() + relative_path;
let url = Url::parse(&*absolute_path).unwrap();
// Get the file name
let scheme_data = &url.scheme_data;
let requested_file = match scheme_data {
&Relative(ref rsd) => rsd.path[0].clone(),
_ => panic!("Expected relative path"),
};
let path_info = match scheme_data {
&Relative(ref rsd) => rsd,
_ => panic!("Expected relative path"),
};
// Parse the query string
let query_pairs = &url.query_pairs();
// Handle login
match &*requested_file {
"app.html" | "app" => {
let result = serve_local_or_file(res, &path_info.path, "../ui/app.html");
if let Err(error) = result {
println!("Warning: serve error {:?}", error);
}
},
"editor.html" | "editor" => {
let result = serve_local_or_file(res, &path_info.path, "../ui/editor.html");
if let Err(error) = result {
println!("Warning: serve error {:?}", error);
}
},
"login.html" => {
|
let referer = format!("{}",req.headers.get::<hyper::header::Referer>().unwrap());
let referer_url = Url::parse(&*referer).unwrap();
let pairs = query_pairs.clone().unwrap();
match &pairs[..] {
[(_, ref page), (ref token_type, ref token),..] if token_type.clone() == "token".to_string() => {
// We have a login token, now to authenticate
let client = hyper::client::Client::new();
let api_call = "https://api-e1.authrocket.com/v1/sessions/".to_string() + token;
let req = client.request(hyper::method::Method::Get,&*api_call);
// Set the appropriate headers for authorocket
let mut headers = Headers::new();
let json: Mime = "application/json".parse().unwrap();
let content_type = ContentType(json);
headers.set_raw("X-Authrocket-Account",vec!["org_0vC7wPw9XphPGQnSqYB6bz".to_string().into_bytes()]);
headers.set_raw("X-Authrocket-Api-Key",vec!["key_jtnCRWxQvDD0p5HATR9RBIe4WnxnwV6pWNzwmZQLnSZ".to_string().into_bytes()]);
headers.set_raw("X-Authrocket-Realm",vec!["rl_0vC7wd03CqwhpK7kT8fvAc".to_string().into_bytes()]);
headers.set_raw("Accept",vec!["application/json".to_string().into_bytes()]);
headers.set(content_type);
// Send the request and receive a response with session data
let mut client_res = req.headers(headers).send().unwrap();
match client_res.status_raw() {
&hyper::http::RawStatus(200,_) => {
let mut body = String::new();
client_res.read_to_string(&mut body).unwrap();
let session_data: Session = json::decode(&body).unwrap();
println!("Welcome to Eve, {:?}!",session_data.user.username);
println!("Login Successful. Redirecting to user area.");
// Connect to the Eve runtime and add the user to the eveusers table
let ws_result = open_websocket("ws://0.0.0.0:2794");
match ws_result {
// If things went okay, redirect to the Eve UI
Ok(mut sender) => {
// Form the response
*res.status_mut() = hyper::status::StatusCode::PermanentRedirect;
// Form the response headers
let mut headers = Headers::new();
let redirect_url = referer_url.scheme.clone() + "://" + referer_url.domain().unwrap().clone() + port + "/" + page;
let location = Location(redirect_url);
let user_cookie = Cookie::new("userid".to_string(),session_data.user.id.clone());
let cookies = SetCookie(vec![user_cookie]);
headers.set(location);
headers.set(cookies);
*res.headers_mut() = headers;
// Create eveusers table and insert the new user
let change = ("eveusers".to_owned(),
Change{
fields: vec!["eveusers: id".to_owned(), "eveusers: username".to_owned()],
insert: vec![vec![
Value::String(session_data.user.id.clone()),
Value::String(session_data.user.username.clone())
]],
remove: vec![],
});
let event = Event{changes: vec![change], commands: vec![]};
send_event(event, &mut sender);
let _ = sender.send_message(Message::Close(None));
}
// Otherwise, throw an error... maybe redirect to a special page.
Err(e) => {
println!("ERROR: Had trouble connecting to the Eve runtime: {}. Is the server running?",e);
*res.status_mut() = hyper::status::StatusCode::NotFound;
panic!("Oh no!");
//serve_file("404.html",res);
}
}
println!("Login complete.");
}
_ => {
println!("ERROR: Could not authenticate user with token {}",token);
*res.status_mut() = hyper::status::StatusCode::Forbidden;
panic!("Oh no!");
//serve_file("404.html",res);
}
};
},
_ => panic!("Oh no!"), //serve_file("404.html",res),
}
},
"logout.html" => {
println!("Logging out...");
let user_id = get_user_id(req.headers.get::<hyper::header::Cookie>());
println!("{:?}",user_id);
},
"favicon.ico" => (),
other => panic!("Cannot serve {}",other), //serve_file(&*requested_file,res),
};
}
_ => panic!("Oh no!"),
}
}
pub fn open_websocket(url_string: &str) -> Result<client::sender::Sender<stream::WebSocketStream>,String> {
//let mut context = SslContext::new(SslMethod::Tlsv1).unwrap();
//let _ = context.set_certificate_file(&(Path::new("server.crt")), X509FileType::PEM);
//let _ = context.set_private_key_file(&(Path::new("server.key")), X509FileType::PEM);
let url = Url::parse(url_string).unwrap();
println!("Connecting to {}", url);
let request = match Client::connect(url) {
Ok(t) => t,
Err(e) => {
return Err(format!("{}", e).to_string());
}
};
let response = match request.send() {
Ok(t) => t,
Err(e) => {
return Err(format!("{}", e).to_string());
}
};
match response.validate() {
Ok(_) => println!("Response valid. Start sending/receiving..."),
Err(e) => {
return Err(format!("{}", e).to_string());
}
};
let (sender, mut receiver) = response.begin().split();
thread::spawn(move || {
for message in receiver.incoming_messages() {
let message = match message {
Ok(m) => m,
Err(_) => return,
};
match message {
Message::Text(_) => {
//let json = Json::from_str(&text).unwrap();
//let event: Event = FromJson::from_json(&json);
},
Message::Close(_) => {
println!("Received close message");
return;
}
_ => println!("Unknown message: {:?}", message)
}
}
});
Ok(sender)
}
pub fn send_event(event: Event, sender: &mut client::sender::Sender<stream::WebSocketStream>) {
let mut e = cbor::Encoder::from_memory();
let json = event.to_json();
let cbor = json.to_cbor();
e.encode(vec![cbor]).unwrap();
sender.send_message(Message::Binary(e.into_bytes())).unwrap();
}
|
println!("Authenticating User");
|
random_line_split
|
login.rs
|
use std::io::{Read, Write};
use std::io;
use std::fs::File;
use std::error::Error;
use hyper;
use hyper::net::Fresh;
use hyper::server::{Server, Request, Response};
use hyper::uri::RequestUri;
use hyper::Url;
use hyper::header::{Headers,ContentType,Location,SetCookie};
use hyper::header;
use cookie::Cookie;
use mime::Mime;
use url::SchemeData::Relative;
use rustc_serialize::json;
use rustc_serialize::json::ToJson;
use std::path::Path;
use std::fs::PathExt;
use conduit_mime_types::Types;
use std::str::FromStr;
use std::env;
use websocket::{client,Client,stream,Message,Sender,Receiver};
use std::thread;
use cbor;
use cbor::ToCbor;
use value::Value;
use server::Event;
use relation::Change;
// The (increasingly misnamed) login server is responsible for:
// * handling authentication via authrocket
// * serving static files for the editor and clients
// TODO needs review
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct Session {
client: String,
id: String,
user_id: String,
object: String,
created_at: f64,
expires_at: f64,
ip: String,
user: User,
}
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct
|
{
custom: Custom,
id: String,
realm_id: String,
username: String,
state: String,
user_type: String,
reference: Option<String>,
name: String,
email: String,
object: String,
last_login_at: f64,
last_login_on: f64,
created_at: f64,
first_name: Option<String>,
last_name: Option<String>,
credentials: Vec<Credential>,
membership_count: f64,
}
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct Custom;
#[derive(RustcDecodable, RustcEncodable, Debug, Clone)]
struct Credential {
id: String,
credential_type: String,
object: String,
}
pub fn run() {
// TODO high thread-count is a workaround for https://github.com/hyperium/hyper/issues/368
Server::http("0.0.0.0:8080").unwrap().handle_threads(login, 100).unwrap();
}
fn read_file_bytes(filename: &str) -> Vec<u8> {
let mut file = File::open(&filename).unwrap();
let mut contents: Vec<u8> = Vec::new();
file.read_to_end(&mut contents).unwrap();
contents
}
fn file_exists(path: &str) -> bool {
let path_obj = Path::new(path);
path_obj.is_file() && path_obj.exists()
}
fn serve_local_or_file(mut res: Response<Fresh>, path: &Vec<String>, default_file: &str) -> io::Result<()> {
let mime_types = Types::new().unwrap();
let local_path = path[1..].iter().fold("../ui".to_owned(), |end, cur| end + "/" + cur);
let file;
let file_path: &str;
if file_exists(&local_path) {
file = read_file_bytes(&local_path);
file_path = &local_path;
} else {
let content = read_file_bytes(default_file);
//@HACK, @TODO: absolutize the html file so that it looks in the right place
//this allows us to use file:/// locally, while still doing the right thing
//when we're hooked up to the server
let mut str = String::from_utf8(content).unwrap();
str = str.replace("href=\"", "href=\"/app/");
str = str.replace("src=\"", "src=\"/app/");
str = str.replace("/app/http", "http");
file = str.as_bytes().to_owned();
file_path = default_file;
}
let mime: Mime = Mime::from_str(mime_types.mime_for_path(Path::new(&file_path))).unwrap();
res.headers_mut().set(ContentType(mime));
let mut res = try!(res.start());
try!(res.write_all(&file));
try!(res.end());
Ok(())
}
pub fn get_user_id(cookies: Option<&header::Cookie>) -> Option<String> {
match cookies {
Some(cookies) => {
match cookies.iter().find(|cookie| cookie.name == "userid") {
Some(user_id) => Some(user_id.value.clone()),
None => None,
}
},
None => None,
}
}
fn login(req: Request, mut res: Response<Fresh>) {
// Don't use a port if we're running in local mode
let mut port = ":8000";
for argument in env::args() {
match &*argument {
"local" => {
port = "";
},
_ => continue,
}
}
match (&req.method.clone(), &req.uri.clone()) {
(&hyper::Get, &RequestUri::AbsolutePath(ref relative_path)) => {
// Form a url from the path
let absolute_path = "http://localhost".to_string() + relative_path;
let url = Url::parse(&*absolute_path).unwrap();
// Get the file name
let scheme_data = &url.scheme_data;
let requested_file = match scheme_data {
&Relative(ref rsd) => rsd.path[0].clone(),
_ => panic!("Expected relative path"),
};
let path_info = match scheme_data {
&Relative(ref rsd) => rsd,
_ => panic!("Expected relative path"),
};
// Parse the query string
let query_pairs = &url.query_pairs();
// Handle login
match &*requested_file {
"app.html" | "app" => {
let result = serve_local_or_file(res, &path_info.path, "../ui/app.html");
if let Err(error) = result {
println!("Warning: serve error {:?}", error);
}
},
"editor.html" | "editor" => {
let result = serve_local_or_file(res, &path_info.path, "../ui/editor.html");
if let Err(error) = result {
println!("Warning: serve error {:?}", error);
}
},
"login.html" => {
println!("Authenticating User");
let referer = format!("{}",req.headers.get::<hyper::header::Referer>().unwrap());
let referer_url = Url::parse(&*referer).unwrap();
let pairs = query_pairs.clone().unwrap();
match &pairs[..] {
[(_, ref page), (ref token_type, ref token),..] if token_type.clone() == "token".to_string() => {
// We have a login token, now to authenticate
let client = hyper::client::Client::new();
let api_call = "https://api-e1.authrocket.com/v1/sessions/".to_string() + token;
let req = client.request(hyper::method::Method::Get,&*api_call);
// Set the appropriate headers for authorocket
let mut headers = Headers::new();
let json: Mime = "application/json".parse().unwrap();
let content_type = ContentType(json);
headers.set_raw("X-Authrocket-Account",vec!["org_0vC7wPw9XphPGQnSqYB6bz".to_string().into_bytes()]);
headers.set_raw("X-Authrocket-Api-Key",vec!["key_jtnCRWxQvDD0p5HATR9RBIe4WnxnwV6pWNzwmZQLnSZ".to_string().into_bytes()]);
headers.set_raw("X-Authrocket-Realm",vec!["rl_0vC7wd03CqwhpK7kT8fvAc".to_string().into_bytes()]);
headers.set_raw("Accept",vec!["application/json".to_string().into_bytes()]);
headers.set(content_type);
// Send the request and receive a response with session data
let mut client_res = req.headers(headers).send().unwrap();
match client_res.status_raw() {
&hyper::http::RawStatus(200,_) => {
let mut body = String::new();
client_res.read_to_string(&mut body).unwrap();
let session_data: Session = json::decode(&body).unwrap();
println!("Welcome to Eve, {:?}!",session_data.user.username);
println!("Login Successful. Redirecting to user area.");
// Connect to the Eve runtime and add the user to the eveusers table
let ws_result = open_websocket("ws://0.0.0.0:2794");
match ws_result {
// If things went okay, redirect to the Eve UI
Ok(mut sender) => {
// Form the response
*res.status_mut() = hyper::status::StatusCode::PermanentRedirect;
// Form the response headers
let mut headers = Headers::new();
let redirect_url = referer_url.scheme.clone() + "://" + referer_url.domain().unwrap().clone() + port + "/" + page;
let location = Location(redirect_url);
let user_cookie = Cookie::new("userid".to_string(),session_data.user.id.clone());
let cookies = SetCookie(vec![user_cookie]);
headers.set(location);
headers.set(cookies);
*res.headers_mut() = headers;
// Create eveusers table and insert the new user
let change = ("eveusers".to_owned(),
Change{
fields: vec!["eveusers: id".to_owned(), "eveusers: username".to_owned()],
insert: vec![vec![
Value::String(session_data.user.id.clone()),
Value::String(session_data.user.username.clone())
]],
remove: vec![],
});
let event = Event{changes: vec![change], commands: vec![]};
send_event(event, &mut sender);
let _ = sender.send_message(Message::Close(None));
}
// Otherwise, throw an error... maybe redirect to a special page.
Err(e) => {
println!("ERROR: Had trouble connecting to the Eve runtime: {}. Is the server running?",e);
*res.status_mut() = hyper::status::StatusCode::NotFound;
panic!("Oh no!");
//serve_file("404.html",res);
}
}
println!("Login complete.");
}
_ => {
println!("ERROR: Could not authenticate user with token {}",token);
*res.status_mut() = hyper::status::StatusCode::Forbidden;
panic!("Oh no!");
//serve_file("404.html",res);
}
};
},
_ => panic!("Oh no!"), //serve_file("404.html",res),
}
},
"logout.html" => {
println!("Logging out...");
let user_id = get_user_id(req.headers.get::<hyper::header::Cookie>());
println!("{:?}",user_id);
},
"favicon.ico" => (),
other => panic!("Cannot serve {}",other), //serve_file(&*requested_file,res),
};
}
_ => panic!("Oh no!"),
}
}
pub fn open_websocket(url_string: &str) -> Result<client::sender::Sender<stream::WebSocketStream>,String> {
//let mut context = SslContext::new(SslMethod::Tlsv1).unwrap();
//let _ = context.set_certificate_file(&(Path::new("server.crt")), X509FileType::PEM);
//let _ = context.set_private_key_file(&(Path::new("server.key")), X509FileType::PEM);
let url = Url::parse(url_string).unwrap();
println!("Connecting to {}", url);
let request = match Client::connect(url) {
Ok(t) => t,
Err(e) => {
return Err(format!("{}", e).to_string());
}
};
let response = match request.send() {
Ok(t) => t,
Err(e) => {
return Err(format!("{}", e).to_string());
}
};
match response.validate() {
Ok(_) => println!("Response valid. Start sending/receiving..."),
Err(e) => {
return Err(format!("{}", e).to_string());
}
};
let (sender, mut receiver) = response.begin().split();
thread::spawn(move || {
for message in receiver.incoming_messages() {
let message = match message {
Ok(m) => m,
Err(_) => return,
};
match message {
Message::Text(_) => {
//let json = Json::from_str(&text).unwrap();
//let event: Event = FromJson::from_json(&json);
},
Message::Close(_) => {
println!("Received close message");
return;
}
_ => println!("Unknown message: {:?}", message)
}
}
});
Ok(sender)
}
pub fn send_event(event: Event, sender: &mut client::sender::Sender<stream::WebSocketStream>) {
let mut e = cbor::Encoder::from_memory();
let json = event.to_json();
let cbor = json.to_cbor();
e.encode(vec![cbor]).unwrap();
sender.send_message(Message::Binary(e.into_bytes())).unwrap();
}
|
User
|
identifier_name
|
eventfd.rs
|
use std::os::unix::io::RawFd;
use std::mem::transmute;
use nix::sys::eventfd::{eventfd, EFD_CLOEXEC, EFD_NONBLOCK};
use nix::poll::{poll, PollFd, POLLIN, EventFlags};
use nix::unistd::{read, write};
use nix::Error;
use nix::Errno::EINTR;
#[derive(Debug)]
pub struct Async(RawFd);
impl Async {
pub fn new() -> Async {
Async(eventfd(0, EFD_CLOEXEC|EFD_NONBLOCK).expect("create eventfd"))
}
pub fn notify(&self) {
let buf: [u8; 8] = unsafe { transmute(1u64) };
write(self.0, &buf).expect("write eventfd");
}
pub fn
|
(&self) -> bool {
let mut buf = [0u8; 8];
loop {
match read(self.0, &mut buf) {
Ok(_) => return true,
Err(Error::Sys(EINTR)) => continue,
Err(_) => return false,
}
}
}
/// Wait (blocking) for notification, doesn't reset counter
///
/// Returns `false` if wait was interrupted by some error
/// (presumably EINTR)
pub fn wait(&self) -> bool {
let mut pollfd = [PollFd { fd: self.0, events: POLLIN,
revents: EventFlags::empty() }];
poll(&mut pollfd, -1).is_ok()
}
pub unsafe fn export_fd(&self) -> RawFd {
self.0
}
}
|
check
|
identifier_name
|
eventfd.rs
|
use std::os::unix::io::RawFd;
use std::mem::transmute;
use nix::sys::eventfd::{eventfd, EFD_CLOEXEC, EFD_NONBLOCK};
|
use nix::unistd::{read, write};
use nix::Error;
use nix::Errno::EINTR;
#[derive(Debug)]
pub struct Async(RawFd);
impl Async {
pub fn new() -> Async {
Async(eventfd(0, EFD_CLOEXEC|EFD_NONBLOCK).expect("create eventfd"))
}
pub fn notify(&self) {
let buf: [u8; 8] = unsafe { transmute(1u64) };
write(self.0, &buf).expect("write eventfd");
}
pub fn check(&self) -> bool {
let mut buf = [0u8; 8];
loop {
match read(self.0, &mut buf) {
Ok(_) => return true,
Err(Error::Sys(EINTR)) => continue,
Err(_) => return false,
}
}
}
/// Wait (blocking) for notification, doesn't reset counter
///
/// Returns `false` if wait was interrupted by some error
/// (presumably EINTR)
pub fn wait(&self) -> bool {
let mut pollfd = [PollFd { fd: self.0, events: POLLIN,
revents: EventFlags::empty() }];
poll(&mut pollfd, -1).is_ok()
}
pub unsafe fn export_fd(&self) -> RawFd {
self.0
}
}
|
use nix::poll::{poll, PollFd, POLLIN, EventFlags};
|
random_line_split
|
eventfd.rs
|
use std::os::unix::io::RawFd;
use std::mem::transmute;
use nix::sys::eventfd::{eventfd, EFD_CLOEXEC, EFD_NONBLOCK};
use nix::poll::{poll, PollFd, POLLIN, EventFlags};
use nix::unistd::{read, write};
use nix::Error;
use nix::Errno::EINTR;
#[derive(Debug)]
pub struct Async(RawFd);
impl Async {
pub fn new() -> Async {
Async(eventfd(0, EFD_CLOEXEC|EFD_NONBLOCK).expect("create eventfd"))
}
pub fn notify(&self) {
let buf: [u8; 8] = unsafe { transmute(1u64) };
write(self.0, &buf).expect("write eventfd");
}
pub fn check(&self) -> bool {
let mut buf = [0u8; 8];
loop {
match read(self.0, &mut buf) {
Ok(_) => return true,
Err(Error::Sys(EINTR)) => continue,
Err(_) => return false,
}
}
}
/// Wait (blocking) for notification, doesn't reset counter
///
/// Returns `false` if wait was interrupted by some error
/// (presumably EINTR)
pub fn wait(&self) -> bool
|
pub unsafe fn export_fd(&self) -> RawFd {
self.0
}
}
|
{
let mut pollfd = [PollFd { fd: self.0, events: POLLIN,
revents: EventFlags::empty() }];
poll(&mut pollfd, -1).is_ok()
}
|
identifier_body
|
constant.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! WebIDL constants.
use js::jsapi::{HandleObject, JSContext, JSPROP_ENUMERATE, JSPROP_PERMANENT};
use js::jsapi::{JSPROP_READONLY, JS_DefineProperty};
use js::jsval::{BooleanValue, DoubleValue, Int32Value, JSVal, NullValue, UInt32Value};
use libc;
/// Representation of an IDL constant.
|
pub struct ConstantSpec {
/// name of the constant.
pub name: &'static [u8],
/// value of the constant.
pub value: ConstantVal,
}
/// Representation of an IDL constant value.
#[derive(Clone)]
#[allow(dead_code)]
pub enum ConstantVal {
/// `long` constant.
IntVal(i32),
/// `unsigned long` constant.
UintVal(u32),
/// `double` constant.
DoubleVal(f64),
/// `boolean` constant.
BoolVal(bool),
/// `null` constant.
NullVal,
}
impl ConstantSpec {
/// Returns a `JSVal` that represents the value of this `ConstantSpec`.
pub fn get_value(&self) -> JSVal {
match self.value {
ConstantVal::NullVal => NullValue(),
ConstantVal::IntVal(i) => Int32Value(i),
ConstantVal::UintVal(u) => UInt32Value(u),
ConstantVal::DoubleVal(d) => DoubleValue(d),
ConstantVal::BoolVal(b) => BooleanValue(b),
}
}
}
/// Defines constants on `obj`.
/// Fails on JSAPI failure.
pub unsafe fn define_constants(
cx: *mut JSContext,
obj: HandleObject,
constants: &[ConstantSpec]) {
for spec in constants {
rooted!(in(cx) let value = spec.get_value());
assert!(JS_DefineProperty(cx,
obj,
spec.name.as_ptr() as *const libc::c_char,
value.handle(),
JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT,
None,
None));
}
}
|
#[derive(Clone)]
|
random_line_split
|
constant.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! WebIDL constants.
use js::jsapi::{HandleObject, JSContext, JSPROP_ENUMERATE, JSPROP_PERMANENT};
use js::jsapi::{JSPROP_READONLY, JS_DefineProperty};
use js::jsval::{BooleanValue, DoubleValue, Int32Value, JSVal, NullValue, UInt32Value};
use libc;
/// Representation of an IDL constant.
#[derive(Clone)]
pub struct ConstantSpec {
/// name of the constant.
pub name: &'static [u8],
/// value of the constant.
pub value: ConstantVal,
}
/// Representation of an IDL constant value.
#[derive(Clone)]
#[allow(dead_code)]
pub enum ConstantVal {
/// `long` constant.
IntVal(i32),
/// `unsigned long` constant.
UintVal(u32),
/// `double` constant.
DoubleVal(f64),
/// `boolean` constant.
BoolVal(bool),
/// `null` constant.
NullVal,
}
impl ConstantSpec {
/// Returns a `JSVal` that represents the value of this `ConstantSpec`.
pub fn get_value(&self) -> JSVal {
match self.value {
ConstantVal::NullVal => NullValue(),
ConstantVal::IntVal(i) => Int32Value(i),
ConstantVal::UintVal(u) => UInt32Value(u),
ConstantVal::DoubleVal(d) => DoubleValue(d),
ConstantVal::BoolVal(b) => BooleanValue(b),
}
}
}
/// Defines constants on `obj`.
/// Fails on JSAPI failure.
pub unsafe fn define_constants(
cx: *mut JSContext,
obj: HandleObject,
constants: &[ConstantSpec])
|
{
for spec in constants {
rooted!(in(cx) let value = spec.get_value());
assert!(JS_DefineProperty(cx,
obj,
spec.name.as_ptr() as *const libc::c_char,
value.handle(),
JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT,
None,
None));
}
}
|
identifier_body
|
|
constant.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! WebIDL constants.
use js::jsapi::{HandleObject, JSContext, JSPROP_ENUMERATE, JSPROP_PERMANENT};
use js::jsapi::{JSPROP_READONLY, JS_DefineProperty};
use js::jsval::{BooleanValue, DoubleValue, Int32Value, JSVal, NullValue, UInt32Value};
use libc;
/// Representation of an IDL constant.
#[derive(Clone)]
pub struct ConstantSpec {
/// name of the constant.
pub name: &'static [u8],
/// value of the constant.
pub value: ConstantVal,
}
/// Representation of an IDL constant value.
#[derive(Clone)]
#[allow(dead_code)]
pub enum ConstantVal {
/// `long` constant.
IntVal(i32),
/// `unsigned long` constant.
UintVal(u32),
/// `double` constant.
DoubleVal(f64),
/// `boolean` constant.
BoolVal(bool),
/// `null` constant.
NullVal,
}
impl ConstantSpec {
/// Returns a `JSVal` that represents the value of this `ConstantSpec`.
pub fn get_value(&self) -> JSVal {
match self.value {
ConstantVal::NullVal => NullValue(),
ConstantVal::IntVal(i) => Int32Value(i),
ConstantVal::UintVal(u) => UInt32Value(u),
ConstantVal::DoubleVal(d) => DoubleValue(d),
ConstantVal::BoolVal(b) => BooleanValue(b),
}
}
}
/// Defines constants on `obj`.
/// Fails on JSAPI failure.
pub unsafe fn
|
(
cx: *mut JSContext,
obj: HandleObject,
constants: &[ConstantSpec]) {
for spec in constants {
rooted!(in(cx) let value = spec.get_value());
assert!(JS_DefineProperty(cx,
obj,
spec.name.as_ptr() as *const libc::c_char,
value.handle(),
JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT,
None,
None));
}
}
|
define_constants
|
identifier_name
|
main.rs
|
extern crate iron_oxide_browser;
use iron_oxide_browser::{command, css, css_parse, dom, html_parse, layout, render, style};
use std::env;
use std::fs::File;
use std::io::{Read, BufReader};
fn main() {
let nodes = test_html();
for node in nodes.iter() {
dom::pretty_print(node, 0);
}
let ref node = nodes[0];
println!("");
let ss = test_css();
print!("{:?}", ss);
println!("");
let style_tree_root = style::StyledNode::new(&node, &ss);
style::pretty_print(&style_tree_root, 0);
println!("");
let mut viewport = layout::Dimensions::default();
viewport.content.width = 1024.0;
viewport.content.height = 768.0;
let layout_tree = layout::layout_tree(&style_tree_root, viewport);
layout::pretty_print(&layout_tree, 0);
let display_commands = command::build_display_commands(&layout_tree);
render::render_loop(&display_commands);
}
fn test_html() -> Vec<dom::Node>
|
fn test_css() -> css::Stylesheet {
let mut path = env::current_dir().unwrap();
path.push("tests/parserTestFiles/blockTypeTest.css");
let mut file_reader = match File::open(&path) {
Ok(f) => BufReader::new(f),
Err(e) => panic!("file: {}\nerror: {}", path.display(), e)
};
let mut css_input = String::new();
file_reader.read_to_string(&mut css_input).unwrap();
let stylesheet = css_parse::CssParser::new(&css_input).parse_stylesheet();
stylesheet
}
// TODO change this into a binary crate consumer of the rest of the code
// TODO change the rest of the code into library crates
|
{
let mut path = env::current_dir().unwrap();
path.push("tests/parserTestFiles/blockTypeTest.html");
let mut file_reader = match File::open(&path) {
Ok(f) => BufReader::new(f),
Err(e) => panic!("file: {}\nerror: {}", path.display(), e)
};
let mut html_input = String::new();
file_reader.read_to_string(&mut html_input).unwrap();
let nodes = html_parse::HtmlParser::new(&html_input).parse_nodes();
nodes
}
|
identifier_body
|
main.rs
|
extern crate iron_oxide_browser;
use iron_oxide_browser::{command, css, css_parse, dom, html_parse, layout, render, style};
use std::env;
use std::fs::File;
use std::io::{Read, BufReader};
fn main() {
let nodes = test_html();
for node in nodes.iter() {
dom::pretty_print(node, 0);
}
let ref node = nodes[0];
println!("");
let ss = test_css();
print!("{:?}", ss);
println!("");
let style_tree_root = style::StyledNode::new(&node, &ss);
style::pretty_print(&style_tree_root, 0);
println!("");
let mut viewport = layout::Dimensions::default();
viewport.content.width = 1024.0;
viewport.content.height = 768.0;
let layout_tree = layout::layout_tree(&style_tree_root, viewport);
layout::pretty_print(&layout_tree, 0);
let display_commands = command::build_display_commands(&layout_tree);
render::render_loop(&display_commands);
}
fn
|
() -> Vec<dom::Node> {
let mut path = env::current_dir().unwrap();
path.push("tests/parserTestFiles/blockTypeTest.html");
let mut file_reader = match File::open(&path) {
Ok(f) => BufReader::new(f),
Err(e) => panic!("file: {}\nerror: {}", path.display(), e)
};
let mut html_input = String::new();
file_reader.read_to_string(&mut html_input).unwrap();
let nodes = html_parse::HtmlParser::new(&html_input).parse_nodes();
nodes
}
fn test_css() -> css::Stylesheet {
let mut path = env::current_dir().unwrap();
path.push("tests/parserTestFiles/blockTypeTest.css");
let mut file_reader = match File::open(&path) {
Ok(f) => BufReader::new(f),
Err(e) => panic!("file: {}\nerror: {}", path.display(), e)
};
let mut css_input = String::new();
file_reader.read_to_string(&mut css_input).unwrap();
let stylesheet = css_parse::CssParser::new(&css_input).parse_stylesheet();
stylesheet
}
// TODO change this into a binary crate consumer of the rest of the code
// TODO change the rest of the code into library crates
|
test_html
|
identifier_name
|
main.rs
|
extern crate iron_oxide_browser;
use iron_oxide_browser::{command, css, css_parse, dom, html_parse, layout, render, style};
use std::env;
use std::fs::File;
use std::io::{Read, BufReader};
fn main() {
let nodes = test_html();
for node in nodes.iter() {
dom::pretty_print(node, 0);
}
let ref node = nodes[0];
println!("");
let ss = test_css();
print!("{:?}", ss);
println!("");
let style_tree_root = style::StyledNode::new(&node, &ss);
style::pretty_print(&style_tree_root, 0);
|
let mut viewport = layout::Dimensions::default();
viewport.content.width = 1024.0;
viewport.content.height = 768.0;
let layout_tree = layout::layout_tree(&style_tree_root, viewport);
layout::pretty_print(&layout_tree, 0);
let display_commands = command::build_display_commands(&layout_tree);
render::render_loop(&display_commands);
}
fn test_html() -> Vec<dom::Node> {
let mut path = env::current_dir().unwrap();
path.push("tests/parserTestFiles/blockTypeTest.html");
let mut file_reader = match File::open(&path) {
Ok(f) => BufReader::new(f),
Err(e) => panic!("file: {}\nerror: {}", path.display(), e)
};
let mut html_input = String::new();
file_reader.read_to_string(&mut html_input).unwrap();
let nodes = html_parse::HtmlParser::new(&html_input).parse_nodes();
nodes
}
fn test_css() -> css::Stylesheet {
let mut path = env::current_dir().unwrap();
path.push("tests/parserTestFiles/blockTypeTest.css");
let mut file_reader = match File::open(&path) {
Ok(f) => BufReader::new(f),
Err(e) => panic!("file: {}\nerror: {}", path.display(), e)
};
let mut css_input = String::new();
file_reader.read_to_string(&mut css_input).unwrap();
let stylesheet = css_parse::CssParser::new(&css_input).parse_stylesheet();
stylesheet
}
// TODO change this into a binary crate consumer of the rest of the code
// TODO change the rest of the code into library crates
|
println!("");
|
random_line_split
|
main.rs
|
use std::ops::Add;
use std::ops::Mul;
fn main() {
// Rust allows limited operators overloading
// There are special traits for overloadng operators
#[derive(Debug)]
struct Point { x: i32, y: i32, }
impl Add for Point {
type Output = Point;
fn add(self, other: Point) -> Point {
Point { x: self.x + other.x, y: self.y + other.y }
}
}
let p1 = Point { x: 1, y: 0 };
let p2 = Point { x: 2, y: 3 };
let p3 = p1 + p2;
println!("{:?}", p3);
// not clear why ops trait require to move args
// using operator overloads is traits for generic structs
trait HasArea<T> { fn area(&self) -> T; }
struct
|
<T> { _x: T, _y: T, side: T, }
impl<T> HasArea<T> for Square<T>
where T: Mul<Output=T> + Copy {
fn area(&self) -> T {
self.side * self.side // Copy is important here, Rust would try to move self.side to return value then
}
}
let s = Square { _x: 0.0f64, _y: 0.0f64, side: 12.0f64 };
println!("square (side = {}) area is {}", s.side, s.area());
}
|
Square
|
identifier_name
|
main.rs
|
use std::ops::Add;
use std::ops::Mul;
fn main() {
// Rust allows limited operators overloading
// There are special traits for overloadng operators
#[derive(Debug)]
struct Point { x: i32, y: i32, }
impl Add for Point {
type Output = Point;
fn add(self, other: Point) -> Point {
Point { x: self.x + other.x, y: self.y + other.y }
}
}
let p1 = Point { x: 1, y: 0 };
let p2 = Point { x: 2, y: 3 };
let p3 = p1 + p2;
println!("{:?}", p3);
// not clear why ops trait require to move args
// using operator overloads is traits for generic structs
trait HasArea<T> { fn area(&self) -> T; }
struct Square<T> { _x: T, _y: T, side: T, }
impl<T> HasArea<T> for Square<T>
where T: Mul<Output=T> + Copy {
fn area(&self) -> T
|
}
let s = Square { _x: 0.0f64, _y: 0.0f64, side: 12.0f64 };
println!("square (side = {}) area is {}", s.side, s.area());
}
|
{
self.side * self.side // Copy is important here, Rust would try to move self.side to return value then
}
|
identifier_body
|
main.rs
|
use std::ops::Add;
use std::ops::Mul;
fn main() {
// Rust allows limited operators overloading
// There are special traits for overloadng operators
#[derive(Debug)]
struct Point { x: i32, y: i32, }
impl Add for Point {
type Output = Point;
fn add(self, other: Point) -> Point {
|
Point { x: self.x + other.x, y: self.y + other.y }
}
}
let p1 = Point { x: 1, y: 0 };
let p2 = Point { x: 2, y: 3 };
let p3 = p1 + p2;
println!("{:?}", p3);
// not clear why ops trait require to move args
// using operator overloads is traits for generic structs
trait HasArea<T> { fn area(&self) -> T; }
struct Square<T> { _x: T, _y: T, side: T, }
impl<T> HasArea<T> for Square<T>
where T: Mul<Output=T> + Copy {
fn area(&self) -> T {
self.side * self.side // Copy is important here, Rust would try to move self.side to return value then
}
}
let s = Square { _x: 0.0f64, _y: 0.0f64, side: 12.0f64 };
println!("square (side = {}) area is {}", s.side, s.area());
}
|
random_line_split
|
|
cast_size_32bit.rs
|
// ignore-64bit
#[warn(
clippy::cast_precision_loss,
clippy::cast_possible_truncation,
clippy::cast_sign_loss,
clippy::cast_possible_wrap,
clippy::cast_lossless
)]
#[allow(clippy::no_effect, clippy::unnecessary_operation)]
fn main() {
// Casting from *size
1isize as i8;
let x0 = 1isize;
let x1 = 1usize;
x0 as f64;
x1 as f64;
x0 as f32;
x1 as f32;
1isize as i32;
1isize as u32;
1usize as u32;
1usize as i32;
// Casting to *size
1i64 as isize;
|
1i64 as usize;
1u64 as isize;
1u64 as usize;
1u32 as isize;
1u32 as usize; // Should not trigger any lint
1i32 as isize; // Neither should this
1i32 as usize;
// Big integer literal to float
999_999_999 as f32;
3_999_999_999usize as f64;
}
|
random_line_split
|
|
cast_size_32bit.rs
|
// ignore-64bit
#[warn(
clippy::cast_precision_loss,
clippy::cast_possible_truncation,
clippy::cast_sign_loss,
clippy::cast_possible_wrap,
clippy::cast_lossless
)]
#[allow(clippy::no_effect, clippy::unnecessary_operation)]
fn main()
|
1i32 as isize; // Neither should this
1i32 as usize;
// Big integer literal to float
999_999_999 as f32;
3_999_999_999usize as f64;
}
|
{
// Casting from *size
1isize as i8;
let x0 = 1isize;
let x1 = 1usize;
x0 as f64;
x1 as f64;
x0 as f32;
x1 as f32;
1isize as i32;
1isize as u32;
1usize as u32;
1usize as i32;
// Casting to *size
1i64 as isize;
1i64 as usize;
1u64 as isize;
1u64 as usize;
1u32 as isize;
1u32 as usize; // Should not trigger any lint
|
identifier_body
|
cast_size_32bit.rs
|
// ignore-64bit
#[warn(
clippy::cast_precision_loss,
clippy::cast_possible_truncation,
clippy::cast_sign_loss,
clippy::cast_possible_wrap,
clippy::cast_lossless
)]
#[allow(clippy::no_effect, clippy::unnecessary_operation)]
fn
|
() {
// Casting from *size
1isize as i8;
let x0 = 1isize;
let x1 = 1usize;
x0 as f64;
x1 as f64;
x0 as f32;
x1 as f32;
1isize as i32;
1isize as u32;
1usize as u32;
1usize as i32;
// Casting to *size
1i64 as isize;
1i64 as usize;
1u64 as isize;
1u64 as usize;
1u32 as isize;
1u32 as usize; // Should not trigger any lint
1i32 as isize; // Neither should this
1i32 as usize;
// Big integer literal to float
999_999_999 as f32;
3_999_999_999usize as f64;
}
|
main
|
identifier_name
|
constructor-tp.rs
|
/* automatically generated by rust-bindgen */
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct Foo {
pub _address: u8,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct Bar {
pub _address: u8,
}
#[test]
fn bindgen_test_layout_Bar() {
assert_eq!(
::std::mem::size_of::<Bar>(),
1usize,
concat!("Size of: ", stringify!(Bar))
);
assert_eq!(
::std::mem::align_of::<Bar>(),
1usize,
concat!("Alignment of ", stringify!(Bar))
);
}
extern "C" {
#[link_name = "\u{1}_ZN3BarC1Ev"]
|
#[inline]
pub unsafe fn new() -> Self {
let mut __bindgen_tmp = ::std::mem::uninitialized();
Bar_Bar(&mut __bindgen_tmp);
__bindgen_tmp
}
}
|
pub fn Bar_Bar(this: *mut Bar);
}
impl Bar {
|
random_line_split
|
constructor-tp.rs
|
/* automatically generated by rust-bindgen */
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct Foo {
pub _address: u8,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct Bar {
pub _address: u8,
}
#[test]
fn bindgen_test_layout_Bar() {
assert_eq!(
::std::mem::size_of::<Bar>(),
1usize,
concat!("Size of: ", stringify!(Bar))
);
assert_eq!(
::std::mem::align_of::<Bar>(),
1usize,
concat!("Alignment of ", stringify!(Bar))
);
}
extern "C" {
#[link_name = "\u{1}_ZN3BarC1Ev"]
pub fn Bar_Bar(this: *mut Bar);
}
impl Bar {
#[inline]
pub unsafe fn
|
() -> Self {
let mut __bindgen_tmp = ::std::mem::uninitialized();
Bar_Bar(&mut __bindgen_tmp);
__bindgen_tmp
}
}
|
new
|
identifier_name
|
constructor-tp.rs
|
/* automatically generated by rust-bindgen */
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct Foo {
pub _address: u8,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct Bar {
pub _address: u8,
}
#[test]
fn bindgen_test_layout_Bar()
|
extern "C" {
#[link_name = "\u{1}_ZN3BarC1Ev"]
pub fn Bar_Bar(this: *mut Bar);
}
impl Bar {
#[inline]
pub unsafe fn new() -> Self {
let mut __bindgen_tmp = ::std::mem::uninitialized();
Bar_Bar(&mut __bindgen_tmp);
__bindgen_tmp
}
}
|
{
assert_eq!(
::std::mem::size_of::<Bar>(),
1usize,
concat!("Size of: ", stringify!(Bar))
);
assert_eq!(
::std::mem::align_of::<Bar>(),
1usize,
concat!("Alignment of ", stringify!(Bar))
);
}
|
identifier_body
|
model.rs
|
use z3_sys::*;
use Solver;
use Optimize;
use Model;
use Ast;
use Z3_MUTEX;
impl<'ctx> Model<'ctx> {
pub fn of_solver(slv: &Solver<'ctx>) -> Model<'ctx> {
Model {
ctx: slv.ctx,
z3_mdl: unsafe {
let guard = Z3_MUTEX.lock().unwrap();
let m = Z3_solver_get_model(slv.ctx.z3_ctx, slv.z3_slv);
Z3_model_inc_ref(slv.ctx.z3_ctx, m);
m
}
}
}
pub fn of_optimize(opt: &Optimize<'ctx>) -> Model<'ctx> {
Model {
ctx: opt.ctx,
z3_mdl: unsafe {
let guard = Z3_MUTEX.lock().unwrap();
let m = Z3_optimize_get_model(opt.ctx.z3_ctx, opt.z3_opt);
Z3_model_inc_ref(opt.ctx.z3_ctx, m);
m
}
}
}
pub fn eval(&self, ast: &Ast<'ctx>) -> Option<Ast<'ctx>> {
unsafe {
let mut tmp : Z3_ast = ast.z3_ast;
let res;
{
let guard = Z3_MUTEX.lock().unwrap();
res = Z3_model_eval(self.ctx.z3_ctx,
self.z3_mdl,
ast.z3_ast,
Z3_TRUE,
&mut tmp)
}
if res == Z3_TRUE {
Some(Ast::new(self.ctx, tmp))
} else {
None
}
}
}
}
|
impl<'ctx> Drop for Model<'ctx> {
fn drop(&mut self) {
unsafe {
let guard = Z3_MUTEX.lock().unwrap();
Z3_model_dec_ref(self.ctx.z3_ctx, self.z3_mdl);
}
}
}
|
random_line_split
|
|
model.rs
|
use z3_sys::*;
use Solver;
use Optimize;
use Model;
use Ast;
use Z3_MUTEX;
impl<'ctx> Model<'ctx> {
pub fn of_solver(slv: &Solver<'ctx>) -> Model<'ctx> {
Model {
ctx: slv.ctx,
z3_mdl: unsafe {
let guard = Z3_MUTEX.lock().unwrap();
let m = Z3_solver_get_model(slv.ctx.z3_ctx, slv.z3_slv);
Z3_model_inc_ref(slv.ctx.z3_ctx, m);
m
}
}
}
pub fn of_optimize(opt: &Optimize<'ctx>) -> Model<'ctx> {
Model {
ctx: opt.ctx,
z3_mdl: unsafe {
let guard = Z3_MUTEX.lock().unwrap();
let m = Z3_optimize_get_model(opt.ctx.z3_ctx, opt.z3_opt);
Z3_model_inc_ref(opt.ctx.z3_ctx, m);
m
}
}
}
pub fn eval(&self, ast: &Ast<'ctx>) -> Option<Ast<'ctx>> {
unsafe {
let mut tmp : Z3_ast = ast.z3_ast;
let res;
{
let guard = Z3_MUTEX.lock().unwrap();
res = Z3_model_eval(self.ctx.z3_ctx,
self.z3_mdl,
ast.z3_ast,
Z3_TRUE,
&mut tmp)
}
if res == Z3_TRUE {
Some(Ast::new(self.ctx, tmp))
} else
|
}
}
}
impl<'ctx> Drop for Model<'ctx> {
fn drop(&mut self) {
unsafe {
let guard = Z3_MUTEX.lock().unwrap();
Z3_model_dec_ref(self.ctx.z3_ctx, self.z3_mdl);
}
}
}
|
{
None
}
|
conditional_block
|
model.rs
|
use z3_sys::*;
use Solver;
use Optimize;
use Model;
use Ast;
use Z3_MUTEX;
impl<'ctx> Model<'ctx> {
pub fn of_solver(slv: &Solver<'ctx>) -> Model<'ctx> {
Model {
ctx: slv.ctx,
z3_mdl: unsafe {
let guard = Z3_MUTEX.lock().unwrap();
let m = Z3_solver_get_model(slv.ctx.z3_ctx, slv.z3_slv);
Z3_model_inc_ref(slv.ctx.z3_ctx, m);
m
}
}
}
pub fn of_optimize(opt: &Optimize<'ctx>) -> Model<'ctx> {
Model {
ctx: opt.ctx,
z3_mdl: unsafe {
let guard = Z3_MUTEX.lock().unwrap();
let m = Z3_optimize_get_model(opt.ctx.z3_ctx, opt.z3_opt);
Z3_model_inc_ref(opt.ctx.z3_ctx, m);
m
}
}
}
pub fn eval(&self, ast: &Ast<'ctx>) -> Option<Ast<'ctx>> {
unsafe {
let mut tmp : Z3_ast = ast.z3_ast;
let res;
{
let guard = Z3_MUTEX.lock().unwrap();
res = Z3_model_eval(self.ctx.z3_ctx,
self.z3_mdl,
ast.z3_ast,
Z3_TRUE,
&mut tmp)
}
if res == Z3_TRUE {
Some(Ast::new(self.ctx, tmp))
} else {
None
}
}
}
}
impl<'ctx> Drop for Model<'ctx> {
fn drop(&mut self)
|
}
|
{
unsafe {
let guard = Z3_MUTEX.lock().unwrap();
Z3_model_dec_ref(self.ctx.z3_ctx, self.z3_mdl);
}
}
|
identifier_body
|
model.rs
|
use z3_sys::*;
use Solver;
use Optimize;
use Model;
use Ast;
use Z3_MUTEX;
impl<'ctx> Model<'ctx> {
pub fn
|
(slv: &Solver<'ctx>) -> Model<'ctx> {
Model {
ctx: slv.ctx,
z3_mdl: unsafe {
let guard = Z3_MUTEX.lock().unwrap();
let m = Z3_solver_get_model(slv.ctx.z3_ctx, slv.z3_slv);
Z3_model_inc_ref(slv.ctx.z3_ctx, m);
m
}
}
}
pub fn of_optimize(opt: &Optimize<'ctx>) -> Model<'ctx> {
Model {
ctx: opt.ctx,
z3_mdl: unsafe {
let guard = Z3_MUTEX.lock().unwrap();
let m = Z3_optimize_get_model(opt.ctx.z3_ctx, opt.z3_opt);
Z3_model_inc_ref(opt.ctx.z3_ctx, m);
m
}
}
}
pub fn eval(&self, ast: &Ast<'ctx>) -> Option<Ast<'ctx>> {
unsafe {
let mut tmp : Z3_ast = ast.z3_ast;
let res;
{
let guard = Z3_MUTEX.lock().unwrap();
res = Z3_model_eval(self.ctx.z3_ctx,
self.z3_mdl,
ast.z3_ast,
Z3_TRUE,
&mut tmp)
}
if res == Z3_TRUE {
Some(Ast::new(self.ctx, tmp))
} else {
None
}
}
}
}
impl<'ctx> Drop for Model<'ctx> {
fn drop(&mut self) {
unsafe {
let guard = Z3_MUTEX.lock().unwrap();
Z3_model_dec_ref(self.ctx.z3_ctx, self.z3_mdl);
}
}
}
|
of_solver
|
identifier_name
|
xrinputsourceevent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::XRInputSourceEventBinding::{
self, XRInputSourceEventMethods,
};
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::Event;
use crate::dom::globalscope::GlobalScope;
use crate::dom::window::Window;
use crate::dom::xrframe::XRFrame;
use crate::dom::xrinputsource::XRInputSource;
use dom_struct::dom_struct;
use servo_atoms::Atom;
#[dom_struct]
pub struct XRInputSourceEvent {
event: Event,
frame: Dom<XRFrame>,
source: Dom<XRInputSource>,
}
impl XRInputSourceEvent {
#[allow(unrooted_must_root)]
fn new_inherited(frame: &XRFrame, source: &XRInputSource) -> XRInputSourceEvent {
XRInputSourceEvent {
event: Event::new_inherited(),
frame: Dom::from_ref(frame),
source: Dom::from_ref(source),
}
}
pub fn new(
global: &GlobalScope,
type_: Atom,
bubbles: bool,
|
source: &XRInputSource,
) -> DomRoot<XRInputSourceEvent> {
let trackevent = reflect_dom_object(
Box::new(XRInputSourceEvent::new_inherited(frame, source)),
global,
);
{
let event = trackevent.upcast::<Event>();
event.init_event(type_, bubbles, cancelable);
}
trackevent
}
#[allow(non_snake_case)]
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &XRInputSourceEventBinding::XRInputSourceEventInit,
) -> Fallible<DomRoot<XRInputSourceEvent>> {
Ok(XRInputSourceEvent::new(
&window.global(),
Atom::from(type_),
init.parent.bubbles,
init.parent.cancelable,
&init.frame,
&init.inputSource,
))
}
}
impl XRInputSourceEventMethods for XRInputSourceEvent {
// https://immersive-web.github.io/webxr/#dom-xrinputsourceeventinit-frame
fn Frame(&self) -> DomRoot<XRFrame> {
DomRoot::from_ref(&*self.frame)
}
// https://immersive-web.github.io/webxr/#dom-xrinputsourceeventinit-inputsource
fn InputSource(&self) -> DomRoot<XRInputSource> {
DomRoot::from_ref(&*self.source)
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
|
cancelable: bool,
frame: &XRFrame,
|
random_line_split
|
xrinputsourceevent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::XRInputSourceEventBinding::{
self, XRInputSourceEventMethods,
};
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::Event;
use crate::dom::globalscope::GlobalScope;
use crate::dom::window::Window;
use crate::dom::xrframe::XRFrame;
use crate::dom::xrinputsource::XRInputSource;
use dom_struct::dom_struct;
use servo_atoms::Atom;
#[dom_struct]
pub struct XRInputSourceEvent {
event: Event,
frame: Dom<XRFrame>,
source: Dom<XRInputSource>,
}
impl XRInputSourceEvent {
#[allow(unrooted_must_root)]
fn new_inherited(frame: &XRFrame, source: &XRInputSource) -> XRInputSourceEvent {
XRInputSourceEvent {
event: Event::new_inherited(),
frame: Dom::from_ref(frame),
source: Dom::from_ref(source),
}
}
pub fn new(
global: &GlobalScope,
type_: Atom,
bubbles: bool,
cancelable: bool,
frame: &XRFrame,
source: &XRInputSource,
) -> DomRoot<XRInputSourceEvent> {
let trackevent = reflect_dom_object(
Box::new(XRInputSourceEvent::new_inherited(frame, source)),
global,
);
{
let event = trackevent.upcast::<Event>();
event.init_event(type_, bubbles, cancelable);
}
trackevent
}
#[allow(non_snake_case)]
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &XRInputSourceEventBinding::XRInputSourceEventInit,
) -> Fallible<DomRoot<XRInputSourceEvent>> {
Ok(XRInputSourceEvent::new(
&window.global(),
Atom::from(type_),
init.parent.bubbles,
init.parent.cancelable,
&init.frame,
&init.inputSource,
))
}
}
impl XRInputSourceEventMethods for XRInputSourceEvent {
// https://immersive-web.github.io/webxr/#dom-xrinputsourceeventinit-frame
fn Frame(&self) -> DomRoot<XRFrame> {
DomRoot::from_ref(&*self.frame)
}
// https://immersive-web.github.io/webxr/#dom-xrinputsourceeventinit-inputsource
fn InputSource(&self) -> DomRoot<XRInputSource>
|
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
|
{
DomRoot::from_ref(&*self.source)
}
|
identifier_body
|
xrinputsourceevent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::XRInputSourceEventBinding::{
self, XRInputSourceEventMethods,
};
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::Event;
use crate::dom::globalscope::GlobalScope;
use crate::dom::window::Window;
use crate::dom::xrframe::XRFrame;
use crate::dom::xrinputsource::XRInputSource;
use dom_struct::dom_struct;
use servo_atoms::Atom;
#[dom_struct]
pub struct
|
{
event: Event,
frame: Dom<XRFrame>,
source: Dom<XRInputSource>,
}
impl XRInputSourceEvent {
#[allow(unrooted_must_root)]
fn new_inherited(frame: &XRFrame, source: &XRInputSource) -> XRInputSourceEvent {
XRInputSourceEvent {
event: Event::new_inherited(),
frame: Dom::from_ref(frame),
source: Dom::from_ref(source),
}
}
pub fn new(
global: &GlobalScope,
type_: Atom,
bubbles: bool,
cancelable: bool,
frame: &XRFrame,
source: &XRInputSource,
) -> DomRoot<XRInputSourceEvent> {
let trackevent = reflect_dom_object(
Box::new(XRInputSourceEvent::new_inherited(frame, source)),
global,
);
{
let event = trackevent.upcast::<Event>();
event.init_event(type_, bubbles, cancelable);
}
trackevent
}
#[allow(non_snake_case)]
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &XRInputSourceEventBinding::XRInputSourceEventInit,
) -> Fallible<DomRoot<XRInputSourceEvent>> {
Ok(XRInputSourceEvent::new(
&window.global(),
Atom::from(type_),
init.parent.bubbles,
init.parent.cancelable,
&init.frame,
&init.inputSource,
))
}
}
impl XRInputSourceEventMethods for XRInputSourceEvent {
// https://immersive-web.github.io/webxr/#dom-xrinputsourceeventinit-frame
fn Frame(&self) -> DomRoot<XRFrame> {
DomRoot::from_ref(&*self.frame)
}
// https://immersive-web.github.io/webxr/#dom-xrinputsourceeventinit-inputsource
fn InputSource(&self) -> DomRoot<XRInputSource> {
DomRoot::from_ref(&*self.source)
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
|
XRInputSourceEvent
|
identifier_name
|
bios_boot_device.rs
|
use tag::{TagType, VerifyTag};
const UNUSED_PARTITION_NUMBER: u32 = 0xFFFFFFFF;
const BIOS_BOOT_DEVICE_TAG_SIZE: usize = 20;
#[repr(C, packed)]
pub struct BIOSBootDeviceTag {
tag_type: u32,
size: u32,
pub biosdev: u32,
pub partition: u32,
pub sub_partition: u32
}
impl BIOSBootDeviceTag {
fn is_valid_partition(&self) -> bool {
self.partition!= UNUSED_PARTITION_NUMBER
}
fn is_valid_sub_partition(&self) -> bool {
self.sub_partition!= UNUSED_PARTITION_NUMBER
}
fn size_bytes(&self) -> usize {
self.size as usize
|
}
}
impl VerifyTag for BIOSBootDeviceTag {
fn is_valid(&self) -> bool {
(self.size_bytes() == BIOS_BOOT_DEVICE_TAG_SIZE)
&& (self.tag_type == TagType::BIOSBootDevice as u32)
}
}
#[derive(Debug, Copy, Clone)]
pub struct BootDevice {
biosdev: u32,
partition: u32,
sub_partition: u32,
}
impl BootDevice {
pub fn new(biosdev: u32, partition: u32, sub_partition: u32) -> BootDevice {
BootDevice {
biosdev: biosdev,
partition: partition,
sub_partition: sub_partition
}
}
pub fn is_valid_partition(&self) -> bool {
self.partition!= UNUSED_PARTITION_NUMBER
}
pub fn is_valid_sub_partition(&self) -> bool {
self.sub_partition!= UNUSED_PARTITION_NUMBER
}
pub fn is_valid(&self) -> bool {
self.is_valid_partition() && self.is_valid_sub_partition()
}
pub fn biosdev(&self) -> usize {
self.biosdev as usize
}
pub fn partition(&self) -> usize {
self.partition as usize
}
pub fn sub_partition(&self) -> usize {
self.sub_partition as usize
}
}
|
random_line_split
|
|
bios_boot_device.rs
|
use tag::{TagType, VerifyTag};
const UNUSED_PARTITION_NUMBER: u32 = 0xFFFFFFFF;
const BIOS_BOOT_DEVICE_TAG_SIZE: usize = 20;
#[repr(C, packed)]
pub struct BIOSBootDeviceTag {
tag_type: u32,
size: u32,
pub biosdev: u32,
pub partition: u32,
pub sub_partition: u32
}
impl BIOSBootDeviceTag {
fn is_valid_partition(&self) -> bool
|
fn is_valid_sub_partition(&self) -> bool {
self.sub_partition!= UNUSED_PARTITION_NUMBER
}
fn size_bytes(&self) -> usize {
self.size as usize
}
}
impl VerifyTag for BIOSBootDeviceTag {
fn is_valid(&self) -> bool {
(self.size_bytes() == BIOS_BOOT_DEVICE_TAG_SIZE)
&& (self.tag_type == TagType::BIOSBootDevice as u32)
}
}
#[derive(Debug, Copy, Clone)]
pub struct BootDevice {
biosdev: u32,
partition: u32,
sub_partition: u32,
}
impl BootDevice {
pub fn new(biosdev: u32, partition: u32, sub_partition: u32) -> BootDevice {
BootDevice {
biosdev: biosdev,
partition: partition,
sub_partition: sub_partition
}
}
pub fn is_valid_partition(&self) -> bool {
self.partition!= UNUSED_PARTITION_NUMBER
}
pub fn is_valid_sub_partition(&self) -> bool {
self.sub_partition!= UNUSED_PARTITION_NUMBER
}
pub fn is_valid(&self) -> bool {
self.is_valid_partition() && self.is_valid_sub_partition()
}
pub fn biosdev(&self) -> usize {
self.biosdev as usize
}
pub fn partition(&self) -> usize {
self.partition as usize
}
pub fn sub_partition(&self) -> usize {
self.sub_partition as usize
}
}
|
{
self.partition != UNUSED_PARTITION_NUMBER
}
|
identifier_body
|
bios_boot_device.rs
|
use tag::{TagType, VerifyTag};
const UNUSED_PARTITION_NUMBER: u32 = 0xFFFFFFFF;
const BIOS_BOOT_DEVICE_TAG_SIZE: usize = 20;
#[repr(C, packed)]
pub struct BIOSBootDeviceTag {
tag_type: u32,
size: u32,
pub biosdev: u32,
pub partition: u32,
pub sub_partition: u32
}
impl BIOSBootDeviceTag {
fn is_valid_partition(&self) -> bool {
self.partition!= UNUSED_PARTITION_NUMBER
}
fn is_valid_sub_partition(&self) -> bool {
self.sub_partition!= UNUSED_PARTITION_NUMBER
}
fn size_bytes(&self) -> usize {
self.size as usize
}
}
impl VerifyTag for BIOSBootDeviceTag {
fn is_valid(&self) -> bool {
(self.size_bytes() == BIOS_BOOT_DEVICE_TAG_SIZE)
&& (self.tag_type == TagType::BIOSBootDevice as u32)
}
}
#[derive(Debug, Copy, Clone)]
pub struct
|
{
biosdev: u32,
partition: u32,
sub_partition: u32,
}
impl BootDevice {
pub fn new(biosdev: u32, partition: u32, sub_partition: u32) -> BootDevice {
BootDevice {
biosdev: biosdev,
partition: partition,
sub_partition: sub_partition
}
}
pub fn is_valid_partition(&self) -> bool {
self.partition!= UNUSED_PARTITION_NUMBER
}
pub fn is_valid_sub_partition(&self) -> bool {
self.sub_partition!= UNUSED_PARTITION_NUMBER
}
pub fn is_valid(&self) -> bool {
self.is_valid_partition() && self.is_valid_sub_partition()
}
pub fn biosdev(&self) -> usize {
self.biosdev as usize
}
pub fn partition(&self) -> usize {
self.partition as usize
}
pub fn sub_partition(&self) -> usize {
self.sub_partition as usize
}
}
|
BootDevice
|
identifier_name
|
thread.rs
|
use std::thread;
use std::sync::mpsc;
pub struct CancelSender(mpsc::Sender<()>);
impl CancelSender {
pub fn cancel_thread(self) {
let CancelSender(sender) = self;
let _ = sender.send(());
}
}
pub struct CancelReceiver(mpsc::Receiver<()>);
impl CancelReceiver {
pub fn has_been_canceled(&self) -> bool
|
}
// Wraps the standard thread::spawn, creating a channel to be used for canceling the thread.
// Note that is_canceled will automatically become true if the cancel sender is lost, so be sure to
// hold onto it!
pub fn spawn_cancelable<F>(to_execute: F) -> CancelSender
where F: FnOnce(CancelReceiver), F: Send +'static {
let (sender, receiver) = mpsc::channel();
thread::spawn(move || to_execute(CancelReceiver(receiver)));
CancelSender(sender)
}
|
{
let &CancelReceiver(ref receiver) = self;
match receiver.try_recv() {
Ok(_) | Err(mpsc::TryRecvError::Disconnected) => return true,
Err(mpsc::TryRecvError::Empty) => return false,
}
}
|
identifier_body
|
thread.rs
|
use std::thread;
use std::sync::mpsc;
pub struct CancelSender(mpsc::Sender<()>);
impl CancelSender {
pub fn
|
(self) {
let CancelSender(sender) = self;
let _ = sender.send(());
}
}
pub struct CancelReceiver(mpsc::Receiver<()>);
impl CancelReceiver {
pub fn has_been_canceled(&self) -> bool {
let &CancelReceiver(ref receiver) = self;
match receiver.try_recv() {
Ok(_) | Err(mpsc::TryRecvError::Disconnected) => return true,
Err(mpsc::TryRecvError::Empty) => return false,
}
}
}
// Wraps the standard thread::spawn, creating a channel to be used for canceling the thread.
// Note that is_canceled will automatically become true if the cancel sender is lost, so be sure to
// hold onto it!
pub fn spawn_cancelable<F>(to_execute: F) -> CancelSender
where F: FnOnce(CancelReceiver), F: Send +'static {
let (sender, receiver) = mpsc::channel();
thread::spawn(move || to_execute(CancelReceiver(receiver)));
CancelSender(sender)
}
|
cancel_thread
|
identifier_name
|
thread.rs
|
use std::thread;
use std::sync::mpsc;
pub struct CancelSender(mpsc::Sender<()>);
impl CancelSender {
pub fn cancel_thread(self) {
let CancelSender(sender) = self;
let _ = sender.send(());
}
}
pub struct CancelReceiver(mpsc::Receiver<()>);
impl CancelReceiver {
|
pub fn has_been_canceled(&self) -> bool {
let &CancelReceiver(ref receiver) = self;
match receiver.try_recv() {
Ok(_) | Err(mpsc::TryRecvError::Disconnected) => return true,
Err(mpsc::TryRecvError::Empty) => return false,
}
}
}
// Wraps the standard thread::spawn, creating a channel to be used for canceling the thread.
// Note that is_canceled will automatically become true if the cancel sender is lost, so be sure to
// hold onto it!
pub fn spawn_cancelable<F>(to_execute: F) -> CancelSender
where F: FnOnce(CancelReceiver), F: Send +'static {
let (sender, receiver) = mpsc::channel();
thread::spawn(move || to_execute(CancelReceiver(receiver)));
CancelSender(sender)
}
|
random_line_split
|
|
main.rs
|
// TODO: many items from tokio-core::io have been deprecated in favour of tokio-io
#![allow(deprecated)]
#[macro_use] extern crate log;
extern crate env_logger;
extern crate futures;
extern crate getopts;
extern crate librespot;
extern crate tokio_core;
extern crate tokio_signal;
use env_logger::LogBuilder;
use futures::{Future, Async, Poll, Stream};
use std::env;
use std::io::{self, stderr, Write};
use std::path::PathBuf;
use std::process::exit;
use std::str::FromStr;
use tokio_core::reactor::{Handle, Core};
use tokio_core::io::IoStream;
use std::mem;
use librespot::core::authentication::{get_credentials, Credentials};
use librespot::core::cache::Cache;
use librespot::core::config::{Bitrate, DeviceType, PlayerConfig, SessionConfig, ConnectConfig};
use librespot::core::session::Session;
use librespot::core::version;
use librespot::audio_backend::{self, Sink, BACKENDS};
use librespot::discovery::{discovery, DiscoveryStream};
use librespot::mixer::{self, Mixer};
use librespot::player::Player;
use librespot::spirc::{Spirc, SpircTask};
fn usage(program: &str, opts: &getopts::Options) -> String {
let brief = format!("Usage: {} [options]", program);
opts.usage(&brief)
}
fn setup_logging(verbose: bool) {
let mut builder = LogBuilder::new();
match env::var("RUST_LOG") {
Ok(config) => {
builder.parse(&config);
builder.init().unwrap();
if verbose {
warn!("`--verbose` flag overidden by `RUST_LOG` environment variable");
}
}
Err(_) => {
if verbose {
builder.parse("mdns=info,librespot=trace");
} else {
builder.parse("mdns=info,librespot=info");
}
builder.init().unwrap();
}
}
}
fn list_backends() {
println!("Available Backends : ");
for (&(name, _), idx) in BACKENDS.iter().zip(0..) {
if idx == 0 {
println!("- {} (default)", name);
} else {
println!("- {}", name);
}
}
}
#[derive(Clone)]
struct Setup {
backend: fn(Option<String>) -> Box<Sink>,
device: Option<String>,
mixer: fn() -> Box<Mixer>,
cache: Option<Cache>,
player_config: PlayerConfig,
session_config: SessionConfig,
connect_config: ConnectConfig,
credentials: Option<Credentials>,
enable_discovery: bool,
}
fn setup(args: &[String]) -> Setup {
let mut opts = getopts::Options::new();
opts.optopt("c", "cache", "Path to a directory where files will be cached.", "CACHE")
.optflag("", "disable-audio-cache", "Disable caching of the audio data.")
.reqopt("n", "name", "Device name", "NAME")
.optopt("", "device-type", "Displayed device type", "DEVICE_TYPE")
.optopt("b", "bitrate", "Bitrate (96, 160 or 320). Defaults to 160", "BITRATE")
.optopt("", "onstart", "Run PROGRAM when playback is about to begin.", "PROGRAM")
.optopt("", "onstop", "Run PROGRAM when playback has ended.", "PROGRAM")
.optflag("v", "verbose", "Enable verbose output")
.optopt("u", "username", "Username to sign in with", "USERNAME")
.optopt("p", "password", "Password", "PASSWORD")
.optflag("", "disable-discovery", "Disable discovery mode")
.optopt("", "backend", "Audio backend to use. Use '?' to list options", "BACKEND")
.optopt("", "device", "Audio device to use. Use '?' to list options", "DEVICE")
.optopt("", "mixer", "Mixer to use", "MIXER");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => {
writeln!(stderr(), "error: {}\n{}", f.to_string(), usage(&args[0], &opts)).unwrap();
exit(1);
}
};
let verbose = matches.opt_present("verbose");
setup_logging(verbose);
info!("librespot {} ({}). Built on {}. Build ID: {}",
version::short_sha(),
version::commit_date(),
version::short_now(),
version::build_id());
let backend_name = matches.opt_str("backend");
if backend_name == Some("?".into()) {
list_backends();
exit(0);
}
let backend = audio_backend::find(backend_name)
.expect("Invalid backend");
let device = matches.opt_str("device");
let mixer_name = matches.opt_str("mixer");
|
let use_audio_cache =!matches.opt_present("disable-audio-cache");
let cache = matches.opt_str("c").map(|cache_location| {
Cache::new(PathBuf::from(cache_location), use_audio_cache)
});
let credentials = {
let cached_credentials = cache.as_ref().and_then(Cache::credentials);
get_credentials(
matches.opt_str("username"),
matches.opt_str("password"),
cached_credentials
)
};
let session_config = {
let device_id = librespot::core::session::device_id(&name);
SessionConfig {
user_agent: version::version_string(),
device_id: device_id,
}
};
let player_config = {
let bitrate = matches.opt_str("b").as_ref()
.map(|bitrate| Bitrate::from_str(bitrate).expect("Invalid bitrate"))
.unwrap_or(Bitrate::default());
PlayerConfig {
bitrate: bitrate,
onstart: matches.opt_str("onstart"),
onstop: matches.opt_str("onstop"),
}
};
let connect_config = {
let device_type = matches.opt_str("device-type").as_ref()
.map(|device_type| DeviceType::from_str(device_type).expect("Invalid device type"))
.unwrap_or(DeviceType::default());
ConnectConfig {
name: name,
device_type: device_type,
}
};
let enable_discovery =!matches.opt_present("disable-discovery");
Setup {
backend: backend,
cache: cache,
session_config: session_config,
player_config: player_config,
connect_config: connect_config,
credentials: credentials,
device: device,
enable_discovery: enable_discovery,
mixer: mixer,
}
}
struct Main {
cache: Option<Cache>,
player_config: PlayerConfig,
session_config: SessionConfig,
connect_config: ConnectConfig,
backend: fn(Option<String>) -> Box<Sink>,
device: Option<String>,
mixer: fn() -> Box<Mixer>,
handle: Handle,
discovery: Option<DiscoveryStream>,
signal: IoStream<()>,
spirc: Option<Spirc>,
spirc_task: Option<SpircTask>,
connect: Box<Future<Item=Session, Error=io::Error>>,
shutdown: bool,
}
impl Main {
fn new(handle: Handle, setup: Setup) -> Main {
let mut task = Main {
handle: handle.clone(),
cache: setup.cache,
session_config: setup.session_config,
player_config: setup.player_config,
connect_config: setup.connect_config,
backend: setup.backend,
device: setup.device,
mixer: setup.mixer,
connect: Box::new(futures::future::empty()),
discovery: None,
spirc: None,
spirc_task: None,
shutdown: false,
signal: tokio_signal::ctrl_c(&handle).flatten_stream().boxed(),
};
if setup.enable_discovery {
let config = task.connect_config.clone();
let device_id = task.session_config.device_id.clone();
task.discovery = Some(discovery(&handle, config, device_id).unwrap());
}
if let Some(credentials) = setup.credentials {
task.credentials(credentials);
}
task
}
fn credentials(&mut self, credentials: Credentials) {
let config = self.session_config.clone();
let handle = self.handle.clone();
let connection = Session::connect(config, credentials, self.cache.clone(), handle);
self.connect = connection;
self.spirc = None;
let task = mem::replace(&mut self.spirc_task, None);
if let Some(task) = task {
self.handle.spawn(task);
}
}
}
impl Future for Main {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
loop {
let mut progress = false;
if let Some(Async::Ready(Some(creds))) = self.discovery.as_mut().map(|d| d.poll().unwrap()) {
if let Some(ref spirc) = self.spirc {
spirc.shutdown();
}
self.credentials(creds);
progress = true;
}
if let Async::Ready(session) = self.connect.poll().unwrap() {
self.connect = Box::new(futures::future::empty());
let device = self.device.clone();
let mixer = (self.mixer)();
let player_config = self.player_config.clone();
let connect_config = self.connect_config.clone();
let audio_filter = mixer.get_audio_filter();
let backend = self.backend;
let player = Player::new(player_config, session.clone(), audio_filter, move || {
(backend)(device)
});
let (spirc, spirc_task) = Spirc::new(connect_config, session, player, mixer);
self.spirc = Some(spirc);
self.spirc_task = Some(spirc_task);
progress = true;
}
if let Async::Ready(Some(())) = self.signal.poll().unwrap() {
if!self.shutdown {
if let Some(ref spirc) = self.spirc {
spirc.shutdown();
}
self.shutdown = true;
} else {
return Ok(Async::Ready(()));
}
progress = true;
}
if let Some(ref mut spirc_task) = self.spirc_task {
if let Async::Ready(()) = spirc_task.poll().unwrap() {
if self.shutdown {
return Ok(Async::Ready(()));
} else {
panic!("Spirc shut down unexpectedly");
}
}
}
if!progress {
return Ok(Async::NotReady);
}
}
}
}
fn main() {
let mut core = Core::new().unwrap();
let handle = core.handle();
let args: Vec<String> = std::env::args().collect();
core.run(Main::new(handle, setup(&args))).unwrap()
}
|
let mixer = mixer::find(mixer_name.as_ref())
.expect("Invalid mixer");
let name = matches.opt_str("name").unwrap();
|
random_line_split
|
main.rs
|
// TODO: many items from tokio-core::io have been deprecated in favour of tokio-io
#![allow(deprecated)]
#[macro_use] extern crate log;
extern crate env_logger;
extern crate futures;
extern crate getopts;
extern crate librespot;
extern crate tokio_core;
extern crate tokio_signal;
use env_logger::LogBuilder;
use futures::{Future, Async, Poll, Stream};
use std::env;
use std::io::{self, stderr, Write};
use std::path::PathBuf;
use std::process::exit;
use std::str::FromStr;
use tokio_core::reactor::{Handle, Core};
use tokio_core::io::IoStream;
use std::mem;
use librespot::core::authentication::{get_credentials, Credentials};
use librespot::core::cache::Cache;
use librespot::core::config::{Bitrate, DeviceType, PlayerConfig, SessionConfig, ConnectConfig};
use librespot::core::session::Session;
use librespot::core::version;
use librespot::audio_backend::{self, Sink, BACKENDS};
use librespot::discovery::{discovery, DiscoveryStream};
use librespot::mixer::{self, Mixer};
use librespot::player::Player;
use librespot::spirc::{Spirc, SpircTask};
fn usage(program: &str, opts: &getopts::Options) -> String {
let brief = format!("Usage: {} [options]", program);
opts.usage(&brief)
}
fn setup_logging(verbose: bool) {
let mut builder = LogBuilder::new();
match env::var("RUST_LOG") {
Ok(config) => {
builder.parse(&config);
builder.init().unwrap();
if verbose {
warn!("`--verbose` flag overidden by `RUST_LOG` environment variable");
}
}
Err(_) => {
if verbose {
builder.parse("mdns=info,librespot=trace");
} else
|
builder.init().unwrap();
}
}
}
fn list_backends() {
println!("Available Backends : ");
for (&(name, _), idx) in BACKENDS.iter().zip(0..) {
if idx == 0 {
println!("- {} (default)", name);
} else {
println!("- {}", name);
}
}
}
#[derive(Clone)]
struct Setup {
backend: fn(Option<String>) -> Box<Sink>,
device: Option<String>,
mixer: fn() -> Box<Mixer>,
cache: Option<Cache>,
player_config: PlayerConfig,
session_config: SessionConfig,
connect_config: ConnectConfig,
credentials: Option<Credentials>,
enable_discovery: bool,
}
fn setup(args: &[String]) -> Setup {
let mut opts = getopts::Options::new();
opts.optopt("c", "cache", "Path to a directory where files will be cached.", "CACHE")
.optflag("", "disable-audio-cache", "Disable caching of the audio data.")
.reqopt("n", "name", "Device name", "NAME")
.optopt("", "device-type", "Displayed device type", "DEVICE_TYPE")
.optopt("b", "bitrate", "Bitrate (96, 160 or 320). Defaults to 160", "BITRATE")
.optopt("", "onstart", "Run PROGRAM when playback is about to begin.", "PROGRAM")
.optopt("", "onstop", "Run PROGRAM when playback has ended.", "PROGRAM")
.optflag("v", "verbose", "Enable verbose output")
.optopt("u", "username", "Username to sign in with", "USERNAME")
.optopt("p", "password", "Password", "PASSWORD")
.optflag("", "disable-discovery", "Disable discovery mode")
.optopt("", "backend", "Audio backend to use. Use '?' to list options", "BACKEND")
.optopt("", "device", "Audio device to use. Use '?' to list options", "DEVICE")
.optopt("", "mixer", "Mixer to use", "MIXER");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => {
writeln!(stderr(), "error: {}\n{}", f.to_string(), usage(&args[0], &opts)).unwrap();
exit(1);
}
};
let verbose = matches.opt_present("verbose");
setup_logging(verbose);
info!("librespot {} ({}). Built on {}. Build ID: {}",
version::short_sha(),
version::commit_date(),
version::short_now(),
version::build_id());
let backend_name = matches.opt_str("backend");
if backend_name == Some("?".into()) {
list_backends();
exit(0);
}
let backend = audio_backend::find(backend_name)
.expect("Invalid backend");
let device = matches.opt_str("device");
let mixer_name = matches.opt_str("mixer");
let mixer = mixer::find(mixer_name.as_ref())
.expect("Invalid mixer");
let name = matches.opt_str("name").unwrap();
let use_audio_cache =!matches.opt_present("disable-audio-cache");
let cache = matches.opt_str("c").map(|cache_location| {
Cache::new(PathBuf::from(cache_location), use_audio_cache)
});
let credentials = {
let cached_credentials = cache.as_ref().and_then(Cache::credentials);
get_credentials(
matches.opt_str("username"),
matches.opt_str("password"),
cached_credentials
)
};
let session_config = {
let device_id = librespot::core::session::device_id(&name);
SessionConfig {
user_agent: version::version_string(),
device_id: device_id,
}
};
let player_config = {
let bitrate = matches.opt_str("b").as_ref()
.map(|bitrate| Bitrate::from_str(bitrate).expect("Invalid bitrate"))
.unwrap_or(Bitrate::default());
PlayerConfig {
bitrate: bitrate,
onstart: matches.opt_str("onstart"),
onstop: matches.opt_str("onstop"),
}
};
let connect_config = {
let device_type = matches.opt_str("device-type").as_ref()
.map(|device_type| DeviceType::from_str(device_type).expect("Invalid device type"))
.unwrap_or(DeviceType::default());
ConnectConfig {
name: name,
device_type: device_type,
}
};
let enable_discovery =!matches.opt_present("disable-discovery");
Setup {
backend: backend,
cache: cache,
session_config: session_config,
player_config: player_config,
connect_config: connect_config,
credentials: credentials,
device: device,
enable_discovery: enable_discovery,
mixer: mixer,
}
}
struct Main {
cache: Option<Cache>,
player_config: PlayerConfig,
session_config: SessionConfig,
connect_config: ConnectConfig,
backend: fn(Option<String>) -> Box<Sink>,
device: Option<String>,
mixer: fn() -> Box<Mixer>,
handle: Handle,
discovery: Option<DiscoveryStream>,
signal: IoStream<()>,
spirc: Option<Spirc>,
spirc_task: Option<SpircTask>,
connect: Box<Future<Item=Session, Error=io::Error>>,
shutdown: bool,
}
impl Main {
fn new(handle: Handle, setup: Setup) -> Main {
let mut task = Main {
handle: handle.clone(),
cache: setup.cache,
session_config: setup.session_config,
player_config: setup.player_config,
connect_config: setup.connect_config,
backend: setup.backend,
device: setup.device,
mixer: setup.mixer,
connect: Box::new(futures::future::empty()),
discovery: None,
spirc: None,
spirc_task: None,
shutdown: false,
signal: tokio_signal::ctrl_c(&handle).flatten_stream().boxed(),
};
if setup.enable_discovery {
let config = task.connect_config.clone();
let device_id = task.session_config.device_id.clone();
task.discovery = Some(discovery(&handle, config, device_id).unwrap());
}
if let Some(credentials) = setup.credentials {
task.credentials(credentials);
}
task
}
fn credentials(&mut self, credentials: Credentials) {
let config = self.session_config.clone();
let handle = self.handle.clone();
let connection = Session::connect(config, credentials, self.cache.clone(), handle);
self.connect = connection;
self.spirc = None;
let task = mem::replace(&mut self.spirc_task, None);
if let Some(task) = task {
self.handle.spawn(task);
}
}
}
impl Future for Main {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
loop {
let mut progress = false;
if let Some(Async::Ready(Some(creds))) = self.discovery.as_mut().map(|d| d.poll().unwrap()) {
if let Some(ref spirc) = self.spirc {
spirc.shutdown();
}
self.credentials(creds);
progress = true;
}
if let Async::Ready(session) = self.connect.poll().unwrap() {
self.connect = Box::new(futures::future::empty());
let device = self.device.clone();
let mixer = (self.mixer)();
let player_config = self.player_config.clone();
let connect_config = self.connect_config.clone();
let audio_filter = mixer.get_audio_filter();
let backend = self.backend;
let player = Player::new(player_config, session.clone(), audio_filter, move || {
(backend)(device)
});
let (spirc, spirc_task) = Spirc::new(connect_config, session, player, mixer);
self.spirc = Some(spirc);
self.spirc_task = Some(spirc_task);
progress = true;
}
if let Async::Ready(Some(())) = self.signal.poll().unwrap() {
if!self.shutdown {
if let Some(ref spirc) = self.spirc {
spirc.shutdown();
}
self.shutdown = true;
} else {
return Ok(Async::Ready(()));
}
progress = true;
}
if let Some(ref mut spirc_task) = self.spirc_task {
if let Async::Ready(()) = spirc_task.poll().unwrap() {
if self.shutdown {
return Ok(Async::Ready(()));
} else {
panic!("Spirc shut down unexpectedly");
}
}
}
if!progress {
return Ok(Async::NotReady);
}
}
}
}
fn main() {
let mut core = Core::new().unwrap();
let handle = core.handle();
let args: Vec<String> = std::env::args().collect();
core.run(Main::new(handle, setup(&args))).unwrap()
}
|
{
builder.parse("mdns=info,librespot=info");
}
|
conditional_block
|
main.rs
|
// TODO: many items from tokio-core::io have been deprecated in favour of tokio-io
#![allow(deprecated)]
#[macro_use] extern crate log;
extern crate env_logger;
extern crate futures;
extern crate getopts;
extern crate librespot;
extern crate tokio_core;
extern crate tokio_signal;
use env_logger::LogBuilder;
use futures::{Future, Async, Poll, Stream};
use std::env;
use std::io::{self, stderr, Write};
use std::path::PathBuf;
use std::process::exit;
use std::str::FromStr;
use tokio_core::reactor::{Handle, Core};
use tokio_core::io::IoStream;
use std::mem;
use librespot::core::authentication::{get_credentials, Credentials};
use librespot::core::cache::Cache;
use librespot::core::config::{Bitrate, DeviceType, PlayerConfig, SessionConfig, ConnectConfig};
use librespot::core::session::Session;
use librespot::core::version;
use librespot::audio_backend::{self, Sink, BACKENDS};
use librespot::discovery::{discovery, DiscoveryStream};
use librespot::mixer::{self, Mixer};
use librespot::player::Player;
use librespot::spirc::{Spirc, SpircTask};
fn
|
(program: &str, opts: &getopts::Options) -> String {
let brief = format!("Usage: {} [options]", program);
opts.usage(&brief)
}
fn setup_logging(verbose: bool) {
let mut builder = LogBuilder::new();
match env::var("RUST_LOG") {
Ok(config) => {
builder.parse(&config);
builder.init().unwrap();
if verbose {
warn!("`--verbose` flag overidden by `RUST_LOG` environment variable");
}
}
Err(_) => {
if verbose {
builder.parse("mdns=info,librespot=trace");
} else {
builder.parse("mdns=info,librespot=info");
}
builder.init().unwrap();
}
}
}
fn list_backends() {
println!("Available Backends : ");
for (&(name, _), idx) in BACKENDS.iter().zip(0..) {
if idx == 0 {
println!("- {} (default)", name);
} else {
println!("- {}", name);
}
}
}
#[derive(Clone)]
struct Setup {
backend: fn(Option<String>) -> Box<Sink>,
device: Option<String>,
mixer: fn() -> Box<Mixer>,
cache: Option<Cache>,
player_config: PlayerConfig,
session_config: SessionConfig,
connect_config: ConnectConfig,
credentials: Option<Credentials>,
enable_discovery: bool,
}
fn setup(args: &[String]) -> Setup {
let mut opts = getopts::Options::new();
opts.optopt("c", "cache", "Path to a directory where files will be cached.", "CACHE")
.optflag("", "disable-audio-cache", "Disable caching of the audio data.")
.reqopt("n", "name", "Device name", "NAME")
.optopt("", "device-type", "Displayed device type", "DEVICE_TYPE")
.optopt("b", "bitrate", "Bitrate (96, 160 or 320). Defaults to 160", "BITRATE")
.optopt("", "onstart", "Run PROGRAM when playback is about to begin.", "PROGRAM")
.optopt("", "onstop", "Run PROGRAM when playback has ended.", "PROGRAM")
.optflag("v", "verbose", "Enable verbose output")
.optopt("u", "username", "Username to sign in with", "USERNAME")
.optopt("p", "password", "Password", "PASSWORD")
.optflag("", "disable-discovery", "Disable discovery mode")
.optopt("", "backend", "Audio backend to use. Use '?' to list options", "BACKEND")
.optopt("", "device", "Audio device to use. Use '?' to list options", "DEVICE")
.optopt("", "mixer", "Mixer to use", "MIXER");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => {
writeln!(stderr(), "error: {}\n{}", f.to_string(), usage(&args[0], &opts)).unwrap();
exit(1);
}
};
let verbose = matches.opt_present("verbose");
setup_logging(verbose);
info!("librespot {} ({}). Built on {}. Build ID: {}",
version::short_sha(),
version::commit_date(),
version::short_now(),
version::build_id());
let backend_name = matches.opt_str("backend");
if backend_name == Some("?".into()) {
list_backends();
exit(0);
}
let backend = audio_backend::find(backend_name)
.expect("Invalid backend");
let device = matches.opt_str("device");
let mixer_name = matches.opt_str("mixer");
let mixer = mixer::find(mixer_name.as_ref())
.expect("Invalid mixer");
let name = matches.opt_str("name").unwrap();
let use_audio_cache =!matches.opt_present("disable-audio-cache");
let cache = matches.opt_str("c").map(|cache_location| {
Cache::new(PathBuf::from(cache_location), use_audio_cache)
});
let credentials = {
let cached_credentials = cache.as_ref().and_then(Cache::credentials);
get_credentials(
matches.opt_str("username"),
matches.opt_str("password"),
cached_credentials
)
};
let session_config = {
let device_id = librespot::core::session::device_id(&name);
SessionConfig {
user_agent: version::version_string(),
device_id: device_id,
}
};
let player_config = {
let bitrate = matches.opt_str("b").as_ref()
.map(|bitrate| Bitrate::from_str(bitrate).expect("Invalid bitrate"))
.unwrap_or(Bitrate::default());
PlayerConfig {
bitrate: bitrate,
onstart: matches.opt_str("onstart"),
onstop: matches.opt_str("onstop"),
}
};
let connect_config = {
let device_type = matches.opt_str("device-type").as_ref()
.map(|device_type| DeviceType::from_str(device_type).expect("Invalid device type"))
.unwrap_or(DeviceType::default());
ConnectConfig {
name: name,
device_type: device_type,
}
};
let enable_discovery =!matches.opt_present("disable-discovery");
Setup {
backend: backend,
cache: cache,
session_config: session_config,
player_config: player_config,
connect_config: connect_config,
credentials: credentials,
device: device,
enable_discovery: enable_discovery,
mixer: mixer,
}
}
struct Main {
cache: Option<Cache>,
player_config: PlayerConfig,
session_config: SessionConfig,
connect_config: ConnectConfig,
backend: fn(Option<String>) -> Box<Sink>,
device: Option<String>,
mixer: fn() -> Box<Mixer>,
handle: Handle,
discovery: Option<DiscoveryStream>,
signal: IoStream<()>,
spirc: Option<Spirc>,
spirc_task: Option<SpircTask>,
connect: Box<Future<Item=Session, Error=io::Error>>,
shutdown: bool,
}
impl Main {
fn new(handle: Handle, setup: Setup) -> Main {
let mut task = Main {
handle: handle.clone(),
cache: setup.cache,
session_config: setup.session_config,
player_config: setup.player_config,
connect_config: setup.connect_config,
backend: setup.backend,
device: setup.device,
mixer: setup.mixer,
connect: Box::new(futures::future::empty()),
discovery: None,
spirc: None,
spirc_task: None,
shutdown: false,
signal: tokio_signal::ctrl_c(&handle).flatten_stream().boxed(),
};
if setup.enable_discovery {
let config = task.connect_config.clone();
let device_id = task.session_config.device_id.clone();
task.discovery = Some(discovery(&handle, config, device_id).unwrap());
}
if let Some(credentials) = setup.credentials {
task.credentials(credentials);
}
task
}
fn credentials(&mut self, credentials: Credentials) {
let config = self.session_config.clone();
let handle = self.handle.clone();
let connection = Session::connect(config, credentials, self.cache.clone(), handle);
self.connect = connection;
self.spirc = None;
let task = mem::replace(&mut self.spirc_task, None);
if let Some(task) = task {
self.handle.spawn(task);
}
}
}
impl Future for Main {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
loop {
let mut progress = false;
if let Some(Async::Ready(Some(creds))) = self.discovery.as_mut().map(|d| d.poll().unwrap()) {
if let Some(ref spirc) = self.spirc {
spirc.shutdown();
}
self.credentials(creds);
progress = true;
}
if let Async::Ready(session) = self.connect.poll().unwrap() {
self.connect = Box::new(futures::future::empty());
let device = self.device.clone();
let mixer = (self.mixer)();
let player_config = self.player_config.clone();
let connect_config = self.connect_config.clone();
let audio_filter = mixer.get_audio_filter();
let backend = self.backend;
let player = Player::new(player_config, session.clone(), audio_filter, move || {
(backend)(device)
});
let (spirc, spirc_task) = Spirc::new(connect_config, session, player, mixer);
self.spirc = Some(spirc);
self.spirc_task = Some(spirc_task);
progress = true;
}
if let Async::Ready(Some(())) = self.signal.poll().unwrap() {
if!self.shutdown {
if let Some(ref spirc) = self.spirc {
spirc.shutdown();
}
self.shutdown = true;
} else {
return Ok(Async::Ready(()));
}
progress = true;
}
if let Some(ref mut spirc_task) = self.spirc_task {
if let Async::Ready(()) = spirc_task.poll().unwrap() {
if self.shutdown {
return Ok(Async::Ready(()));
} else {
panic!("Spirc shut down unexpectedly");
}
}
}
if!progress {
return Ok(Async::NotReady);
}
}
}
}
fn main() {
let mut core = Core::new().unwrap();
let handle = core.handle();
let args: Vec<String> = std::env::args().collect();
core.run(Main::new(handle, setup(&args))).unwrap()
}
|
usage
|
identifier_name
|
mod.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
#![allow(dead_code)]
#![allow(unused_unsafe)]
#![allow(unused_mut)]
use prelude::v1::*;
use ffi::CStr;
use io::{self, ErrorKind};
use libc;
use num::{Int, SignedInt};
use num;
use old_io::{self, IoResult, IoError};
use str;
use sys_common::mkerr_libc;
macro_rules! helper_init { (static $name:ident: Helper<$m:ty>) => (
static $name: Helper<$m> = Helper {
lock: ::sync::MUTEX_INIT,
cond: ::sync::CONDVAR_INIT,
chan: ::cell::UnsafeCell { value: 0 as *mut Sender<$m> },
signal: ::cell::UnsafeCell { value: 0 },
initialized: ::cell::UnsafeCell { value: false },
shutdown: ::cell::UnsafeCell { value: false },
};
) }
pub mod backtrace;
pub mod c;
pub mod condvar;
pub mod ext;
pub mod fd;
pub mod fs; // support for std::old_io
pub mod fs2; // support for std::fs
pub mod helper_signal;
pub mod mutex;
pub mod net;
pub mod os;
pub mod os_str;
pub mod pipe;
pub mod pipe2;
pub mod process;
pub mod process2;
pub mod rwlock;
pub mod stack_overflow;
pub mod sync;
pub mod tcp;
pub mod thread;
pub mod thread_local;
pub mod time;
pub mod timer;
pub mod tty;
pub mod udp;
pub mod stdio;
pub mod addrinfo {
pub use sys_common::net::get_host_addresses;
pub use sys_common::net::get_address_name;
}
// FIXME: move these to c module
pub type sock_t = self::fs::fd_t;
pub type wrlen = libc::size_t;
pub type msglen_t = libc::size_t;
pub unsafe fn close_sock(sock: sock_t)
|
pub fn last_error() -> IoError {
decode_error_detailed(os::errno() as i32)
}
pub fn last_net_error() -> IoError {
last_error()
}
extern "system" {
fn gai_strerror(errcode: libc::c_int) -> *const libc::c_char;
}
pub fn last_gai_error(s: libc::c_int) -> IoError {
let mut err = decode_error(s);
err.detail = Some(unsafe {
let data = CStr::from_ptr(gai_strerror(s));
str::from_utf8(data.to_bytes()).unwrap().to_string()
});
err
}
/// Convert an `errno` value into a high-level error variant and description.
pub fn decode_error(errno: i32) -> IoError {
// FIXME: this should probably be a bit more descriptive...
let (kind, desc) = match errno {
libc::EOF => (old_io::EndOfFile, "end of file"),
libc::ECONNREFUSED => (old_io::ConnectionRefused, "connection refused"),
libc::ECONNRESET => (old_io::ConnectionReset, "connection reset"),
libc::EPERM | libc::EACCES =>
(old_io::PermissionDenied, "permission denied"),
libc::EPIPE => (old_io::BrokenPipe, "broken pipe"),
libc::ENOTCONN => (old_io::NotConnected, "not connected"),
libc::ECONNABORTED => (old_io::ConnectionAborted, "connection aborted"),
libc::EADDRNOTAVAIL => (old_io::ConnectionRefused, "address not available"),
libc::EADDRINUSE => (old_io::ConnectionRefused, "address in use"),
libc::ENOENT => (old_io::FileNotFound, "no such file or directory"),
libc::EISDIR => (old_io::InvalidInput, "illegal operation on a directory"),
libc::ENOSYS => (old_io::IoUnavailable, "function not implemented"),
libc::EINVAL => (old_io::InvalidInput, "invalid argument"),
libc::ENOTTY =>
(old_io::MismatchedFileTypeForOperation,
"file descriptor is not a TTY"),
libc::ETIMEDOUT => (old_io::TimedOut, "operation timed out"),
libc::ECANCELED => (old_io::TimedOut, "operation aborted"),
libc::consts::os::posix88::EEXIST =>
(old_io::PathAlreadyExists, "path already exists"),
// These two constants can have the same value on some systems,
// but different values on others, so we can't use a match
// clause
x if x == libc::EAGAIN || x == libc::EWOULDBLOCK =>
(old_io::ResourceUnavailable, "resource temporarily unavailable"),
_ => (old_io::OtherIoError, "unknown error")
};
IoError { kind: kind, desc: desc, detail: None }
}
pub fn decode_error_detailed(errno: i32) -> IoError {
let mut err = decode_error(errno);
err.detail = Some(os::error_string(errno));
err
}
pub fn decode_error_kind(errno: i32) -> ErrorKind {
match errno as libc::c_int {
libc::ECONNREFUSED => ErrorKind::ConnectionRefused,
libc::ECONNRESET => ErrorKind::ConnectionReset,
libc::EPERM | libc::EACCES => ErrorKind::PermissionDenied,
libc::EPIPE => ErrorKind::BrokenPipe,
libc::ENOTCONN => ErrorKind::NotConnected,
libc::ECONNABORTED => ErrorKind::ConnectionAborted,
libc::EADDRNOTAVAIL => ErrorKind::ConnectionRefused,
libc::EADDRINUSE => ErrorKind::ConnectionRefused,
libc::ENOENT => ErrorKind::FileNotFound,
libc::EISDIR => ErrorKind::InvalidInput,
libc::EINTR => ErrorKind::Interrupted,
libc::EINVAL => ErrorKind::InvalidInput,
libc::ENOTTY => ErrorKind::MismatchedFileTypeForOperation,
libc::ETIMEDOUT => ErrorKind::TimedOut,
libc::ECANCELED => ErrorKind::TimedOut,
libc::consts::os::posix88::EEXIST => ErrorKind::PathAlreadyExists,
// These two constants can have the same value on some systems,
// but different values on others, so we can't use a match
// clause
x if x == libc::EAGAIN || x == libc::EWOULDBLOCK =>
ErrorKind::ResourceUnavailable,
_ => ErrorKind::Other,
}
}
#[inline]
pub fn retry<T, F> (mut f: F) -> T where
T: SignedInt,
F: FnMut() -> T,
{
let one: T = Int::one();
loop {
let n = f();
if n == -one && os::errno() == libc::EINTR as i32 { }
else { return n }
}
}
pub fn cvt<T: SignedInt>(t: T) -> io::Result<T> {
let one: T = Int::one();
if t == -one {
Err(io::Error::last_os_error())
} else {
Ok(t)
}
}
pub fn cvt_r<T, F>(mut f: F) -> io::Result<T>
where T: SignedInt, F: FnMut() -> T
{
loop {
match cvt(f()) {
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
other => return other,
}
}
}
pub fn ms_to_timeval(ms: u64) -> libc::timeval {
libc::timeval {
tv_sec: (ms / 1000) as libc::time_t,
tv_usec: ((ms % 1000) * 1000) as libc::suseconds_t,
}
}
pub fn wouldblock() -> bool {
let err = os::errno();
err == libc::EWOULDBLOCK as i32 || err == libc::EAGAIN as i32
}
pub fn set_nonblocking(fd: sock_t, nb: bool) {
let set = nb as libc::c_int;
mkerr_libc(retry(|| unsafe { c::ioctl(fd, c::FIONBIO, &set) })).unwrap();
}
// nothing needed on unix platforms
pub fn init_net() {}
|
{ let _ = libc::close(sock); }
|
identifier_body
|
mod.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
#![allow(dead_code)]
#![allow(unused_unsafe)]
#![allow(unused_mut)]
use prelude::v1::*;
use ffi::CStr;
use io::{self, ErrorKind};
use libc;
use num::{Int, SignedInt};
use num;
use old_io::{self, IoResult, IoError};
use str;
use sys_common::mkerr_libc;
macro_rules! helper_init { (static $name:ident: Helper<$m:ty>) => (
static $name: Helper<$m> = Helper {
lock: ::sync::MUTEX_INIT,
cond: ::sync::CONDVAR_INIT,
chan: ::cell::UnsafeCell { value: 0 as *mut Sender<$m> },
signal: ::cell::UnsafeCell { value: 0 },
initialized: ::cell::UnsafeCell { value: false },
shutdown: ::cell::UnsafeCell { value: false },
};
) }
pub mod backtrace;
pub mod c;
pub mod condvar;
pub mod ext;
pub mod fd;
pub mod fs; // support for std::old_io
pub mod fs2; // support for std::fs
pub mod helper_signal;
pub mod mutex;
pub mod net;
pub mod os;
pub mod os_str;
pub mod pipe;
pub mod pipe2;
pub mod process;
pub mod process2;
pub mod rwlock;
pub mod stack_overflow;
pub mod sync;
pub mod tcp;
pub mod thread;
pub mod thread_local;
pub mod time;
pub mod timer;
pub mod tty;
pub mod udp;
pub mod stdio;
pub mod addrinfo {
pub use sys_common::net::get_host_addresses;
pub use sys_common::net::get_address_name;
}
// FIXME: move these to c module
pub type sock_t = self::fs::fd_t;
pub type wrlen = libc::size_t;
pub type msglen_t = libc::size_t;
pub unsafe fn close_sock(sock: sock_t) { let _ = libc::close(sock); }
pub fn last_error() -> IoError {
decode_error_detailed(os::errno() as i32)
}
pub fn
|
() -> IoError {
last_error()
}
extern "system" {
fn gai_strerror(errcode: libc::c_int) -> *const libc::c_char;
}
pub fn last_gai_error(s: libc::c_int) -> IoError {
let mut err = decode_error(s);
err.detail = Some(unsafe {
let data = CStr::from_ptr(gai_strerror(s));
str::from_utf8(data.to_bytes()).unwrap().to_string()
});
err
}
/// Convert an `errno` value into a high-level error variant and description.
pub fn decode_error(errno: i32) -> IoError {
// FIXME: this should probably be a bit more descriptive...
let (kind, desc) = match errno {
libc::EOF => (old_io::EndOfFile, "end of file"),
libc::ECONNREFUSED => (old_io::ConnectionRefused, "connection refused"),
libc::ECONNRESET => (old_io::ConnectionReset, "connection reset"),
libc::EPERM | libc::EACCES =>
(old_io::PermissionDenied, "permission denied"),
libc::EPIPE => (old_io::BrokenPipe, "broken pipe"),
libc::ENOTCONN => (old_io::NotConnected, "not connected"),
libc::ECONNABORTED => (old_io::ConnectionAborted, "connection aborted"),
libc::EADDRNOTAVAIL => (old_io::ConnectionRefused, "address not available"),
libc::EADDRINUSE => (old_io::ConnectionRefused, "address in use"),
libc::ENOENT => (old_io::FileNotFound, "no such file or directory"),
libc::EISDIR => (old_io::InvalidInput, "illegal operation on a directory"),
libc::ENOSYS => (old_io::IoUnavailable, "function not implemented"),
libc::EINVAL => (old_io::InvalidInput, "invalid argument"),
libc::ENOTTY =>
(old_io::MismatchedFileTypeForOperation,
"file descriptor is not a TTY"),
libc::ETIMEDOUT => (old_io::TimedOut, "operation timed out"),
libc::ECANCELED => (old_io::TimedOut, "operation aborted"),
libc::consts::os::posix88::EEXIST =>
(old_io::PathAlreadyExists, "path already exists"),
// These two constants can have the same value on some systems,
// but different values on others, so we can't use a match
// clause
x if x == libc::EAGAIN || x == libc::EWOULDBLOCK =>
(old_io::ResourceUnavailable, "resource temporarily unavailable"),
_ => (old_io::OtherIoError, "unknown error")
};
IoError { kind: kind, desc: desc, detail: None }
}
pub fn decode_error_detailed(errno: i32) -> IoError {
let mut err = decode_error(errno);
err.detail = Some(os::error_string(errno));
err
}
pub fn decode_error_kind(errno: i32) -> ErrorKind {
match errno as libc::c_int {
libc::ECONNREFUSED => ErrorKind::ConnectionRefused,
libc::ECONNRESET => ErrorKind::ConnectionReset,
libc::EPERM | libc::EACCES => ErrorKind::PermissionDenied,
libc::EPIPE => ErrorKind::BrokenPipe,
libc::ENOTCONN => ErrorKind::NotConnected,
libc::ECONNABORTED => ErrorKind::ConnectionAborted,
libc::EADDRNOTAVAIL => ErrorKind::ConnectionRefused,
libc::EADDRINUSE => ErrorKind::ConnectionRefused,
libc::ENOENT => ErrorKind::FileNotFound,
libc::EISDIR => ErrorKind::InvalidInput,
libc::EINTR => ErrorKind::Interrupted,
libc::EINVAL => ErrorKind::InvalidInput,
libc::ENOTTY => ErrorKind::MismatchedFileTypeForOperation,
libc::ETIMEDOUT => ErrorKind::TimedOut,
libc::ECANCELED => ErrorKind::TimedOut,
libc::consts::os::posix88::EEXIST => ErrorKind::PathAlreadyExists,
// These two constants can have the same value on some systems,
// but different values on others, so we can't use a match
// clause
x if x == libc::EAGAIN || x == libc::EWOULDBLOCK =>
ErrorKind::ResourceUnavailable,
_ => ErrorKind::Other,
}
}
#[inline]
pub fn retry<T, F> (mut f: F) -> T where
T: SignedInt,
F: FnMut() -> T,
{
let one: T = Int::one();
loop {
let n = f();
if n == -one && os::errno() == libc::EINTR as i32 { }
else { return n }
}
}
pub fn cvt<T: SignedInt>(t: T) -> io::Result<T> {
let one: T = Int::one();
if t == -one {
Err(io::Error::last_os_error())
} else {
Ok(t)
}
}
pub fn cvt_r<T, F>(mut f: F) -> io::Result<T>
where T: SignedInt, F: FnMut() -> T
{
loop {
match cvt(f()) {
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
other => return other,
}
}
}
pub fn ms_to_timeval(ms: u64) -> libc::timeval {
libc::timeval {
tv_sec: (ms / 1000) as libc::time_t,
tv_usec: ((ms % 1000) * 1000) as libc::suseconds_t,
}
}
pub fn wouldblock() -> bool {
let err = os::errno();
err == libc::EWOULDBLOCK as i32 || err == libc::EAGAIN as i32
}
pub fn set_nonblocking(fd: sock_t, nb: bool) {
let set = nb as libc::c_int;
mkerr_libc(retry(|| unsafe { c::ioctl(fd, c::FIONBIO, &set) })).unwrap();
}
// nothing needed on unix platforms
pub fn init_net() {}
|
last_net_error
|
identifier_name
|
mod.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
#![allow(dead_code)]
#![allow(unused_unsafe)]
#![allow(unused_mut)]
use prelude::v1::*;
use ffi::CStr;
use io::{self, ErrorKind};
use libc;
use num::{Int, SignedInt};
use num;
use old_io::{self, IoResult, IoError};
use str;
use sys_common::mkerr_libc;
macro_rules! helper_init { (static $name:ident: Helper<$m:ty>) => (
static $name: Helper<$m> = Helper {
lock: ::sync::MUTEX_INIT,
cond: ::sync::CONDVAR_INIT,
chan: ::cell::UnsafeCell { value: 0 as *mut Sender<$m> },
signal: ::cell::UnsafeCell { value: 0 },
initialized: ::cell::UnsafeCell { value: false },
shutdown: ::cell::UnsafeCell { value: false },
};
) }
pub mod backtrace;
pub mod c;
pub mod condvar;
pub mod ext;
pub mod fd;
pub mod fs; // support for std::old_io
pub mod fs2; // support for std::fs
pub mod helper_signal;
pub mod mutex;
pub mod net;
pub mod os;
pub mod os_str;
pub mod pipe;
pub mod pipe2;
pub mod process;
pub mod process2;
pub mod rwlock;
pub mod stack_overflow;
pub mod sync;
pub mod tcp;
pub mod thread;
pub mod thread_local;
pub mod time;
pub mod timer;
pub mod tty;
pub mod udp;
pub mod stdio;
pub mod addrinfo {
pub use sys_common::net::get_host_addresses;
pub use sys_common::net::get_address_name;
}
// FIXME: move these to c module
pub type sock_t = self::fs::fd_t;
pub type wrlen = libc::size_t;
pub type msglen_t = libc::size_t;
pub unsafe fn close_sock(sock: sock_t) { let _ = libc::close(sock); }
pub fn last_error() -> IoError {
decode_error_detailed(os::errno() as i32)
}
pub fn last_net_error() -> IoError {
last_error()
}
extern "system" {
fn gai_strerror(errcode: libc::c_int) -> *const libc::c_char;
}
pub fn last_gai_error(s: libc::c_int) -> IoError {
let mut err = decode_error(s);
err.detail = Some(unsafe {
let data = CStr::from_ptr(gai_strerror(s));
str::from_utf8(data.to_bytes()).unwrap().to_string()
});
err
}
/// Convert an `errno` value into a high-level error variant and description.
pub fn decode_error(errno: i32) -> IoError {
// FIXME: this should probably be a bit more descriptive...
let (kind, desc) = match errno {
libc::EOF => (old_io::EndOfFile, "end of file"),
libc::ECONNREFUSED => (old_io::ConnectionRefused, "connection refused"),
libc::ECONNRESET => (old_io::ConnectionReset, "connection reset"),
libc::EPERM | libc::EACCES =>
(old_io::PermissionDenied, "permission denied"),
libc::EPIPE => (old_io::BrokenPipe, "broken pipe"),
libc::ENOTCONN => (old_io::NotConnected, "not connected"),
libc::ECONNABORTED => (old_io::ConnectionAborted, "connection aborted"),
libc::EADDRNOTAVAIL => (old_io::ConnectionRefused, "address not available"),
libc::EADDRINUSE => (old_io::ConnectionRefused, "address in use"),
libc::ENOENT => (old_io::FileNotFound, "no such file or directory"),
libc::EISDIR => (old_io::InvalidInput, "illegal operation on a directory"),
libc::ENOSYS => (old_io::IoUnavailable, "function not implemented"),
libc::EINVAL => (old_io::InvalidInput, "invalid argument"),
libc::ENOTTY =>
(old_io::MismatchedFileTypeForOperation,
"file descriptor is not a TTY"),
libc::ETIMEDOUT => (old_io::TimedOut, "operation timed out"),
libc::ECANCELED => (old_io::TimedOut, "operation aborted"),
libc::consts::os::posix88::EEXIST =>
(old_io::PathAlreadyExists, "path already exists"),
// These two constants can have the same value on some systems,
// but different values on others, so we can't use a match
// clause
x if x == libc::EAGAIN || x == libc::EWOULDBLOCK =>
(old_io::ResourceUnavailable, "resource temporarily unavailable"),
_ => (old_io::OtherIoError, "unknown error")
};
IoError { kind: kind, desc: desc, detail: None }
}
pub fn decode_error_detailed(errno: i32) -> IoError {
let mut err = decode_error(errno);
err.detail = Some(os::error_string(errno));
err
}
pub fn decode_error_kind(errno: i32) -> ErrorKind {
match errno as libc::c_int {
libc::ECONNREFUSED => ErrorKind::ConnectionRefused,
libc::ECONNRESET => ErrorKind::ConnectionReset,
libc::EPERM | libc::EACCES => ErrorKind::PermissionDenied,
libc::EPIPE => ErrorKind::BrokenPipe,
libc::ENOTCONN => ErrorKind::NotConnected,
libc::ECONNABORTED => ErrorKind::ConnectionAborted,
libc::EADDRNOTAVAIL => ErrorKind::ConnectionRefused,
libc::EADDRINUSE => ErrorKind::ConnectionRefused,
libc::ENOENT => ErrorKind::FileNotFound,
libc::EISDIR => ErrorKind::InvalidInput,
libc::EINTR => ErrorKind::Interrupted,
libc::EINVAL => ErrorKind::InvalidInput,
libc::ENOTTY => ErrorKind::MismatchedFileTypeForOperation,
libc::ETIMEDOUT => ErrorKind::TimedOut,
libc::ECANCELED => ErrorKind::TimedOut,
libc::consts::os::posix88::EEXIST => ErrorKind::PathAlreadyExists,
// These two constants can have the same value on some systems,
// but different values on others, so we can't use a match
// clause
x if x == libc::EAGAIN || x == libc::EWOULDBLOCK =>
ErrorKind::ResourceUnavailable,
_ => ErrorKind::Other,
}
}
#[inline]
pub fn retry<T, F> (mut f: F) -> T where
T: SignedInt,
F: FnMut() -> T,
{
let one: T = Int::one();
loop {
let n = f();
if n == -one && os::errno() == libc::EINTR as i32 { }
else { return n }
}
}
pub fn cvt<T: SignedInt>(t: T) -> io::Result<T> {
let one: T = Int::one();
if t == -one {
Err(io::Error::last_os_error())
} else {
Ok(t)
}
}
pub fn cvt_r<T, F>(mut f: F) -> io::Result<T>
where T: SignedInt, F: FnMut() -> T
{
loop {
match cvt(f()) {
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
other => return other,
}
}
}
pub fn ms_to_timeval(ms: u64) -> libc::timeval {
libc::timeval {
tv_sec: (ms / 1000) as libc::time_t,
tv_usec: ((ms % 1000) * 1000) as libc::suseconds_t,
}
}
pub fn wouldblock() -> bool {
let err = os::errno();
err == libc::EWOULDBLOCK as i32 || err == libc::EAGAIN as i32
}
pub fn set_nonblocking(fd: sock_t, nb: bool) {
let set = nb as libc::c_int;
mkerr_libc(retry(|| unsafe { c::ioctl(fd, c::FIONBIO, &set) })).unwrap();
}
|
pub fn init_net() {}
|
// nothing needed on unix platforms
|
random_line_split
|
size_big.rs
|
//! The GC can work with objects that take up most of a page.
extern crate cell_gc;
#[macro_use]
extern crate cell_gc_derive;
type Big32 = (u64, u64, u64, u64);
type Big128 = (Big32, Big32, Big32, Big32);
type Big512 = (Big128, Big128, Big128, Big128);
type Big2560 = (Big512, Big512, Big512, Big512, Big512);
#[derive(IntoHeap)]
struct
|
<'h> {
bits: Big2560,
next: Option<BigRef<'h>>,
}
#[test]
fn size_big() {
cell_gc::with_heap(|hs| {
hs.set_page_limit::<Big>(Some(1));
let n = cell_gc::page_capacity::<Big>();
assert_eq!(n, 1); // see comment in size_medium.rs
let a = (5, 6, 7, 8);
let b = (a, a, a, a);
let c = (b, b, b, b);
let d = (c, c, c, c, c);
let result = hs.alloc(Big {
bits: d,
next: None,
});
assert_eq!(result.bits(), d);
assert_eq!(result.next(), None);
assert_eq!(
hs.try_alloc(Big {
bits: d,
next: None,
}),
None
);
});
}
|
Big
|
identifier_name
|
size_big.rs
|
//! The GC can work with objects that take up most of a page.
extern crate cell_gc;
#[macro_use]
extern crate cell_gc_derive;
type Big32 = (u64, u64, u64, u64);
type Big128 = (Big32, Big32, Big32, Big32);
type Big512 = (Big128, Big128, Big128, Big128);
type Big2560 = (Big512, Big512, Big512, Big512, Big512);
#[derive(IntoHeap)]
struct Big<'h> {
bits: Big2560,
next: Option<BigRef<'h>>,
|
#[test]
fn size_big() {
cell_gc::with_heap(|hs| {
hs.set_page_limit::<Big>(Some(1));
let n = cell_gc::page_capacity::<Big>();
assert_eq!(n, 1); // see comment in size_medium.rs
let a = (5, 6, 7, 8);
let b = (a, a, a, a);
let c = (b, b, b, b);
let d = (c, c, c, c, c);
let result = hs.alloc(Big {
bits: d,
next: None,
});
assert_eq!(result.bits(), d);
assert_eq!(result.next(), None);
assert_eq!(
hs.try_alloc(Big {
bits: d,
next: None,
}),
None
);
});
}
|
}
|
random_line_split
|
stdio.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Non-blocking access to stdin, stdout, and stderr.
//!
//! This module provides bindings to the local event loop's TTY interface, using it
//! to offer synchronous but non-blocking versions of stdio. These handles can be
//! inspected for information about terminal dimensions or for related information
//! about the stream or terminal to which it is attached.
//!
//! # Example
//!
//! ```rust
//! # #![allow(unused_must_use)]
//! use std::io;
//!
//! let mut out = io::stdout();
//! out.write(b"Hello, world!");
//! ```
use self::StdSource::*;
use boxed::Box;
use cell::RefCell;
use clone::Clone;
use failure::LOCAL_STDERR;
use fmt;
use io::{Reader, Writer, IoResult, IoError, OtherIoError, Buffer,
standard_error, EndOfFile, LineBufferedWriter, BufferedReader};
use kinds::Send;
use libc;
use mem;
use option::Option;
use option::Option::{Some, None};
use ops::{Deref, DerefMut};
use result::Result::{Ok, Err};
use rustrt;
use rustrt::local::Local;
use rustrt::task::Task;
use slice::SlicePrelude;
use str::StrPrelude;
use string::String;
use sys::{fs, tty};
use sync::{Arc, Mutex, MutexGuard, Once, ONCE_INIT};
use uint;
use vec::Vec;
// And so begins the tale of acquiring a uv handle to a stdio stream on all
// platforms in all situations. Our story begins by splitting the world into two
// categories, windows and unix. Then one day the creators of unix said let
// there be redirection! And henceforth there was redirection away from the
// console for standard I/O streams.
//
// After this day, the world split into four factions:
//
// 1. Unix with stdout on a terminal.
// 2. Unix with stdout redirected.
// 3. Windows with stdout on a terminal.
// 4. Windows with stdout redirected.
//
// Many years passed, and then one day the nation of libuv decided to unify this
// world. After months of toiling, uv created three ideas: TTY, Pipe, File.
// These three ideas propagated throughout the lands and the four great factions
// decided to settle among them.
//
// The groups of 1, 2, and 3 all worked very hard towards the idea of TTY. Upon
// doing so, they even enhanced themselves further then their Pipe/File
// brethren, becoming the dominant powers.
//
// The group of 4, however, decided to work independently. They abandoned the
// common TTY belief throughout, and even abandoned the fledgling Pipe belief.
// The members of the 4th faction decided to only align themselves with File.
//
// tl;dr; TTY works on everything but when windows stdout is redirected, in that
// case pipe also doesn't work, but magically file does!
enum StdSource {
TTY(tty::TTY),
File(fs::FileDesc),
}
fn src<T>(fd: libc::c_int, _readable: bool, f: |StdSource| -> T) -> T {
match tty::TTY::new(fd) {
Ok(tty) => f(TTY(tty)),
Err(_) => f(File(fs::FileDesc::new(fd, false))),
}
}
thread_local!(static LOCAL_STDOUT: RefCell<Option<Box<Writer + Send>>> = {
RefCell::new(None)
})
/// A synchronized wrapper around a buffered reader from stdin
#[deriving(Clone)]
pub struct StdinReader {
inner: Arc<Mutex<BufferedReader<StdReader>>>,
}
/// A guard for exlusive access to `StdinReader`'s internal `BufferedReader`.
pub struct StdinReaderGuard<'a> {
inner: MutexGuard<'a, BufferedReader<StdReader>>,
}
impl<'a> Deref<BufferedReader<StdReader>> for StdinReaderGuard<'a> {
fn deref(&self) -> &BufferedReader<StdReader> {
&*self.inner
}
}
impl<'a> DerefMut<BufferedReader<StdReader>> for StdinReaderGuard<'a> {
fn deref_mut(&mut self) -> &mut BufferedReader<StdReader> {
&mut *self.inner
}
}
impl StdinReader {
/// Locks the `StdinReader`, granting the calling thread exclusive access
/// to the underlying `BufferedReader`.
///
/// This provides access to methods like `chars` and `lines`.
///
/// ## Example
///
/// ```rust
/// use std::io;
///
/// for line in io::stdin().lock().lines() {
/// println!("{}", line.unwrap());
/// }
/// ```
pub fn lock<'a>(&'a mut self) -> StdinReaderGuard<'a> {
StdinReaderGuard {
inner: self.inner.lock()
}
}
/// Like `Buffer::read_line`.
///
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_line(&mut self) -> IoResult<String> {
self.inner.lock().read_line()
}
/// Like `Buffer::read_until`.
///
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_until(&mut self, byte: u8) -> IoResult<Vec<u8>> {
self.inner.lock().read_until(byte)
}
/// Like `Buffer::read_char`.
///
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_char(&mut self) -> IoResult<char> {
self.inner.lock().read_char()
}
}
impl Reader for StdinReader {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
self.inner.lock().read(buf)
}
// We have to manually delegate all of these because the default impls call
// read more than once and we don't want those calls to interleave (or
// incur the costs of repeated locking).
fn read_at_least(&mut self, min: uint, buf: &mut [u8]) -> IoResult<uint> {
self.inner.lock().read_at_least(min, buf)
}
fn push_at_least(&mut self, min: uint, len: uint, buf: &mut Vec<u8>) -> IoResult<uint> {
self.inner.lock().push_at_least(min, len, buf)
}
fn read_to_end(&mut self) -> IoResult<Vec<u8>> {
self.inner.lock().read_to_end()
}
fn read_le_uint_n(&mut self, nbytes: uint) -> IoResult<u64> {
self.inner.lock().read_le_uint_n(nbytes)
}
fn read_be_uint_n(&mut self, nbytes: uint) -> IoResult<u64> {
self.inner.lock().read_be_uint_n(nbytes)
}
}
/// Creates a new handle to the stdin of the current process.
///
/// The returned handle is a wrapper around a global `BufferedReader` shared
/// by all threads. If buffered access is not desired, the `stdin_raw` function
/// is provided to provided unbuffered access to stdin.
///
/// See `stdout()` for more notes about this function.
pub fn stdin() -> StdinReader {
// We're following the same strategy as kimundi's lazy_static library
static mut STDIN: *const StdinReader = 0 as *const StdinReader;
static ONCE: Once = ONCE_INIT;
unsafe {
ONCE.doit(|| {
// The default buffer capacity is 64k, but apparently windows doesn't like
// 64k reads on stdin. See #13304 for details, but the idea is that on
// windows we use a slightly smaller buffer that's been seen to be
// acceptable.
let stdin = if cfg!(windows) {
BufferedReader::with_capacity(8 * 1024, stdin_raw())
} else {
BufferedReader::new(stdin_raw())
};
let stdin = StdinReader {
inner: Arc::new(Mutex::new(stdin))
};
STDIN = mem::transmute(box stdin);
});
(*STDIN).clone()
}
}
/// Creates a new non-blocking handle to the stdin of the current process.
///
/// Unlike `stdin()`, the returned reader is *not* a buffered reader.
///
/// See `stdout()` for more notes about this function.
pub fn stdin_raw() -> StdReader {
src(libc::STDIN_FILENO, true, |src| StdReader { inner: src })
}
/// Creates a line-buffered handle to the stdout of the current process.
///
/// Note that this is a fairly expensive operation in that at least one memory
/// allocation is performed. Additionally, this must be called from a runtime
/// task context because the stream returned will be a non-blocking object using
/// the local scheduler to perform the I/O.
///
/// Care should be taken when creating multiple handles to an output stream for
/// a single process. While usage is still safe, the output may be surprising if
/// no synchronization is performed to ensure a sane output.
pub fn stdout() -> LineBufferedWriter<StdWriter> {
LineBufferedWriter::new(stdout_raw())
}
/// Creates an unbuffered handle to the stdout of the current process
///
/// See notes in `stdout()` for more information.
pub fn stdout_raw() -> StdWriter {
src(libc::STDOUT_FILENO, false, |src| StdWriter { inner: src })
}
/// Creates a line-buffered handle to the stderr of the current process.
///
/// See `stdout()` for notes about this function.
pub fn stderr() -> LineBufferedWriter<StdWriter> {
|
}
/// Creates an unbuffered handle to the stderr of the current process
///
/// See notes in `stdout()` for more information.
pub fn stderr_raw() -> StdWriter {
src(libc::STDERR_FILENO, false, |src| StdWriter { inner: src })
}
/// Resets the task-local stdout handle to the specified writer
///
/// This will replace the current task's stdout handle, returning the old
/// handle. All future calls to `print` and friends will emit their output to
/// this specified handle.
///
/// Note that this does not need to be called for all new tasks; the default
/// output handle is to the process's stdout stream.
pub fn set_stdout(stdout: Box<Writer + Send>) -> Option<Box<Writer + Send>> {
let mut new = Some(stdout);
LOCAL_STDOUT.with(|slot| {
mem::replace(&mut *slot.borrow_mut(), new.take())
}).and_then(|mut s| {
let _ = s.flush();
Some(s)
})
}
/// Resets the task-local stderr handle to the specified writer
///
/// This will replace the current task's stderr handle, returning the old
/// handle. Currently, the stderr handle is used for printing panic messages
/// during task panic.
///
/// Note that this does not need to be called for all new tasks; the default
/// output handle is to the process's stderr stream.
pub fn set_stderr(stderr: Box<Writer + Send>) -> Option<Box<Writer + Send>> {
let mut new = Some(stderr);
LOCAL_STDERR.with(|slot| {
mem::replace(&mut *slot.borrow_mut(), new.take())
}).and_then(|mut s| {
let _ = s.flush();
Some(s)
})
}
// Helper to access the local task's stdout handle
//
// Note that this is not a safe function to expose because you can create an
// aliased pointer very easily:
//
// with_task_stdout(|io1| {
// with_task_stdout(|io2| {
// // io1 aliases io2
// })
// })
fn with_task_stdout(f: |&mut Writer| -> IoResult<()>) {
let result = if Local::exists(None::<Task>) {
let mut my_stdout = LOCAL_STDOUT.with(|slot| {
slot.borrow_mut().take()
}).unwrap_or_else(|| {
box stdout() as Box<Writer + Send>
});
let result = f(&mut *my_stdout);
let mut var = Some(my_stdout);
LOCAL_STDOUT.with(|slot| {
*slot.borrow_mut() = var.take();
});
result
} else {
let mut io = rustrt::Stdout;
f(&mut io as &mut Writer)
};
match result {
Ok(()) => {}
Err(e) => panic!("failed printing to stdout: {}", e),
}
}
/// Flushes the local task's stdout handle.
///
/// By default, this stream is a line-buffering stream, so flushing may be
/// necessary to ensure that all output is printed to the screen (if there are
/// no newlines printed).
///
/// Note that logging macros do not use this stream. Using the logging macros
/// will emit output to stderr, and while they are line buffered the log
/// messages are always terminated in a newline (no need to flush).
pub fn flush() {
with_task_stdout(|io| io.flush())
}
/// Prints a string to the stdout of the current process. No newline is emitted
/// after the string is printed.
pub fn print(s: &str) {
with_task_stdout(|io| io.write(s.as_bytes()))
}
/// Prints a string to the stdout of the current process. A literal
/// `\n` character is printed to the console after the string.
pub fn println(s: &str) {
with_task_stdout(|io| {
io.write(s.as_bytes()).and_then(|()| io.write(&[b'\n']))
})
}
/// Similar to `print`, but takes a `fmt::Arguments` structure to be compatible
/// with the `format_args!` macro.
pub fn print_args(fmt: &fmt::Arguments) {
with_task_stdout(|io| write!(io, "{}", fmt))
}
/// Similar to `println`, but takes a `fmt::Arguments` structure to be
/// compatible with the `format_args!` macro.
pub fn println_args(fmt: &fmt::Arguments) {
with_task_stdout(|io| writeln!(io, "{}", fmt))
}
/// Representation of a reader of a standard input stream
pub struct StdReader {
inner: StdSource
}
impl StdReader {
/// Returns whether this stream is attached to a TTY instance or not.
pub fn isatty(&self) -> bool {
match self.inner {
TTY(..) => true,
File(..) => false,
}
}
}
impl Reader for StdReader {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
let ret = match self.inner {
TTY(ref mut tty) => {
// Flush the task-local stdout so that weird issues like a
// print!'d prompt not being shown until after the user hits
// enter.
flush();
tty.read(buf).map(|i| i as uint)
},
File(ref mut file) => file.read(buf).map(|i| i as uint),
};
match ret {
// When reading a piped stdin, libuv will return 0-length reads when
// stdin reaches EOF. For pretty much all other streams it will
// return an actual EOF error, but apparently for stdin it's a
// little different. Hence, here we convert a 0 length read to an
// end-of-file indicator so the caller knows to stop reading.
Ok(0) => { Err(standard_error(EndOfFile)) }
ret @ Ok(..) | ret @ Err(..) => ret,
}
}
}
/// Representation of a writer to a standard output stream
pub struct StdWriter {
inner: StdSource
}
impl StdWriter {
/// Gets the size of this output window, if possible. This is typically used
/// when the writer is attached to something like a terminal, this is used
/// to fetch the dimensions of the terminal.
///
/// If successful, returns `Ok((width, height))`.
///
/// # Error
///
/// This function will return an error if the output stream is not actually
/// connected to a TTY instance, or if querying the TTY instance fails.
pub fn winsize(&mut self) -> IoResult<(int, int)> {
match self.inner {
TTY(ref mut tty) => {
tty.get_winsize()
}
File(..) => {
Err(IoError {
kind: OtherIoError,
desc: "stream is not a tty",
detail: None,
})
}
}
}
/// Controls whether this output stream is a "raw stream" or simply a normal
/// stream.
///
/// # Error
///
/// This function will return an error if the output stream is not actually
/// connected to a TTY instance, or if querying the TTY instance fails.
pub fn set_raw(&mut self, raw: bool) -> IoResult<()> {
match self.inner {
TTY(ref mut tty) => {
tty.set_raw(raw)
}
File(..) => {
Err(IoError {
kind: OtherIoError,
desc: "stream is not a tty",
detail: None,
})
}
}
}
/// Returns whether this stream is attached to a TTY instance or not.
pub fn isatty(&self) -> bool {
match self.inner {
TTY(..) => true,
File(..) => false,
}
}
}
impl Writer for StdWriter {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
// As with stdin on windows, stdout often can't handle writes of large
// sizes. For an example, see #14940. For this reason, chunk the output
// buffer on windows, but on unix we can just write the whole buffer all
// at once.
//
// For some other references, it appears that this problem has been
// encountered by others [1] [2]. We choose the number 8KB just because
// libuv does the same.
//
// [1]: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232
// [2]: http://www.mail-archive.com/[email protected]/msg00661.html
let max_size = if cfg!(windows) {8192} else {uint::MAX};
for chunk in buf.chunks(max_size) {
try!(match self.inner {
TTY(ref mut tty) => tty.write(chunk),
File(ref mut file) => file.write(chunk),
})
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use prelude::*;
#[test]
fn smoke() {
// Just make sure we can acquire handles
stdin();
stdout();
stderr();
}
#[test]
fn capture_stdout() {
use io::{ChanReader, ChanWriter};
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));
spawn(proc() {
set_stdout(box w);
println!("hello!");
});
assert_eq!(r.read_to_string().unwrap(), "hello!\n");
}
#[test]
fn capture_stderr() {
use realstd::comm::channel;
use realstd::io::{ChanReader, ChanWriter, Reader};
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));
spawn(proc() {
::realstd::io::stdio::set_stderr(box w);
panic!("my special message");
});
let s = r.read_to_string().unwrap();
assert!(s.contains("my special message"));
}
}
|
LineBufferedWriter::new(stderr_raw())
|
random_line_split
|
stdio.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Non-blocking access to stdin, stdout, and stderr.
//!
//! This module provides bindings to the local event loop's TTY interface, using it
//! to offer synchronous but non-blocking versions of stdio. These handles can be
//! inspected for information about terminal dimensions or for related information
//! about the stream or terminal to which it is attached.
//!
//! # Example
//!
//! ```rust
//! # #![allow(unused_must_use)]
//! use std::io;
//!
//! let mut out = io::stdout();
//! out.write(b"Hello, world!");
//! ```
use self::StdSource::*;
use boxed::Box;
use cell::RefCell;
use clone::Clone;
use failure::LOCAL_STDERR;
use fmt;
use io::{Reader, Writer, IoResult, IoError, OtherIoError, Buffer,
standard_error, EndOfFile, LineBufferedWriter, BufferedReader};
use kinds::Send;
use libc;
use mem;
use option::Option;
use option::Option::{Some, None};
use ops::{Deref, DerefMut};
use result::Result::{Ok, Err};
use rustrt;
use rustrt::local::Local;
use rustrt::task::Task;
use slice::SlicePrelude;
use str::StrPrelude;
use string::String;
use sys::{fs, tty};
use sync::{Arc, Mutex, MutexGuard, Once, ONCE_INIT};
use uint;
use vec::Vec;
// And so begins the tale of acquiring a uv handle to a stdio stream on all
// platforms in all situations. Our story begins by splitting the world into two
// categories, windows and unix. Then one day the creators of unix said let
// there be redirection! And henceforth there was redirection away from the
// console for standard I/O streams.
//
// After this day, the world split into four factions:
//
// 1. Unix with stdout on a terminal.
// 2. Unix with stdout redirected.
// 3. Windows with stdout on a terminal.
// 4. Windows with stdout redirected.
//
// Many years passed, and then one day the nation of libuv decided to unify this
// world. After months of toiling, uv created three ideas: TTY, Pipe, File.
// These three ideas propagated throughout the lands and the four great factions
// decided to settle among them.
//
// The groups of 1, 2, and 3 all worked very hard towards the idea of TTY. Upon
// doing so, they even enhanced themselves further then their Pipe/File
// brethren, becoming the dominant powers.
//
// The group of 4, however, decided to work independently. They abandoned the
// common TTY belief throughout, and even abandoned the fledgling Pipe belief.
// The members of the 4th faction decided to only align themselves with File.
//
// tl;dr; TTY works on everything but when windows stdout is redirected, in that
// case pipe also doesn't work, but magically file does!
enum StdSource {
TTY(tty::TTY),
File(fs::FileDesc),
}
fn src<T>(fd: libc::c_int, _readable: bool, f: |StdSource| -> T) -> T {
match tty::TTY::new(fd) {
Ok(tty) => f(TTY(tty)),
Err(_) => f(File(fs::FileDesc::new(fd, false))),
}
}
thread_local!(static LOCAL_STDOUT: RefCell<Option<Box<Writer + Send>>> = {
RefCell::new(None)
})
/// A synchronized wrapper around a buffered reader from stdin
#[deriving(Clone)]
pub struct StdinReader {
inner: Arc<Mutex<BufferedReader<StdReader>>>,
}
/// A guard for exlusive access to `StdinReader`'s internal `BufferedReader`.
pub struct StdinReaderGuard<'a> {
inner: MutexGuard<'a, BufferedReader<StdReader>>,
}
impl<'a> Deref<BufferedReader<StdReader>> for StdinReaderGuard<'a> {
fn deref(&self) -> &BufferedReader<StdReader> {
&*self.inner
}
}
impl<'a> DerefMut<BufferedReader<StdReader>> for StdinReaderGuard<'a> {
fn deref_mut(&mut self) -> &mut BufferedReader<StdReader> {
&mut *self.inner
}
}
impl StdinReader {
/// Locks the `StdinReader`, granting the calling thread exclusive access
/// to the underlying `BufferedReader`.
///
/// This provides access to methods like `chars` and `lines`.
///
/// ## Example
///
/// ```rust
/// use std::io;
///
/// for line in io::stdin().lock().lines() {
/// println!("{}", line.unwrap());
/// }
/// ```
pub fn lock<'a>(&'a mut self) -> StdinReaderGuard<'a> {
StdinReaderGuard {
inner: self.inner.lock()
}
}
/// Like `Buffer::read_line`.
///
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_line(&mut self) -> IoResult<String> {
self.inner.lock().read_line()
}
/// Like `Buffer::read_until`.
///
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_until(&mut self, byte: u8) -> IoResult<Vec<u8>> {
self.inner.lock().read_until(byte)
}
/// Like `Buffer::read_char`.
///
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_char(&mut self) -> IoResult<char> {
self.inner.lock().read_char()
}
}
impl Reader for StdinReader {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
self.inner.lock().read(buf)
}
// We have to manually delegate all of these because the default impls call
// read more than once and we don't want those calls to interleave (or
// incur the costs of repeated locking).
fn read_at_least(&mut self, min: uint, buf: &mut [u8]) -> IoResult<uint> {
self.inner.lock().read_at_least(min, buf)
}
fn push_at_least(&mut self, min: uint, len: uint, buf: &mut Vec<u8>) -> IoResult<uint> {
self.inner.lock().push_at_least(min, len, buf)
}
fn read_to_end(&mut self) -> IoResult<Vec<u8>> {
self.inner.lock().read_to_end()
}
fn read_le_uint_n(&mut self, nbytes: uint) -> IoResult<u64> {
self.inner.lock().read_le_uint_n(nbytes)
}
fn
|
(&mut self, nbytes: uint) -> IoResult<u64> {
self.inner.lock().read_be_uint_n(nbytes)
}
}
/// Creates a new handle to the stdin of the current process.
///
/// The returned handle is a wrapper around a global `BufferedReader` shared
/// by all threads. If buffered access is not desired, the `stdin_raw` function
/// is provided to provided unbuffered access to stdin.
///
/// See `stdout()` for more notes about this function.
pub fn stdin() -> StdinReader {
// We're following the same strategy as kimundi's lazy_static library
static mut STDIN: *const StdinReader = 0 as *const StdinReader;
static ONCE: Once = ONCE_INIT;
unsafe {
ONCE.doit(|| {
// The default buffer capacity is 64k, but apparently windows doesn't like
// 64k reads on stdin. See #13304 for details, but the idea is that on
// windows we use a slightly smaller buffer that's been seen to be
// acceptable.
let stdin = if cfg!(windows) {
BufferedReader::with_capacity(8 * 1024, stdin_raw())
} else {
BufferedReader::new(stdin_raw())
};
let stdin = StdinReader {
inner: Arc::new(Mutex::new(stdin))
};
STDIN = mem::transmute(box stdin);
});
(*STDIN).clone()
}
}
/// Creates a new non-blocking handle to the stdin of the current process.
///
/// Unlike `stdin()`, the returned reader is *not* a buffered reader.
///
/// See `stdout()` for more notes about this function.
pub fn stdin_raw() -> StdReader {
src(libc::STDIN_FILENO, true, |src| StdReader { inner: src })
}
/// Creates a line-buffered handle to the stdout of the current process.
///
/// Note that this is a fairly expensive operation in that at least one memory
/// allocation is performed. Additionally, this must be called from a runtime
/// task context because the stream returned will be a non-blocking object using
/// the local scheduler to perform the I/O.
///
/// Care should be taken when creating multiple handles to an output stream for
/// a single process. While usage is still safe, the output may be surprising if
/// no synchronization is performed to ensure a sane output.
pub fn stdout() -> LineBufferedWriter<StdWriter> {
LineBufferedWriter::new(stdout_raw())
}
/// Creates an unbuffered handle to the stdout of the current process
///
/// See notes in `stdout()` for more information.
pub fn stdout_raw() -> StdWriter {
src(libc::STDOUT_FILENO, false, |src| StdWriter { inner: src })
}
/// Creates a line-buffered handle to the stderr of the current process.
///
/// See `stdout()` for notes about this function.
pub fn stderr() -> LineBufferedWriter<StdWriter> {
LineBufferedWriter::new(stderr_raw())
}
/// Creates an unbuffered handle to the stderr of the current process
///
/// See notes in `stdout()` for more information.
pub fn stderr_raw() -> StdWriter {
src(libc::STDERR_FILENO, false, |src| StdWriter { inner: src })
}
/// Resets the task-local stdout handle to the specified writer
///
/// This will replace the current task's stdout handle, returning the old
/// handle. All future calls to `print` and friends will emit their output to
/// this specified handle.
///
/// Note that this does not need to be called for all new tasks; the default
/// output handle is to the process's stdout stream.
pub fn set_stdout(stdout: Box<Writer + Send>) -> Option<Box<Writer + Send>> {
let mut new = Some(stdout);
LOCAL_STDOUT.with(|slot| {
mem::replace(&mut *slot.borrow_mut(), new.take())
}).and_then(|mut s| {
let _ = s.flush();
Some(s)
})
}
/// Resets the task-local stderr handle to the specified writer
///
/// This will replace the current task's stderr handle, returning the old
/// handle. Currently, the stderr handle is used for printing panic messages
/// during task panic.
///
/// Note that this does not need to be called for all new tasks; the default
/// output handle is to the process's stderr stream.
pub fn set_stderr(stderr: Box<Writer + Send>) -> Option<Box<Writer + Send>> {
let mut new = Some(stderr);
LOCAL_STDERR.with(|slot| {
mem::replace(&mut *slot.borrow_mut(), new.take())
}).and_then(|mut s| {
let _ = s.flush();
Some(s)
})
}
// Helper to access the local task's stdout handle
//
// Note that this is not a safe function to expose because you can create an
// aliased pointer very easily:
//
// with_task_stdout(|io1| {
// with_task_stdout(|io2| {
// // io1 aliases io2
// })
// })
fn with_task_stdout(f: |&mut Writer| -> IoResult<()>) {
let result = if Local::exists(None::<Task>) {
let mut my_stdout = LOCAL_STDOUT.with(|slot| {
slot.borrow_mut().take()
}).unwrap_or_else(|| {
box stdout() as Box<Writer + Send>
});
let result = f(&mut *my_stdout);
let mut var = Some(my_stdout);
LOCAL_STDOUT.with(|slot| {
*slot.borrow_mut() = var.take();
});
result
} else {
let mut io = rustrt::Stdout;
f(&mut io as &mut Writer)
};
match result {
Ok(()) => {}
Err(e) => panic!("failed printing to stdout: {}", e),
}
}
/// Flushes the local task's stdout handle.
///
/// By default, this stream is a line-buffering stream, so flushing may be
/// necessary to ensure that all output is printed to the screen (if there are
/// no newlines printed).
///
/// Note that logging macros do not use this stream. Using the logging macros
/// will emit output to stderr, and while they are line buffered the log
/// messages are always terminated in a newline (no need to flush).
pub fn flush() {
with_task_stdout(|io| io.flush())
}
/// Prints a string to the stdout of the current process. No newline is emitted
/// after the string is printed.
pub fn print(s: &str) {
with_task_stdout(|io| io.write(s.as_bytes()))
}
/// Prints a string to the stdout of the current process. A literal
/// `\n` character is printed to the console after the string.
pub fn println(s: &str) {
with_task_stdout(|io| {
io.write(s.as_bytes()).and_then(|()| io.write(&[b'\n']))
})
}
/// Similar to `print`, but takes a `fmt::Arguments` structure to be compatible
/// with the `format_args!` macro.
pub fn print_args(fmt: &fmt::Arguments) {
with_task_stdout(|io| write!(io, "{}", fmt))
}
/// Similar to `println`, but takes a `fmt::Arguments` structure to be
/// compatible with the `format_args!` macro.
pub fn println_args(fmt: &fmt::Arguments) {
with_task_stdout(|io| writeln!(io, "{}", fmt))
}
/// Representation of a reader of a standard input stream
pub struct StdReader {
inner: StdSource
}
impl StdReader {
/// Returns whether this stream is attached to a TTY instance or not.
pub fn isatty(&self) -> bool {
match self.inner {
TTY(..) => true,
File(..) => false,
}
}
}
impl Reader for StdReader {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
let ret = match self.inner {
TTY(ref mut tty) => {
// Flush the task-local stdout so that weird issues like a
// print!'d prompt not being shown until after the user hits
// enter.
flush();
tty.read(buf).map(|i| i as uint)
},
File(ref mut file) => file.read(buf).map(|i| i as uint),
};
match ret {
// When reading a piped stdin, libuv will return 0-length reads when
// stdin reaches EOF. For pretty much all other streams it will
// return an actual EOF error, but apparently for stdin it's a
// little different. Hence, here we convert a 0 length read to an
// end-of-file indicator so the caller knows to stop reading.
Ok(0) => { Err(standard_error(EndOfFile)) }
ret @ Ok(..) | ret @ Err(..) => ret,
}
}
}
/// Representation of a writer to a standard output stream
pub struct StdWriter {
inner: StdSource
}
impl StdWriter {
/// Gets the size of this output window, if possible. This is typically used
/// when the writer is attached to something like a terminal, this is used
/// to fetch the dimensions of the terminal.
///
/// If successful, returns `Ok((width, height))`.
///
/// # Error
///
/// This function will return an error if the output stream is not actually
/// connected to a TTY instance, or if querying the TTY instance fails.
pub fn winsize(&mut self) -> IoResult<(int, int)> {
match self.inner {
TTY(ref mut tty) => {
tty.get_winsize()
}
File(..) => {
Err(IoError {
kind: OtherIoError,
desc: "stream is not a tty",
detail: None,
})
}
}
}
/// Controls whether this output stream is a "raw stream" or simply a normal
/// stream.
///
/// # Error
///
/// This function will return an error if the output stream is not actually
/// connected to a TTY instance, or if querying the TTY instance fails.
pub fn set_raw(&mut self, raw: bool) -> IoResult<()> {
match self.inner {
TTY(ref mut tty) => {
tty.set_raw(raw)
}
File(..) => {
Err(IoError {
kind: OtherIoError,
desc: "stream is not a tty",
detail: None,
})
}
}
}
/// Returns whether this stream is attached to a TTY instance or not.
pub fn isatty(&self) -> bool {
match self.inner {
TTY(..) => true,
File(..) => false,
}
}
}
impl Writer for StdWriter {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
// As with stdin on windows, stdout often can't handle writes of large
// sizes. For an example, see #14940. For this reason, chunk the output
// buffer on windows, but on unix we can just write the whole buffer all
// at once.
//
// For some other references, it appears that this problem has been
// encountered by others [1] [2]. We choose the number 8KB just because
// libuv does the same.
//
// [1]: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232
// [2]: http://www.mail-archive.com/[email protected]/msg00661.html
let max_size = if cfg!(windows) {8192} else {uint::MAX};
for chunk in buf.chunks(max_size) {
try!(match self.inner {
TTY(ref mut tty) => tty.write(chunk),
File(ref mut file) => file.write(chunk),
})
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use prelude::*;
#[test]
fn smoke() {
// Just make sure we can acquire handles
stdin();
stdout();
stderr();
}
#[test]
fn capture_stdout() {
use io::{ChanReader, ChanWriter};
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));
spawn(proc() {
set_stdout(box w);
println!("hello!");
});
assert_eq!(r.read_to_string().unwrap(), "hello!\n");
}
#[test]
fn capture_stderr() {
use realstd::comm::channel;
use realstd::io::{ChanReader, ChanWriter, Reader};
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));
spawn(proc() {
::realstd::io::stdio::set_stderr(box w);
panic!("my special message");
});
let s = r.read_to_string().unwrap();
assert!(s.contains("my special message"));
}
}
|
read_be_uint_n
|
identifier_name
|
stdio.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Non-blocking access to stdin, stdout, and stderr.
//!
//! This module provides bindings to the local event loop's TTY interface, using it
//! to offer synchronous but non-blocking versions of stdio. These handles can be
//! inspected for information about terminal dimensions or for related information
//! about the stream or terminal to which it is attached.
//!
//! # Example
//!
//! ```rust
//! # #![allow(unused_must_use)]
//! use std::io;
//!
//! let mut out = io::stdout();
//! out.write(b"Hello, world!");
//! ```
use self::StdSource::*;
use boxed::Box;
use cell::RefCell;
use clone::Clone;
use failure::LOCAL_STDERR;
use fmt;
use io::{Reader, Writer, IoResult, IoError, OtherIoError, Buffer,
standard_error, EndOfFile, LineBufferedWriter, BufferedReader};
use kinds::Send;
use libc;
use mem;
use option::Option;
use option::Option::{Some, None};
use ops::{Deref, DerefMut};
use result::Result::{Ok, Err};
use rustrt;
use rustrt::local::Local;
use rustrt::task::Task;
use slice::SlicePrelude;
use str::StrPrelude;
use string::String;
use sys::{fs, tty};
use sync::{Arc, Mutex, MutexGuard, Once, ONCE_INIT};
use uint;
use vec::Vec;
// And so begins the tale of acquiring a uv handle to a stdio stream on all
// platforms in all situations. Our story begins by splitting the world into two
// categories, windows and unix. Then one day the creators of unix said let
// there be redirection! And henceforth there was redirection away from the
// console for standard I/O streams.
//
// After this day, the world split into four factions:
//
// 1. Unix with stdout on a terminal.
// 2. Unix with stdout redirected.
// 3. Windows with stdout on a terminal.
// 4. Windows with stdout redirected.
//
// Many years passed, and then one day the nation of libuv decided to unify this
// world. After months of toiling, uv created three ideas: TTY, Pipe, File.
// These three ideas propagated throughout the lands and the four great factions
// decided to settle among them.
//
// The groups of 1, 2, and 3 all worked very hard towards the idea of TTY. Upon
// doing so, they even enhanced themselves further then their Pipe/File
// brethren, becoming the dominant powers.
//
// The group of 4, however, decided to work independently. They abandoned the
// common TTY belief throughout, and even abandoned the fledgling Pipe belief.
// The members of the 4th faction decided to only align themselves with File.
//
// tl;dr; TTY works on everything but when windows stdout is redirected, in that
// case pipe also doesn't work, but magically file does!
enum StdSource {
TTY(tty::TTY),
File(fs::FileDesc),
}
fn src<T>(fd: libc::c_int, _readable: bool, f: |StdSource| -> T) -> T {
match tty::TTY::new(fd) {
Ok(tty) => f(TTY(tty)),
Err(_) => f(File(fs::FileDesc::new(fd, false))),
}
}
thread_local!(static LOCAL_STDOUT: RefCell<Option<Box<Writer + Send>>> = {
RefCell::new(None)
})
/// A synchronized wrapper around a buffered reader from stdin
#[deriving(Clone)]
pub struct StdinReader {
inner: Arc<Mutex<BufferedReader<StdReader>>>,
}
/// A guard for exlusive access to `StdinReader`'s internal `BufferedReader`.
pub struct StdinReaderGuard<'a> {
inner: MutexGuard<'a, BufferedReader<StdReader>>,
}
impl<'a> Deref<BufferedReader<StdReader>> for StdinReaderGuard<'a> {
fn deref(&self) -> &BufferedReader<StdReader> {
&*self.inner
}
}
impl<'a> DerefMut<BufferedReader<StdReader>> for StdinReaderGuard<'a> {
fn deref_mut(&mut self) -> &mut BufferedReader<StdReader> {
&mut *self.inner
}
}
impl StdinReader {
/// Locks the `StdinReader`, granting the calling thread exclusive access
/// to the underlying `BufferedReader`.
///
/// This provides access to methods like `chars` and `lines`.
///
/// ## Example
///
/// ```rust
/// use std::io;
///
/// for line in io::stdin().lock().lines() {
/// println!("{}", line.unwrap());
/// }
/// ```
pub fn lock<'a>(&'a mut self) -> StdinReaderGuard<'a> {
StdinReaderGuard {
inner: self.inner.lock()
}
}
/// Like `Buffer::read_line`.
///
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_line(&mut self) -> IoResult<String> {
self.inner.lock().read_line()
}
/// Like `Buffer::read_until`.
///
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_until(&mut self, byte: u8) -> IoResult<Vec<u8>> {
self.inner.lock().read_until(byte)
}
/// Like `Buffer::read_char`.
///
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_char(&mut self) -> IoResult<char> {
self.inner.lock().read_char()
}
}
impl Reader for StdinReader {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>
|
// We have to manually delegate all of these because the default impls call
// read more than once and we don't want those calls to interleave (or
// incur the costs of repeated locking).
fn read_at_least(&mut self, min: uint, buf: &mut [u8]) -> IoResult<uint> {
self.inner.lock().read_at_least(min, buf)
}
fn push_at_least(&mut self, min: uint, len: uint, buf: &mut Vec<u8>) -> IoResult<uint> {
self.inner.lock().push_at_least(min, len, buf)
}
fn read_to_end(&mut self) -> IoResult<Vec<u8>> {
self.inner.lock().read_to_end()
}
fn read_le_uint_n(&mut self, nbytes: uint) -> IoResult<u64> {
self.inner.lock().read_le_uint_n(nbytes)
}
fn read_be_uint_n(&mut self, nbytes: uint) -> IoResult<u64> {
self.inner.lock().read_be_uint_n(nbytes)
}
}
/// Creates a new handle to the stdin of the current process.
///
/// The returned handle is a wrapper around a global `BufferedReader` shared
/// by all threads. If buffered access is not desired, the `stdin_raw` function
/// is provided to provided unbuffered access to stdin.
///
/// See `stdout()` for more notes about this function.
pub fn stdin() -> StdinReader {
// We're following the same strategy as kimundi's lazy_static library
static mut STDIN: *const StdinReader = 0 as *const StdinReader;
static ONCE: Once = ONCE_INIT;
unsafe {
ONCE.doit(|| {
// The default buffer capacity is 64k, but apparently windows doesn't like
// 64k reads on stdin. See #13304 for details, but the idea is that on
// windows we use a slightly smaller buffer that's been seen to be
// acceptable.
let stdin = if cfg!(windows) {
BufferedReader::with_capacity(8 * 1024, stdin_raw())
} else {
BufferedReader::new(stdin_raw())
};
let stdin = StdinReader {
inner: Arc::new(Mutex::new(stdin))
};
STDIN = mem::transmute(box stdin);
});
(*STDIN).clone()
}
}
/// Creates a new non-blocking handle to the stdin of the current process.
///
/// Unlike `stdin()`, the returned reader is *not* a buffered reader.
///
/// See `stdout()` for more notes about this function.
pub fn stdin_raw() -> StdReader {
src(libc::STDIN_FILENO, true, |src| StdReader { inner: src })
}
/// Creates a line-buffered handle to the stdout of the current process.
///
/// Note that this is a fairly expensive operation in that at least one memory
/// allocation is performed. Additionally, this must be called from a runtime
/// task context because the stream returned will be a non-blocking object using
/// the local scheduler to perform the I/O.
///
/// Care should be taken when creating multiple handles to an output stream for
/// a single process. While usage is still safe, the output may be surprising if
/// no synchronization is performed to ensure a sane output.
pub fn stdout() -> LineBufferedWriter<StdWriter> {
LineBufferedWriter::new(stdout_raw())
}
/// Creates an unbuffered handle to the stdout of the current process
///
/// See notes in `stdout()` for more information.
pub fn stdout_raw() -> StdWriter {
src(libc::STDOUT_FILENO, false, |src| StdWriter { inner: src })
}
/// Creates a line-buffered handle to the stderr of the current process.
///
/// See `stdout()` for notes about this function.
pub fn stderr() -> LineBufferedWriter<StdWriter> {
LineBufferedWriter::new(stderr_raw())
}
/// Creates an unbuffered handle to the stderr of the current process
///
/// See notes in `stdout()` for more information.
pub fn stderr_raw() -> StdWriter {
src(libc::STDERR_FILENO, false, |src| StdWriter { inner: src })
}
/// Resets the task-local stdout handle to the specified writer
///
/// This will replace the current task's stdout handle, returning the old
/// handle. All future calls to `print` and friends will emit their output to
/// this specified handle.
///
/// Note that this does not need to be called for all new tasks; the default
/// output handle is to the process's stdout stream.
pub fn set_stdout(stdout: Box<Writer + Send>) -> Option<Box<Writer + Send>> {
let mut new = Some(stdout);
LOCAL_STDOUT.with(|slot| {
mem::replace(&mut *slot.borrow_mut(), new.take())
}).and_then(|mut s| {
let _ = s.flush();
Some(s)
})
}
/// Resets the task-local stderr handle to the specified writer
///
/// This will replace the current task's stderr handle, returning the old
/// handle. Currently, the stderr handle is used for printing panic messages
/// during task panic.
///
/// Note that this does not need to be called for all new tasks; the default
/// output handle is to the process's stderr stream.
pub fn set_stderr(stderr: Box<Writer + Send>) -> Option<Box<Writer + Send>> {
let mut new = Some(stderr);
LOCAL_STDERR.with(|slot| {
mem::replace(&mut *slot.borrow_mut(), new.take())
}).and_then(|mut s| {
let _ = s.flush();
Some(s)
})
}
// Helper to access the local task's stdout handle
//
// Note that this is not a safe function to expose because you can create an
// aliased pointer very easily:
//
// with_task_stdout(|io1| {
// with_task_stdout(|io2| {
// // io1 aliases io2
// })
// })
fn with_task_stdout(f: |&mut Writer| -> IoResult<()>) {
let result = if Local::exists(None::<Task>) {
let mut my_stdout = LOCAL_STDOUT.with(|slot| {
slot.borrow_mut().take()
}).unwrap_or_else(|| {
box stdout() as Box<Writer + Send>
});
let result = f(&mut *my_stdout);
let mut var = Some(my_stdout);
LOCAL_STDOUT.with(|slot| {
*slot.borrow_mut() = var.take();
});
result
} else {
let mut io = rustrt::Stdout;
f(&mut io as &mut Writer)
};
match result {
Ok(()) => {}
Err(e) => panic!("failed printing to stdout: {}", e),
}
}
/// Flushes the local task's stdout handle.
///
/// By default, this stream is a line-buffering stream, so flushing may be
/// necessary to ensure that all output is printed to the screen (if there are
/// no newlines printed).
///
/// Note that logging macros do not use this stream. Using the logging macros
/// will emit output to stderr, and while they are line buffered the log
/// messages are always terminated in a newline (no need to flush).
pub fn flush() {
with_task_stdout(|io| io.flush())
}
/// Prints a string to the stdout of the current process. No newline is emitted
/// after the string is printed.
pub fn print(s: &str) {
with_task_stdout(|io| io.write(s.as_bytes()))
}
/// Prints a string to the stdout of the current process. A literal
/// `\n` character is printed to the console after the string.
pub fn println(s: &str) {
with_task_stdout(|io| {
io.write(s.as_bytes()).and_then(|()| io.write(&[b'\n']))
})
}
/// Similar to `print`, but takes a `fmt::Arguments` structure to be compatible
/// with the `format_args!` macro.
pub fn print_args(fmt: &fmt::Arguments) {
with_task_stdout(|io| write!(io, "{}", fmt))
}
/// Similar to `println`, but takes a `fmt::Arguments` structure to be
/// compatible with the `format_args!` macro.
pub fn println_args(fmt: &fmt::Arguments) {
with_task_stdout(|io| writeln!(io, "{}", fmt))
}
/// Representation of a reader of a standard input stream
pub struct StdReader {
inner: StdSource
}
impl StdReader {
/// Returns whether this stream is attached to a TTY instance or not.
pub fn isatty(&self) -> bool {
match self.inner {
TTY(..) => true,
File(..) => false,
}
}
}
impl Reader for StdReader {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
let ret = match self.inner {
TTY(ref mut tty) => {
// Flush the task-local stdout so that weird issues like a
// print!'d prompt not being shown until after the user hits
// enter.
flush();
tty.read(buf).map(|i| i as uint)
},
File(ref mut file) => file.read(buf).map(|i| i as uint),
};
match ret {
// When reading a piped stdin, libuv will return 0-length reads when
// stdin reaches EOF. For pretty much all other streams it will
// return an actual EOF error, but apparently for stdin it's a
// little different. Hence, here we convert a 0 length read to an
// end-of-file indicator so the caller knows to stop reading.
Ok(0) => { Err(standard_error(EndOfFile)) }
ret @ Ok(..) | ret @ Err(..) => ret,
}
}
}
/// Representation of a writer to a standard output stream
pub struct StdWriter {
inner: StdSource
}
impl StdWriter {
/// Gets the size of this output window, if possible. This is typically used
/// when the writer is attached to something like a terminal, this is used
/// to fetch the dimensions of the terminal.
///
/// If successful, returns `Ok((width, height))`.
///
/// # Error
///
/// This function will return an error if the output stream is not actually
/// connected to a TTY instance, or if querying the TTY instance fails.
pub fn winsize(&mut self) -> IoResult<(int, int)> {
match self.inner {
TTY(ref mut tty) => {
tty.get_winsize()
}
File(..) => {
Err(IoError {
kind: OtherIoError,
desc: "stream is not a tty",
detail: None,
})
}
}
}
/// Controls whether this output stream is a "raw stream" or simply a normal
/// stream.
///
/// # Error
///
/// This function will return an error if the output stream is not actually
/// connected to a TTY instance, or if querying the TTY instance fails.
pub fn set_raw(&mut self, raw: bool) -> IoResult<()> {
match self.inner {
TTY(ref mut tty) => {
tty.set_raw(raw)
}
File(..) => {
Err(IoError {
kind: OtherIoError,
desc: "stream is not a tty",
detail: None,
})
}
}
}
/// Returns whether this stream is attached to a TTY instance or not.
pub fn isatty(&self) -> bool {
match self.inner {
TTY(..) => true,
File(..) => false,
}
}
}
impl Writer for StdWriter {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
// As with stdin on windows, stdout often can't handle writes of large
// sizes. For an example, see #14940. For this reason, chunk the output
// buffer on windows, but on unix we can just write the whole buffer all
// at once.
//
// For some other references, it appears that this problem has been
// encountered by others [1] [2]. We choose the number 8KB just because
// libuv does the same.
//
// [1]: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232
// [2]: http://www.mail-archive.com/[email protected]/msg00661.html
let max_size = if cfg!(windows) {8192} else {uint::MAX};
for chunk in buf.chunks(max_size) {
try!(match self.inner {
TTY(ref mut tty) => tty.write(chunk),
File(ref mut file) => file.write(chunk),
})
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use prelude::*;
#[test]
fn smoke() {
// Just make sure we can acquire handles
stdin();
stdout();
stderr();
}
#[test]
fn capture_stdout() {
use io::{ChanReader, ChanWriter};
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));
spawn(proc() {
set_stdout(box w);
println!("hello!");
});
assert_eq!(r.read_to_string().unwrap(), "hello!\n");
}
#[test]
fn capture_stderr() {
use realstd::comm::channel;
use realstd::io::{ChanReader, ChanWriter, Reader};
let (tx, rx) = channel();
let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx));
spawn(proc() {
::realstd::io::stdio::set_stderr(box w);
panic!("my special message");
});
let s = r.read_to_string().unwrap();
assert!(s.contains("my special message"));
}
}
|
{
self.inner.lock().read(buf)
}
|
identifier_body
|
normalize_erasing_regions.rs
|
//! Methods for normalizing when you don't care about regions (and
//! aren't doing type inference). If either of those things don't
//! apply to you, use `infcx.normalize(...)`.
//!
//! The methods in this file use a `TypeFolder` to recursively process
//! contents, invoking the underlying
//! `normalize_generic_arg_after_erasing_regions` query for each type
//! or constant found within. (This underlying query is what is cached.)
use crate::mir;
use crate::ty::fold::{TypeFoldable, TypeFolder};
use crate::ty::subst::{Subst, SubstsRef};
use crate::ty::{self, Ty, TyCtxt};
impl<'tcx> TyCtxt<'tcx> {
/// Erase the regions in `value` and then fully normalize all the
/// types found within. The result will also have regions erased.
///
/// This is appropriate to use only after type-check: it assumes
/// that normalization will succeed, for example.
pub fn normalize_erasing_regions<T>(self, param_env: ty::ParamEnv<'tcx>, value: T) -> T
where
T: TypeFoldable<'tcx>,
{
debug!(
"normalize_erasing_regions::<{}>(value={:?}, param_env={:?})",
std::any::type_name::<T>(),
value,
param_env,
);
// Erase first before we do the real query -- this keeps the
// cache from being too polluted.
let value = self.erase_regions(value);
if!value.has_projections() {
value
} else {
value
.fold_with(&mut NormalizeAfterErasingRegionsFolder { tcx: self, param_env })
.into_ok()
}
}
/// If you have a `Binder<'tcx, T>`, you can do this to strip out the
/// late-bound regions and then normalize the result, yielding up
/// a `T` (with regions erased). This is appropriate when the
/// binder is being instantiated at the call site.
///
/// N.B., currently, higher-ranked type bounds inhibit
/// normalization. Therefore, each time we erase them in
/// codegen, we need to normalize the contents.
pub fn normalize_erasing_late_bound_regions<T>(
self,
param_env: ty::ParamEnv<'tcx>,
value: ty::Binder<'tcx, T>,
) -> T
where
T: TypeFoldable<'tcx>,
{
let value = self.erase_late_bound_regions(value);
self.normalize_erasing_regions(param_env, value)
}
/// Monomorphizes a type from the AST by first applying the
/// in-scope substitutions and then normalizing any associated
/// types.
pub fn subst_and_normalize_erasing_regions<T>(
self,
param_substs: SubstsRef<'tcx>,
param_env: ty::ParamEnv<'tcx>,
value: T,
) -> T
where
T: TypeFoldable<'tcx>,
{
debug!(
"subst_and_normalize_erasing_regions(\
param_substs={:?}, \
value={:?}, \
param_env={:?})",
param_substs, value, param_env,
);
let substituted = value.subst(self, param_substs);
self.normalize_erasing_regions(param_env, substituted)
}
}
struct NormalizeAfterErasingRegionsFolder<'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
}
impl<'tcx> NormalizeAfterErasingRegionsFolder<'tcx> {
fn
|
(
&self,
arg: ty::GenericArg<'tcx>,
) -> ty::GenericArg<'tcx> {
let arg = self.param_env.and(arg);
self.tcx.normalize_generic_arg_after_erasing_regions(arg)
}
}
impl TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
Ok(self.normalize_generic_arg_after_erasing_regions(ty.into()).expect_ty())
}
fn fold_const(
&mut self,
c: &'tcx ty::Const<'tcx>,
) -> Result<&'tcx ty::Const<'tcx>, Self::Error> {
Ok(self.normalize_generic_arg_after_erasing_regions(c.into()).expect_const())
}
#[inline]
fn fold_mir_const(
&mut self,
c: mir::ConstantKind<'tcx>,
) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
// FIXME: This *probably* needs canonicalization too!
let arg = self.param_env.and(c);
Ok(self.tcx.normalize_mir_const_after_erasing_regions(arg))
}
}
|
normalize_generic_arg_after_erasing_regions
|
identifier_name
|
normalize_erasing_regions.rs
|
//! Methods for normalizing when you don't care about regions (and
//! aren't doing type inference). If either of those things don't
//! apply to you, use `infcx.normalize(...)`.
//!
//! The methods in this file use a `TypeFolder` to recursively process
//! contents, invoking the underlying
//! `normalize_generic_arg_after_erasing_regions` query for each type
//! or constant found within. (This underlying query is what is cached.)
use crate::mir;
use crate::ty::fold::{TypeFoldable, TypeFolder};
use crate::ty::subst::{Subst, SubstsRef};
use crate::ty::{self, Ty, TyCtxt};
impl<'tcx> TyCtxt<'tcx> {
/// Erase the regions in `value` and then fully normalize all the
/// types found within. The result will also have regions erased.
///
/// This is appropriate to use only after type-check: it assumes
/// that normalization will succeed, for example.
pub fn normalize_erasing_regions<T>(self, param_env: ty::ParamEnv<'tcx>, value: T) -> T
where
T: TypeFoldable<'tcx>,
{
debug!(
"normalize_erasing_regions::<{}>(value={:?}, param_env={:?})",
std::any::type_name::<T>(),
value,
param_env,
);
// Erase first before we do the real query -- this keeps the
// cache from being too polluted.
let value = self.erase_regions(value);
if!value.has_projections() {
value
} else {
value
.fold_with(&mut NormalizeAfterErasingRegionsFolder { tcx: self, param_env })
.into_ok()
}
}
/// If you have a `Binder<'tcx, T>`, you can do this to strip out the
/// late-bound regions and then normalize the result, yielding up
/// a `T` (with regions erased). This is appropriate when the
/// binder is being instantiated at the call site.
///
/// N.B., currently, higher-ranked type bounds inhibit
/// normalization. Therefore, each time we erase them in
/// codegen, we need to normalize the contents.
pub fn normalize_erasing_late_bound_regions<T>(
self,
param_env: ty::ParamEnv<'tcx>,
value: ty::Binder<'tcx, T>,
) -> T
where
T: TypeFoldable<'tcx>,
|
/// Monomorphizes a type from the AST by first applying the
/// in-scope substitutions and then normalizing any associated
/// types.
pub fn subst_and_normalize_erasing_regions<T>(
self,
param_substs: SubstsRef<'tcx>,
param_env: ty::ParamEnv<'tcx>,
value: T,
) -> T
where
T: TypeFoldable<'tcx>,
{
debug!(
"subst_and_normalize_erasing_regions(\
param_substs={:?}, \
value={:?}, \
param_env={:?})",
param_substs, value, param_env,
);
let substituted = value.subst(self, param_substs);
self.normalize_erasing_regions(param_env, substituted)
}
}
struct NormalizeAfterErasingRegionsFolder<'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
}
impl<'tcx> NormalizeAfterErasingRegionsFolder<'tcx> {
fn normalize_generic_arg_after_erasing_regions(
&self,
arg: ty::GenericArg<'tcx>,
) -> ty::GenericArg<'tcx> {
let arg = self.param_env.and(arg);
self.tcx.normalize_generic_arg_after_erasing_regions(arg)
}
}
impl TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
Ok(self.normalize_generic_arg_after_erasing_regions(ty.into()).expect_ty())
}
fn fold_const(
&mut self,
c: &'tcx ty::Const<'tcx>,
) -> Result<&'tcx ty::Const<'tcx>, Self::Error> {
Ok(self.normalize_generic_arg_after_erasing_regions(c.into()).expect_const())
}
#[inline]
fn fold_mir_const(
&mut self,
c: mir::ConstantKind<'tcx>,
) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
// FIXME: This *probably* needs canonicalization too!
let arg = self.param_env.and(c);
Ok(self.tcx.normalize_mir_const_after_erasing_regions(arg))
}
}
|
{
let value = self.erase_late_bound_regions(value);
self.normalize_erasing_regions(param_env, value)
}
|
identifier_body
|
normalize_erasing_regions.rs
|
//! Methods for normalizing when you don't care about regions (and
//! aren't doing type inference). If either of those things don't
//! apply to you, use `infcx.normalize(...)`.
//!
//! The methods in this file use a `TypeFolder` to recursively process
//! contents, invoking the underlying
//! `normalize_generic_arg_after_erasing_regions` query for each type
//! or constant found within. (This underlying query is what is cached.)
use crate::mir;
use crate::ty::fold::{TypeFoldable, TypeFolder};
use crate::ty::subst::{Subst, SubstsRef};
use crate::ty::{self, Ty, TyCtxt};
impl<'tcx> TyCtxt<'tcx> {
/// Erase the regions in `value` and then fully normalize all the
/// types found within. The result will also have regions erased.
///
/// This is appropriate to use only after type-check: it assumes
/// that normalization will succeed, for example.
pub fn normalize_erasing_regions<T>(self, param_env: ty::ParamEnv<'tcx>, value: T) -> T
where
T: TypeFoldable<'tcx>,
{
debug!(
"normalize_erasing_regions::<{}>(value={:?}, param_env={:?})",
std::any::type_name::<T>(),
value,
param_env,
);
// Erase first before we do the real query -- this keeps the
// cache from being too polluted.
let value = self.erase_regions(value);
if!value.has_projections()
|
else {
value
.fold_with(&mut NormalizeAfterErasingRegionsFolder { tcx: self, param_env })
.into_ok()
}
}
/// If you have a `Binder<'tcx, T>`, you can do this to strip out the
/// late-bound regions and then normalize the result, yielding up
/// a `T` (with regions erased). This is appropriate when the
/// binder is being instantiated at the call site.
///
/// N.B., currently, higher-ranked type bounds inhibit
/// normalization. Therefore, each time we erase them in
/// codegen, we need to normalize the contents.
pub fn normalize_erasing_late_bound_regions<T>(
self,
param_env: ty::ParamEnv<'tcx>,
value: ty::Binder<'tcx, T>,
) -> T
where
T: TypeFoldable<'tcx>,
{
let value = self.erase_late_bound_regions(value);
self.normalize_erasing_regions(param_env, value)
}
/// Monomorphizes a type from the AST by first applying the
/// in-scope substitutions and then normalizing any associated
/// types.
pub fn subst_and_normalize_erasing_regions<T>(
self,
param_substs: SubstsRef<'tcx>,
param_env: ty::ParamEnv<'tcx>,
value: T,
) -> T
where
T: TypeFoldable<'tcx>,
{
debug!(
"subst_and_normalize_erasing_regions(\
param_substs={:?}, \
value={:?}, \
param_env={:?})",
param_substs, value, param_env,
);
let substituted = value.subst(self, param_substs);
self.normalize_erasing_regions(param_env, substituted)
}
}
struct NormalizeAfterErasingRegionsFolder<'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
}
impl<'tcx> NormalizeAfterErasingRegionsFolder<'tcx> {
fn normalize_generic_arg_after_erasing_regions(
&self,
arg: ty::GenericArg<'tcx>,
) -> ty::GenericArg<'tcx> {
let arg = self.param_env.and(arg);
self.tcx.normalize_generic_arg_after_erasing_regions(arg)
}
}
impl TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
Ok(self.normalize_generic_arg_after_erasing_regions(ty.into()).expect_ty())
}
fn fold_const(
&mut self,
c: &'tcx ty::Const<'tcx>,
) -> Result<&'tcx ty::Const<'tcx>, Self::Error> {
Ok(self.normalize_generic_arg_after_erasing_regions(c.into()).expect_const())
}
#[inline]
fn fold_mir_const(
&mut self,
c: mir::ConstantKind<'tcx>,
) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
// FIXME: This *probably* needs canonicalization too!
let arg = self.param_env.and(c);
Ok(self.tcx.normalize_mir_const_after_erasing_regions(arg))
}
}
|
{
value
}
|
conditional_block
|
normalize_erasing_regions.rs
|
//! Methods for normalizing when you don't care about regions (and
//! aren't doing type inference). If either of those things don't
//! apply to you, use `infcx.normalize(...)`.
//!
//! The methods in this file use a `TypeFolder` to recursively process
//! contents, invoking the underlying
//! `normalize_generic_arg_after_erasing_regions` query for each type
|
use crate::mir;
use crate::ty::fold::{TypeFoldable, TypeFolder};
use crate::ty::subst::{Subst, SubstsRef};
use crate::ty::{self, Ty, TyCtxt};
impl<'tcx> TyCtxt<'tcx> {
/// Erase the regions in `value` and then fully normalize all the
/// types found within. The result will also have regions erased.
///
/// This is appropriate to use only after type-check: it assumes
/// that normalization will succeed, for example.
pub fn normalize_erasing_regions<T>(self, param_env: ty::ParamEnv<'tcx>, value: T) -> T
where
T: TypeFoldable<'tcx>,
{
debug!(
"normalize_erasing_regions::<{}>(value={:?}, param_env={:?})",
std::any::type_name::<T>(),
value,
param_env,
);
// Erase first before we do the real query -- this keeps the
// cache from being too polluted.
let value = self.erase_regions(value);
if!value.has_projections() {
value
} else {
value
.fold_with(&mut NormalizeAfterErasingRegionsFolder { tcx: self, param_env })
.into_ok()
}
}
/// If you have a `Binder<'tcx, T>`, you can do this to strip out the
/// late-bound regions and then normalize the result, yielding up
/// a `T` (with regions erased). This is appropriate when the
/// binder is being instantiated at the call site.
///
/// N.B., currently, higher-ranked type bounds inhibit
/// normalization. Therefore, each time we erase them in
/// codegen, we need to normalize the contents.
pub fn normalize_erasing_late_bound_regions<T>(
self,
param_env: ty::ParamEnv<'tcx>,
value: ty::Binder<'tcx, T>,
) -> T
where
T: TypeFoldable<'tcx>,
{
let value = self.erase_late_bound_regions(value);
self.normalize_erasing_regions(param_env, value)
}
/// Monomorphizes a type from the AST by first applying the
/// in-scope substitutions and then normalizing any associated
/// types.
pub fn subst_and_normalize_erasing_regions<T>(
self,
param_substs: SubstsRef<'tcx>,
param_env: ty::ParamEnv<'tcx>,
value: T,
) -> T
where
T: TypeFoldable<'tcx>,
{
debug!(
"subst_and_normalize_erasing_regions(\
param_substs={:?}, \
value={:?}, \
param_env={:?})",
param_substs, value, param_env,
);
let substituted = value.subst(self, param_substs);
self.normalize_erasing_regions(param_env, substituted)
}
}
struct NormalizeAfterErasingRegionsFolder<'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
}
impl<'tcx> NormalizeAfterErasingRegionsFolder<'tcx> {
fn normalize_generic_arg_after_erasing_regions(
&self,
arg: ty::GenericArg<'tcx>,
) -> ty::GenericArg<'tcx> {
let arg = self.param_env.and(arg);
self.tcx.normalize_generic_arg_after_erasing_regions(arg)
}
}
impl TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
Ok(self.normalize_generic_arg_after_erasing_regions(ty.into()).expect_ty())
}
fn fold_const(
&mut self,
c: &'tcx ty::Const<'tcx>,
) -> Result<&'tcx ty::Const<'tcx>, Self::Error> {
Ok(self.normalize_generic_arg_after_erasing_regions(c.into()).expect_const())
}
#[inline]
fn fold_mir_const(
&mut self,
c: mir::ConstantKind<'tcx>,
) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
// FIXME: This *probably* needs canonicalization too!
let arg = self.param_env.and(c);
Ok(self.tcx.normalize_mir_const_after_erasing_regions(arg))
}
}
|
//! or constant found within. (This underlying query is what is cached.)
|
random_line_split
|
pyext.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::slice;
use cpython::PyBytes;
|
use cpython::PyModule;
use cpython::PyObject;
use cpython::PyResult;
use cpython::Python;
use cpython_ext::ResultPyErrExt;
use cpython_ext::SimplePyBuf;
use revlogindex::nodemap::empty_index_buffer;
use revlogindex::NodeRevMap;
use revlogindex::RevlogEntry;
pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
let name = [package, "indexes"].join(".");
let m = PyModule::new(py, &name)?;
m.add_class::<nodemap>(py)?;
Ok(m)
}
py_class!(class nodemap |py| {
data nodemap: NodeRevMap<SimplePyBuf<RevlogEntry>, SimplePyBuf<u32>>;
def __new__(_cls, changelog: &PyObject, index: &PyObject) -> PyResult<nodemap> {
let changelog_buf = SimplePyBuf::new(py, changelog);
let index_buf = SimplePyBuf::new(py, index);
let nm = NodeRevMap::new(changelog_buf, index_buf).map_pyerr(py)?;
nodemap::create_instance(py, nm)
}
def __getitem__(&self, key: PyBytes) -> PyResult<Option<u32>> {
Ok(self.nodemap(py).node_to_rev(key.data(py)).map_pyerr(py)?)
}
def __contains__(&self, key: PyBytes) -> PyResult<bool> {
Ok(self.nodemap(py).node_to_rev(key.data(py)).map_pyerr(py)?.is_some())
}
def partialmatch(&self, hex: &str) -> PyResult<Option<PyBytes>> {
Ok(self.nodemap(py).hex_prefix_to_node(hex).map_pyerr(py)?.map(|b| PyBytes::new(py, b)))
}
def build(&self) -> PyResult<PyBytes> {
let buf = self.nodemap(py).build_incrementally().map_pyerr(py)?;
let slice = unsafe { slice::from_raw_parts(buf.as_ptr() as *const u8, buf.len() * 4) };
Ok(PyBytes::new(py, slice))
}
def lag(&self) -> PyResult<u32> {
Ok(self.nodemap(py).lag())
}
@staticmethod
def emptyindexbuffer() -> PyResult<PyBytes> {
let buf = empty_index_buffer();
Ok(PyBytes::new(py, &buf))
}
});
|
random_line_split
|
|
pyext.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::slice;
use cpython::PyBytes;
use cpython::PyModule;
use cpython::PyObject;
use cpython::PyResult;
use cpython::Python;
use cpython_ext::ResultPyErrExt;
use cpython_ext::SimplePyBuf;
use revlogindex::nodemap::empty_index_buffer;
use revlogindex::NodeRevMap;
use revlogindex::RevlogEntry;
pub fn init_module(py: Python, package: &str) -> PyResult<PyModule>
|
py_class!(class nodemap |py| {
data nodemap: NodeRevMap<SimplePyBuf<RevlogEntry>, SimplePyBuf<u32>>;
def __new__(_cls, changelog: &PyObject, index: &PyObject) -> PyResult<nodemap> {
let changelog_buf = SimplePyBuf::new(py, changelog);
let index_buf = SimplePyBuf::new(py, index);
let nm = NodeRevMap::new(changelog_buf, index_buf).map_pyerr(py)?;
nodemap::create_instance(py, nm)
}
def __getitem__(&self, key: PyBytes) -> PyResult<Option<u32>> {
Ok(self.nodemap(py).node_to_rev(key.data(py)).map_pyerr(py)?)
}
def __contains__(&self, key: PyBytes) -> PyResult<bool> {
Ok(self.nodemap(py).node_to_rev(key.data(py)).map_pyerr(py)?.is_some())
}
def partialmatch(&self, hex: &str) -> PyResult<Option<PyBytes>> {
Ok(self.nodemap(py).hex_prefix_to_node(hex).map_pyerr(py)?.map(|b| PyBytes::new(py, b)))
}
def build(&self) -> PyResult<PyBytes> {
let buf = self.nodemap(py).build_incrementally().map_pyerr(py)?;
let slice = unsafe { slice::from_raw_parts(buf.as_ptr() as *const u8, buf.len() * 4) };
Ok(PyBytes::new(py, slice))
}
def lag(&self) -> PyResult<u32> {
Ok(self.nodemap(py).lag())
}
@staticmethod
def emptyindexbuffer() -> PyResult<PyBytes> {
let buf = empty_index_buffer();
Ok(PyBytes::new(py, &buf))
}
});
|
{
let name = [package, "indexes"].join(".");
let m = PyModule::new(py, &name)?;
m.add_class::<nodemap>(py)?;
Ok(m)
}
|
identifier_body
|
pyext.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::slice;
use cpython::PyBytes;
use cpython::PyModule;
use cpython::PyObject;
use cpython::PyResult;
use cpython::Python;
use cpython_ext::ResultPyErrExt;
use cpython_ext::SimplePyBuf;
use revlogindex::nodemap::empty_index_buffer;
use revlogindex::NodeRevMap;
use revlogindex::RevlogEntry;
pub fn
|
(py: Python, package: &str) -> PyResult<PyModule> {
let name = [package, "indexes"].join(".");
let m = PyModule::new(py, &name)?;
m.add_class::<nodemap>(py)?;
Ok(m)
}
py_class!(class nodemap |py| {
data nodemap: NodeRevMap<SimplePyBuf<RevlogEntry>, SimplePyBuf<u32>>;
def __new__(_cls, changelog: &PyObject, index: &PyObject) -> PyResult<nodemap> {
let changelog_buf = SimplePyBuf::new(py, changelog);
let index_buf = SimplePyBuf::new(py, index);
let nm = NodeRevMap::new(changelog_buf, index_buf).map_pyerr(py)?;
nodemap::create_instance(py, nm)
}
def __getitem__(&self, key: PyBytes) -> PyResult<Option<u32>> {
Ok(self.nodemap(py).node_to_rev(key.data(py)).map_pyerr(py)?)
}
def __contains__(&self, key: PyBytes) -> PyResult<bool> {
Ok(self.nodemap(py).node_to_rev(key.data(py)).map_pyerr(py)?.is_some())
}
def partialmatch(&self, hex: &str) -> PyResult<Option<PyBytes>> {
Ok(self.nodemap(py).hex_prefix_to_node(hex).map_pyerr(py)?.map(|b| PyBytes::new(py, b)))
}
def build(&self) -> PyResult<PyBytes> {
let buf = self.nodemap(py).build_incrementally().map_pyerr(py)?;
let slice = unsafe { slice::from_raw_parts(buf.as_ptr() as *const u8, buf.len() * 4) };
Ok(PyBytes::new(py, slice))
}
def lag(&self) -> PyResult<u32> {
Ok(self.nodemap(py).lag())
}
@staticmethod
def emptyindexbuffer() -> PyResult<PyBytes> {
let buf = empty_index_buffer();
Ok(PyBytes::new(py, &buf))
}
});
|
init_module
|
identifier_name
|
client_mock_tcp.rs
|
#![feature(default_type_params)]
extern crate curl;
extern crate http;
extern crate hyper;
extern crate test;
use std::fmt::{mod, Show};
use std::str::from_str;
use std::io::{IoResult, MemReader};
use std::io::net::ip::{SocketAddr, ToSocketAddr};
use std::os;
use std::path::BytesContainer;
use http::connecter::Connecter;
use hyper::net;
static README: &'static [u8] = include_bin!("../README.md");
struct MockStream {
read: MemReader,
}
impl Clone for MockStream {
fn clone(&self) -> MockStream {
MockStream::new()
}
}
impl MockStream {
fn new() -> MockStream {
let head = b"HTTP/1.1 200 OK\r\nServer: Mock\r\n\r\n";
let mut res = head.to_vec();
res.push_all(README);
MockStream {
read: MemReader::new(res),
}
}
}
impl Reader for MockStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
self.read.read(buf)
}
}
impl Writer for MockStream {
fn write(&mut self, _msg: &[u8]) -> IoResult<()> {
// we're mocking, what do we care.
Ok(())
}
}
#[bench]
fn bench_mock_curl(b: &mut test::Bencher) {
let mut cwd = os::getcwd().unwrap();
cwd.push("README.md");
let s = format!("file://{}", cwd.container_as_str().unwrap());
let url = s.as_slice();
b.iter(|| {
curl::http::handle()
.get(url)
.header("X-Foo", "Bar")
.exec()
.unwrap()
});
}
#[deriving(Clone)]
struct Foo;
impl hyper::header::Header for Foo {
fn header_name(_: Option<Foo>) -> &'static str {
"x-foo"
}
fn parse_header(_: &[Vec<u8>]) -> Option<Foo>
|
}
impl hyper::header::HeaderFormat for Foo {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
"Bar".fmt(fmt)
}
}
impl net::NetworkStream for MockStream {
fn peer_name(&mut self) -> IoResult<SocketAddr> {
Ok(from_str("127.0.0.1:1337").unwrap())
}
}
impl net::NetworkConnector for MockStream {
fn connect<To: ToSocketAddr>(_addr: To, _scheme: &str) -> IoResult<MockStream> {
Ok(MockStream::new())
}
}
#[bench]
fn bench_mock_hyper(b: &mut test::Bencher) {
let url = "http://127.0.0.1:1337/";
b.iter(|| {
let mut req = hyper::client::Request::with_stream::<MockStream>(
hyper::Get, hyper::Url::parse(url).unwrap()).unwrap();
req.headers_mut().set(Foo);
req
.start().unwrap()
.send().unwrap()
.read_to_string().unwrap()
});
}
impl Connecter for MockStream {
fn connect(_addr: SocketAddr, _host: &str, _use_ssl: bool) -> IoResult<MockStream> {
Ok(MockStream::new())
}
}
#[bench]
fn bench_mock_http(b: &mut test::Bencher) {
let url = "http://127.0.0.1:1337/";
b.iter(|| {
let mut req: http::client::RequestWriter<MockStream> = http::client::RequestWriter::new(
http::method::Get,
hyper::Url::parse(url).unwrap()
).unwrap();
req.headers.extensions.insert("x-foo".to_string(), "Bar".to_string());
// cant unwrap because Err contains RequestWriter, which does not implement Show
let mut res = match req.read_response() {
Ok(res) => res,
Err(..) => panic!("http response failed")
};
res.read_to_string().unwrap();
});
}
|
{
None
}
|
identifier_body
|
client_mock_tcp.rs
|
#![feature(default_type_params)]
extern crate curl;
extern crate http;
extern crate hyper;
extern crate test;
use std::fmt::{mod, Show};
use std::str::from_str;
use std::io::{IoResult, MemReader};
use std::io::net::ip::{SocketAddr, ToSocketAddr};
use std::os;
use std::path::BytesContainer;
use http::connecter::Connecter;
use hyper::net;
static README: &'static [u8] = include_bin!("../README.md");
struct MockStream {
read: MemReader,
}
impl Clone for MockStream {
fn clone(&self) -> MockStream {
MockStream::new()
}
}
impl MockStream {
fn new() -> MockStream {
let head = b"HTTP/1.1 200 OK\r\nServer: Mock\r\n\r\n";
let mut res = head.to_vec();
res.push_all(README);
MockStream {
read: MemReader::new(res),
}
}
}
impl Reader for MockStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
self.read.read(buf)
}
}
impl Writer for MockStream {
fn write(&mut self, _msg: &[u8]) -> IoResult<()> {
// we're mocking, what do we care.
Ok(())
}
}
#[bench]
fn bench_mock_curl(b: &mut test::Bencher) {
let mut cwd = os::getcwd().unwrap();
cwd.push("README.md");
let s = format!("file://{}", cwd.container_as_str().unwrap());
let url = s.as_slice();
b.iter(|| {
curl::http::handle()
.get(url)
.header("X-Foo", "Bar")
.exec()
.unwrap()
});
}
#[deriving(Clone)]
struct Foo;
impl hyper::header::Header for Foo {
fn header_name(_: Option<Foo>) -> &'static str {
"x-foo"
}
fn
|
(_: &[Vec<u8>]) -> Option<Foo> {
None
}
}
impl hyper::header::HeaderFormat for Foo {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
"Bar".fmt(fmt)
}
}
impl net::NetworkStream for MockStream {
fn peer_name(&mut self) -> IoResult<SocketAddr> {
Ok(from_str("127.0.0.1:1337").unwrap())
}
}
impl net::NetworkConnector for MockStream {
fn connect<To: ToSocketAddr>(_addr: To, _scheme: &str) -> IoResult<MockStream> {
Ok(MockStream::new())
}
}
#[bench]
fn bench_mock_hyper(b: &mut test::Bencher) {
let url = "http://127.0.0.1:1337/";
b.iter(|| {
let mut req = hyper::client::Request::with_stream::<MockStream>(
hyper::Get, hyper::Url::parse(url).unwrap()).unwrap();
req.headers_mut().set(Foo);
req
.start().unwrap()
.send().unwrap()
.read_to_string().unwrap()
});
}
impl Connecter for MockStream {
fn connect(_addr: SocketAddr, _host: &str, _use_ssl: bool) -> IoResult<MockStream> {
Ok(MockStream::new())
}
}
#[bench]
fn bench_mock_http(b: &mut test::Bencher) {
let url = "http://127.0.0.1:1337/";
b.iter(|| {
let mut req: http::client::RequestWriter<MockStream> = http::client::RequestWriter::new(
http::method::Get,
hyper::Url::parse(url).unwrap()
).unwrap();
req.headers.extensions.insert("x-foo".to_string(), "Bar".to_string());
// cant unwrap because Err contains RequestWriter, which does not implement Show
let mut res = match req.read_response() {
Ok(res) => res,
Err(..) => panic!("http response failed")
};
res.read_to_string().unwrap();
});
}
|
parse_header
|
identifier_name
|
client_mock_tcp.rs
|
#![feature(default_type_params)]
extern crate curl;
extern crate http;
extern crate hyper;
extern crate test;
use std::fmt::{mod, Show};
use std::str::from_str;
use std::io::{IoResult, MemReader};
use std::io::net::ip::{SocketAddr, ToSocketAddr};
use std::os;
use std::path::BytesContainer;
use http::connecter::Connecter;
use hyper::net;
static README: &'static [u8] = include_bin!("../README.md");
struct MockStream {
read: MemReader,
}
impl Clone for MockStream {
fn clone(&self) -> MockStream {
MockStream::new()
}
}
impl MockStream {
fn new() -> MockStream {
let head = b"HTTP/1.1 200 OK\r\nServer: Mock\r\n\r\n";
let mut res = head.to_vec();
res.push_all(README);
MockStream {
read: MemReader::new(res),
}
}
}
|
self.read.read(buf)
}
}
impl Writer for MockStream {
fn write(&mut self, _msg: &[u8]) -> IoResult<()> {
// we're mocking, what do we care.
Ok(())
}
}
#[bench]
fn bench_mock_curl(b: &mut test::Bencher) {
let mut cwd = os::getcwd().unwrap();
cwd.push("README.md");
let s = format!("file://{}", cwd.container_as_str().unwrap());
let url = s.as_slice();
b.iter(|| {
curl::http::handle()
.get(url)
.header("X-Foo", "Bar")
.exec()
.unwrap()
});
}
#[deriving(Clone)]
struct Foo;
impl hyper::header::Header for Foo {
fn header_name(_: Option<Foo>) -> &'static str {
"x-foo"
}
fn parse_header(_: &[Vec<u8>]) -> Option<Foo> {
None
}
}
impl hyper::header::HeaderFormat for Foo {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
"Bar".fmt(fmt)
}
}
impl net::NetworkStream for MockStream {
fn peer_name(&mut self) -> IoResult<SocketAddr> {
Ok(from_str("127.0.0.1:1337").unwrap())
}
}
impl net::NetworkConnector for MockStream {
fn connect<To: ToSocketAddr>(_addr: To, _scheme: &str) -> IoResult<MockStream> {
Ok(MockStream::new())
}
}
#[bench]
fn bench_mock_hyper(b: &mut test::Bencher) {
let url = "http://127.0.0.1:1337/";
b.iter(|| {
let mut req = hyper::client::Request::with_stream::<MockStream>(
hyper::Get, hyper::Url::parse(url).unwrap()).unwrap();
req.headers_mut().set(Foo);
req
.start().unwrap()
.send().unwrap()
.read_to_string().unwrap()
});
}
impl Connecter for MockStream {
fn connect(_addr: SocketAddr, _host: &str, _use_ssl: bool) -> IoResult<MockStream> {
Ok(MockStream::new())
}
}
#[bench]
fn bench_mock_http(b: &mut test::Bencher) {
let url = "http://127.0.0.1:1337/";
b.iter(|| {
let mut req: http::client::RequestWriter<MockStream> = http::client::RequestWriter::new(
http::method::Get,
hyper::Url::parse(url).unwrap()
).unwrap();
req.headers.extensions.insert("x-foo".to_string(), "Bar".to_string());
// cant unwrap because Err contains RequestWriter, which does not implement Show
let mut res = match req.read_response() {
Ok(res) => res,
Err(..) => panic!("http response failed")
};
res.read_to_string().unwrap();
});
}
|
impl Reader for MockStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
|
random_line_split
|
regions-escape-via-trait-or-not.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
trait Deref {
fn get(self) -> isize;
}
impl<'a> Deref for &'a isize {
fn get(self) -> isize {
*self
}
}
fn with<R:Deref, F>(f: F) -> isize where F: FnOnce(&isize) -> R {
f(&3).get()
}
fn return_it() -> isize {
with(|o| o) //~ ERROR cannot infer
}
fn main() {
}
|
#![allow(dead_code)]
|
random_line_split
|
regions-escape-via-trait-or-not.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
trait Deref {
fn get(self) -> isize;
}
impl<'a> Deref for &'a isize {
fn get(self) -> isize {
*self
}
}
fn with<R:Deref, F>(f: F) -> isize where F: FnOnce(&isize) -> R {
f(&3).get()
}
fn
|
() -> isize {
with(|o| o) //~ ERROR cannot infer
}
fn main() {
}
|
return_it
|
identifier_name
|
regions-escape-via-trait-or-not.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
trait Deref {
fn get(self) -> isize;
}
impl<'a> Deref for &'a isize {
fn get(self) -> isize {
*self
}
}
fn with<R:Deref, F>(f: F) -> isize where F: FnOnce(&isize) -> R {
f(&3).get()
}
fn return_it() -> isize
|
fn main() {
}
|
{
with(|o| o) //~ ERROR cannot infer
}
|
identifier_body
|
lib.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support code for encoding and decoding types.
/*
Core encoding and decoding interfaces.
*/
#![crate_id = "serialize#0.11.0"]
#![experimental]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/0.11.0/",
html_playground_url = "http://play.rust-lang.org/")]
#![feature(macro_rules, managed_boxes, default_type_params, phase)]
// test harness access
|
#[phase(plugin, link)]
extern crate log;
pub use self::serialize::{Decoder, Encoder, Decodable, Encodable,
DecoderHelpers, EncoderHelpers};
mod serialize;
mod collection_impls;
pub mod base64;
pub mod ebml;
pub mod hex;
pub mod json;
|
#[cfg(test)]
extern crate test;
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.