file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
load_distr_uniform.rs | extern crate rand;
use self::rand::distributions::{IndependentSample, Range};
#[derive(Clone, Debug)]
pub struct LoadDistrUniform < T > {
_buf: Vec< T >,
}
impl < T > Default for LoadDistrUniform < T > {
fn default() -> Self {
Self {
_buf: vec![],
}
}
}
impl < T > LoadDistrUniform < T > {
pub fn load( & mut self, t: T ) -> Result< (), &'static str > {
self._buf.push( t );
Ok( () )
}
pub fn apply< F >( & mut self, mut f: F ) -> Result< (), &'static str >
where F: FnMut( & T ) -> bool
{
if self._buf.len() > 0 |
self._buf.clear();
Ok( () )
}
}
| {
let between = Range::new( 0, self._buf.len() );
let mut rng = rand::thread_rng();
let i = between.ind_sample( & mut rng );
f( & self._buf[ i ] );
} | conditional_block |
load_distr_uniform.rs | extern crate rand;
use self::rand::distributions::{IndependentSample, Range};
#[derive(Clone, Debug)]
pub struct LoadDistrUniform < T > {
_buf: Vec< T >,
}
impl < T > Default for LoadDistrUniform < T > {
fn | () -> Self {
Self {
_buf: vec![],
}
}
}
impl < T > LoadDistrUniform < T > {
pub fn load( & mut self, t: T ) -> Result< (), &'static str > {
self._buf.push( t );
Ok( () )
}
pub fn apply< F >( & mut self, mut f: F ) -> Result< (), &'static str >
where F: FnMut( & T ) -> bool
{
if self._buf.len() > 0 {
let between = Range::new( 0, self._buf.len() );
let mut rng = rand::thread_rng();
let i = between.ind_sample( & mut rng );
f( & self._buf[ i ] );
}
self._buf.clear();
Ok( () )
}
}
| default | identifier_name |
physics.rs | use cgmath::{Aabb3, Point, Vector, Vector3};
use octree::Octree;
use common::entity::EntityId;
use std::collections::HashMap;
pub struct Physics {
pub terrain_octree: Octree<EntityId>,
pub misc_octree: Octree<EntityId>,
pub bounds: HashMap<EntityId, Aabb3<f32>>,
}
impl Physics {
pub fn new(world_bounds: Aabb3<f32>) -> Physics {
Physics {
terrain_octree: Octree::new(&world_bounds),
misc_octree: Octree::new(&world_bounds),
bounds: HashMap::new(),
}
}
pub fn insert_terrain(&mut self, id: EntityId, bounds: Aabb3<f32>) {
self.terrain_octree.insert(bounds.clone(), id);
self.bounds.insert(id, bounds);
}
pub fn insert_misc(&mut self, id: EntityId, bounds: Aabb3<f32>) |
pub fn remove_terrain(&mut self, id: EntityId) {
match self.bounds.get(&id) {
None => {},
Some(bounds) => {
self.terrain_octree.remove(bounds, id);
},
}
}
pub fn remove_misc(&mut self, id: EntityId) {
match self.bounds.get(&id) {
None => {},
Some(bounds) => {
self.misc_octree.remove(bounds, id);
},
}
}
pub fn get_bounds(&self, id: EntityId) -> Option<&Aabb3<f32>> {
self.bounds.get(&id)
}
pub fn reinsert(
octree: &mut Octree<EntityId>,
id: EntityId,
bounds: &mut Aabb3<f32>,
new_bounds: Aabb3<f32>,
) -> Option<(Aabb3<f32>, EntityId)> {
match octree.intersect(&new_bounds, Some(id)) {
None => {
octree.reinsert(id, bounds, new_bounds.clone());
*bounds = new_bounds;
None
},
collision => collision,
}
}
pub fn translate_misc(&mut self, id: EntityId, amount: Vector3<f32>) -> Option<(Aabb3<f32>, EntityId)> {
let bounds = self.bounds.get_mut(&id).unwrap();
let new_bounds =
Aabb3::new(
bounds.min.add_v(&amount),
bounds.max.add_v(&amount),
);
let terrain_collision = self.terrain_octree.intersect(&new_bounds, None);
if terrain_collision.is_none() {
Physics::reinsert(&mut self.misc_octree, id, bounds, new_bounds)
} else {
terrain_collision
}
}
}
| {
self.misc_octree.insert(bounds.clone(), id);
self.bounds.insert(id, bounds);
} | identifier_body |
physics.rs | use cgmath::{Aabb3, Point, Vector, Vector3};
use octree::Octree;
use common::entity::EntityId;
use std::collections::HashMap;
pub struct Physics {
pub terrain_octree: Octree<EntityId>,
pub misc_octree: Octree<EntityId>,
pub bounds: HashMap<EntityId, Aabb3<f32>>,
}
impl Physics {
pub fn new(world_bounds: Aabb3<f32>) -> Physics {
Physics {
terrain_octree: Octree::new(&world_bounds),
misc_octree: Octree::new(&world_bounds),
bounds: HashMap::new(),
}
}
pub fn insert_terrain(&mut self, id: EntityId, bounds: Aabb3<f32>) {
self.terrain_octree.insert(bounds.clone(), id);
self.bounds.insert(id, bounds);
}
pub fn insert_misc(&mut self, id: EntityId, bounds: Aabb3<f32>) {
self.misc_octree.insert(bounds.clone(), id);
self.bounds.insert(id, bounds);
}
pub fn remove_terrain(&mut self, id: EntityId) {
match self.bounds.get(&id) {
None => {},
Some(bounds) => | ,
}
}
pub fn remove_misc(&mut self, id: EntityId) {
match self.bounds.get(&id) {
None => {},
Some(bounds) => {
self.misc_octree.remove(bounds, id);
},
}
}
pub fn get_bounds(&self, id: EntityId) -> Option<&Aabb3<f32>> {
self.bounds.get(&id)
}
pub fn reinsert(
octree: &mut Octree<EntityId>,
id: EntityId,
bounds: &mut Aabb3<f32>,
new_bounds: Aabb3<f32>,
) -> Option<(Aabb3<f32>, EntityId)> {
match octree.intersect(&new_bounds, Some(id)) {
None => {
octree.reinsert(id, bounds, new_bounds.clone());
*bounds = new_bounds;
None
},
collision => collision,
}
}
pub fn translate_misc(&mut self, id: EntityId, amount: Vector3<f32>) -> Option<(Aabb3<f32>, EntityId)> {
let bounds = self.bounds.get_mut(&id).unwrap();
let new_bounds =
Aabb3::new(
bounds.min.add_v(&amount),
bounds.max.add_v(&amount),
);
let terrain_collision = self.terrain_octree.intersect(&new_bounds, None);
if terrain_collision.is_none() {
Physics::reinsert(&mut self.misc_octree, id, bounds, new_bounds)
} else {
terrain_collision
}
}
}
| {
self.terrain_octree.remove(bounds, id);
} | conditional_block |
physics.rs | use cgmath::{Aabb3, Point, Vector, Vector3};
use octree::Octree;
use common::entity::EntityId;
use std::collections::HashMap;
pub struct Physics {
pub terrain_octree: Octree<EntityId>,
pub misc_octree: Octree<EntityId>,
pub bounds: HashMap<EntityId, Aabb3<f32>>,
}
impl Physics {
pub fn new(world_bounds: Aabb3<f32>) -> Physics {
Physics {
terrain_octree: Octree::new(&world_bounds),
misc_octree: Octree::new(&world_bounds),
bounds: HashMap::new(),
}
}
pub fn insert_terrain(&mut self, id: EntityId, bounds: Aabb3<f32>) {
self.terrain_octree.insert(bounds.clone(), id);
self.bounds.insert(id, bounds);
}
pub fn insert_misc(&mut self, id: EntityId, bounds: Aabb3<f32>) {
self.misc_octree.insert(bounds.clone(), id);
self.bounds.insert(id, bounds);
}
pub fn | (&mut self, id: EntityId) {
match self.bounds.get(&id) {
None => {},
Some(bounds) => {
self.terrain_octree.remove(bounds, id);
},
}
}
pub fn remove_misc(&mut self, id: EntityId) {
match self.bounds.get(&id) {
None => {},
Some(bounds) => {
self.misc_octree.remove(bounds, id);
},
}
}
pub fn get_bounds(&self, id: EntityId) -> Option<&Aabb3<f32>> {
self.bounds.get(&id)
}
pub fn reinsert(
octree: &mut Octree<EntityId>,
id: EntityId,
bounds: &mut Aabb3<f32>,
new_bounds: Aabb3<f32>,
) -> Option<(Aabb3<f32>, EntityId)> {
match octree.intersect(&new_bounds, Some(id)) {
None => {
octree.reinsert(id, bounds, new_bounds.clone());
*bounds = new_bounds;
None
},
collision => collision,
}
}
pub fn translate_misc(&mut self, id: EntityId, amount: Vector3<f32>) -> Option<(Aabb3<f32>, EntityId)> {
let bounds = self.bounds.get_mut(&id).unwrap();
let new_bounds =
Aabb3::new(
bounds.min.add_v(&amount),
bounds.max.add_v(&amount),
);
let terrain_collision = self.terrain_octree.intersect(&new_bounds, None);
if terrain_collision.is_none() {
Physics::reinsert(&mut self.misc_octree, id, bounds, new_bounds)
} else {
terrain_collision
}
}
}
| remove_terrain | identifier_name |
physics.rs | use cgmath::{Aabb3, Point, Vector, Vector3};
use octree::Octree;
use common::entity::EntityId;
use std::collections::HashMap;
pub struct Physics {
pub terrain_octree: Octree<EntityId>,
pub misc_octree: Octree<EntityId>,
pub bounds: HashMap<EntityId, Aabb3<f32>>,
}
impl Physics {
pub fn new(world_bounds: Aabb3<f32>) -> Physics {
Physics {
terrain_octree: Octree::new(&world_bounds),
misc_octree: Octree::new(&world_bounds),
bounds: HashMap::new(),
}
}
pub fn insert_terrain(&mut self, id: EntityId, bounds: Aabb3<f32>) {
self.terrain_octree.insert(bounds.clone(), id);
self.bounds.insert(id, bounds);
}
pub fn insert_misc(&mut self, id: EntityId, bounds: Aabb3<f32>) {
self.misc_octree.insert(bounds.clone(), id);
self.bounds.insert(id, bounds);
}
pub fn remove_terrain(&mut self, id: EntityId) {
match self.bounds.get(&id) {
None => {},
Some(bounds) => {
self.terrain_octree.remove(bounds, id);
},
}
}
pub fn remove_misc(&mut self, id: EntityId) {
match self.bounds.get(&id) {
None => {}, | }
pub fn get_bounds(&self, id: EntityId) -> Option<&Aabb3<f32>> {
self.bounds.get(&id)
}
pub fn reinsert(
octree: &mut Octree<EntityId>,
id: EntityId,
bounds: &mut Aabb3<f32>,
new_bounds: Aabb3<f32>,
) -> Option<(Aabb3<f32>, EntityId)> {
match octree.intersect(&new_bounds, Some(id)) {
None => {
octree.reinsert(id, bounds, new_bounds.clone());
*bounds = new_bounds;
None
},
collision => collision,
}
}
pub fn translate_misc(&mut self, id: EntityId, amount: Vector3<f32>) -> Option<(Aabb3<f32>, EntityId)> {
let bounds = self.bounds.get_mut(&id).unwrap();
let new_bounds =
Aabb3::new(
bounds.min.add_v(&amount),
bounds.max.add_v(&amount),
);
let terrain_collision = self.terrain_octree.intersect(&new_bounds, None);
if terrain_collision.is_none() {
Physics::reinsert(&mut self.misc_octree, id, bounds, new_bounds)
} else {
terrain_collision
}
}
} | Some(bounds) => {
self.misc_octree.remove(bounds, id);
},
} | random_line_split |
main.rs |
#[derive(Debug, Copy, Clone)]
enum IncExc {
Inc(f32),
Exc(f32),
}
use IncExc::{Exc, Inc};
fn parse_f32_range(s: &str, mn: IncExc, mx: IncExc) -> Result<f32, Error> {
let x = s.parse()?;
let in_range = match (mn, mx) {
(Inc(mn), Inc(mx)) => mn <= x && x <= mx,
(Exc(mn), Inc(mx)) => mn < x && x <= mx,
(Inc(mn), Exc(mx)) => mn <= x && x < mx,
(Exc(mn), Exc(mx)) => mn < x && x < mx,
};
if in_range {
Ok(x)
} else {
Err(err_msg(format!(
"number {} not in range {:?}.. {:?}",
x, mn, mx
)))
}
}
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} [0] [1] [N] [options]", program);
print!("{}", opts.usage(&brief));
}
fn main() {
match mainr() {
Err(err) => {
for c in err.causes() {
println!("{}", c);
}
//println!("{}", err.backtrace());
process::exit(101);
}
_ => (),
}
}
fn mainr() -> Result<(), Error> {
// install SEH guard to continue on continuable structured exception
let _seh_guard = seh::SehGuard::new();
// install ctrl-C handler to shutdown gracefully
seh::install_ctrl_c_handler();
// Initialize logger
env_logger::init().unwrap();
println!("sound::init()");
sound::init();
// Set default values of cubes module parameters
let mut parms = CubeParms {
thread_count: 2,
object_count: 2_000,
backbuffer_count: FRAME_COUNT,
speed_mult: 1.0,
concurrent_state_update: true, // unused
debug_layer: cfg!(debug_assertions),
rt_format: DXGI_FORMAT_R16G16B16A16_FLOAT,
enable_srgb: false,
render_trace: false,
fovy_deg: 60.,
render_text: true,
};
let mut adapters_to_test = vec![];
let mut adapters_info = false;
let program_name = env::args().next().unwrap().clone();
let mut opts = Options::new();
opts.optflag("h", "help", "Usage");
opts.optflag("i", "info", "Adapters information");
opts.optflag("", "f32-color", "Render using R32G32B32_FLOAT");
opts.optflag("d", "debug", "Enable debug layer");
opts.optflag("", "nodebug", "Disable debug layer");
opts.optflag("", "notext", "Disable DWrite text output");
opts.optflag("", "trace", "Enable logging in render cycle");
opts.optflag(
"",
"srgb",
"Pretend that backbuffer uses sRGB (doesn't seem to affect anything)",
);
opts.optopt("o", "objects", "Number of cubes", "NUM");
opts.optopt("t", "", "Number of state update threads", "NUM");
opts.optopt("b", "", "Number of backbuffers", "NUM");
opts.optopt("s", "", "Time speed multiplier", "FLOAT");
opts.optopt("", "fov", "Vertical FOV", "FLOAT");
let ms = opts.parse(env::args().skip(1))?;
// help
if ms.opt_present("h") {
print_usage(&program_name, opts);
return Ok(());
};
// adapters to use
for anum in &ms.free {
let n = anum.parse::<u32>()
.with_context(|_| format!("Unrecognized command line argument \"{}\"", anum))?;
adapters_to_test.push(n);
}
// adapters info
if ms.opt_present("i") {
adapters_info = true;
};
// use f32 color
if ms.opt_present("f32-color") {
parms.rt_format = DXGI_FORMAT_R32G32B32A32_FLOAT;
};
// enable sRGB
if ms.opt_present("srgb") {
parms.enable_srgb = true;
};
if ms.opt_present("d") {
parms.debug_layer = true;
};
if ms.opt_present("nodebug") {
parms.debug_layer = false;
};
if ms.opt_present("notext") {
parms.render_text = false;
};
if ms.opt_present("trace") {
parms.render_trace = true;
};
// object count
if let Some(num) = ms.opt_str("o") {
parms.object_count = parse_non_zero_u32(&num).with_context(|_| {
format!(
"Object count in -o should be positive non-zero integer, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("t") {
parms.thread_count = parse_non_zero_u32(&num).with_context(|_| {
format!(
"Thread count in -t should be positive non-zero integer, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("b") {
parms.backbuffer_count = parse_non_zero_u32(&num).with_context(|_| {
format!(
"Backbuffer count in -b should be positive non-zero integer, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("s") {
parms.speed_mult = parse_f32_range(&num, Inc(0.), Inc(::std::f32::INFINITY))
.with_context(|_| {
format!(
"Time speed multiplier in -s should be non-negative floating number, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("fov") {
parms.fovy_deg = parse_f32_range(&num, Exc(0.), Exc(180.)).with_context(|_| {
format!(
"FOV in --fov should be floating number in (0., 180.) range, not '{}'",
num
)
})?;
};
let factory: DXGIFactory4 = match create_dxgi_factory2(parms.debug_layer) {
Ok(fact) => fact,
Err(hr) => bail!("Cannot create DXGIFactory4 (0x{:x}). No can do. {}", hr, utils::hr2msg(hr)),
};
let mut adapters = vec![];
// Iterate over available GPUs
for (i, adapter) in &factory {
let descr = adapter.get_desc1().unwrap();
println!(
"Adapter {}: {}",
i,
wchar_array_to_string_lossy(&descr.Description)
);
println!(
" Dedicated video memory: {}MiB",
descr.DedicatedVideoMemory / 1024 / 1024
);
if adapters_to_test.is_empty() || adapters_to_test[..].contains(&i) {
// If there's no numbers in command line add each and every available adapter,
// otherwise use adapter numbers from command line
if adapters_info {
print_adapter_info(&adapter);
}
adapters.push(adapter);
}
}
if adapters_info {
return Ok(());
}
// I used this mutex to sync console output.
let mutex = Arc::new(Mutex::new(()));
crossbeam::scope(|scope| {
//// d2d1 test window
// scope.spawn(|| {
// if let Err(err) = d2d1test::main() {
// error!("d2d1test::main() error 0x{:x}", err);
// }
// });
for (id, a) in adapters.into_iter().enumerate() {
let mutex = mutex.clone();
let parms = &parms;
let dxgi_factory = factory.clone();
scope.spawn(move || {
// Spawn a thread for each adapter
match main_prime(id, dxgi_factory, a, mutex, parms) {
Err(err) => {
let stdout = ::std::io::stdout();
let mut handle = stdout.lock();
for c in err.causes() {
let _ = writeln!(handle, "{}", c);
}
}
_ => (),
}
});
}
});
seh::uninstall_ctrl_c_handler();
Ok(())
}
fn print_adapter_info(adapter: &DXGIAdapter1) {
if let Ok(dev) = d3d12_create_device(Some(adapter), D3D_FEATURE_LEVEL_11_0) {
if let Ok(data) = dev.check_feature_support_virtual_address() {
println!(" {:#?}", data);
}
if let Ok(data) = dev.check_feature_support_options() {
println!(" {:#?}", data);
}
let rdesc = D3D12_RESOURCE_DESC {
Dimension: D3D12_RESOURCE_DIMENSION_TEXTURE1D,
Alignment: 0,
Width: 1024,
Height: 1,
DepthOrArraySize: 1,
MipLevels: 10,
Format: DXGI_FORMAT_R32G32B32A32_FLOAT,
SampleDesc: DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
Layout: D3D12_TEXTURE_LAYOUT_UNKNOWN,
Flags: D3D12_RESOURCE_FLAG_NONE,
};
let rai = dev.get_resource_allocation_info(0, &[rdesc]);
println!(" {:#?}", rai);
}
}
const VK_A: i32 = b'A' as i32;
const VK_S: i32 = b'S' as i32;
const VK_D: i32 = b'D' as i32;
const VK_F: i32 = b'F' as i32;
const VK_Q: i32 = b'Q' as i32;
const VK_W: i32 = b'W' as i32;
const VK_E: i32 = b'E' as i32;
const VK_R: i32 = b'R' as i32;
const VK_P: i32 = b'P' as i32;
const KEYS_LEN: usize = 256;
struct KeyState {
keys: [bool; KEYS_LEN],
}
impl KeyState {
pub fn set(&mut self, vk: i32) {
if vk >= 0 && vk < self.keys.len() as i32 {
self.keys[vk as usize] = true;
}
}
pub fn unset(&mut self, vk: i32) {
if vk >= 0 && vk < self.keys.len() as i32 {
self.keys[vk as usize] = false;
}
}
pub fn pressed(&self, vk: i32) -> bool {
if vk >= 0 && vk < self.keys.len() as i32 {
self.keys[vk as usize]
} else {
false
}
}
}
impl Default for KeyState {
fn default() -> KeyState {
KeyState {
keys: [false; KEYS_LEN],
}
}
}
fn main_prime(
id: usize,
dxgi_factory: DXGIFactory4,
adapter: DXGIAdapter1,
_mutex: Arc<Mutex<()>>,
parms: &CubeParms,
) -> Result<(), Error>
{
// Setup window. Currently window module supports only one window per thread.
let descr = wchar_array_to_string_lossy(&adapter.get_desc1().into_error_context("Cannot get adapter description")?.Description);
let title = format!("D3D12 Hello, rusty world! ({})", descr);
let wnd = create_window(&title, 512, 256);
let mut fps = 0.0;
// Initialization of cubes module data required to render all stuff
let data =
RefCell::new(
cubes::AppData::on_init(&wnd, &dxgi_factory, &adapter, parms)
.with_context(|_| format!("Adapter '{}' failed initialization", descr))?);
// x and y store last mouse coords from WM_MOUSEMOVE
let mut x: i32 = 0;
let mut y: i32 = 0;
let mut keys = KeyState::default();
// state of left mouse button
let mut mouse_down = false;
let mut pause = false;
// Profiling stuff
let mut start = time::precise_time_s();
let mut prev_frame_time = time::precise_time_s();
// Window::poll_events() returns non-blocking iterator, which returns Option<MSG>
for mmsg in wnd.poll_events() {
// "if let Some(msg)" extracts msg from mmsg
// if mmsg is None, then 'else' branch is taken
if let Some(msg) = mmsg {
//trace!("{:?} {} {:x} {:x}", msg.time, wmtext::wm_str(msg.message), msg.wParam, msg.lParam);
// Instead of passing messages into cubes module, I process them here
// It is not well-thought-out design decision, it's just slightly simpler now, and cost of changing it is low.
match msg.message {
// Usual message processing stuff
WM_SIZE => {
// Normally this message goes to wndproc, in window.rs I repost it into message queue to prevent reentrancy problems
debug!("WM_SIZE {}, {} ", msg.wParam, msg.lParam);
data.borrow_mut().on_resize(
LOWORD(msg.lParam as u32) as u32,
HIWORD(msg.lParam as u32) as u32,
msg.wParam as u32,
);
}
WM_MOUSEMOVE => {
let x1 = GET_X_LPARAM(msg.lParam) as i32;
let y1 = GET_Y_LPARAM(msg.lParam) as i32;
let (dx, dy) = (x1 - x, y - y1);
x = x1;
y = y1;
if mouse_down {
let mut data = data.borrow_mut();
let camera = data.camera();
let fov = camera.fov();
camera.roty(dy as f32 * fov / 300.);
camera.rotx(-dx as f32 * fov / 300.);
}
}
WM_MOUSEWHEEL => {
let dz = (GET_WHEEL_DELTA_WPARAM(msg.wParam) as f32) / (WHEEL_DELTA as f32);
let mut data = data.borrow_mut();
let camera = data.camera();
let mut vfov = camera.fov();
if vfov < 5.0 {
vfov -= dz * 0.2;
} else {
vfov -= dz * 2.0;
};
vfov = f32::max(vfov, 0.2);
vfov = f32::min(vfov, 120.);
camera.set_fov(vfov);
}
WM_KEYDOWN => {
keys.set(msg.wParam as i32);
}
WM_KEYUP => {
let vk = msg.wParam as i32;
keys.unset(vk);
if vk == VK_P {
pause =!pause;
};
}
WM_LBUTTONDOWN => {
mouse_down = true;
set_capture(wnd.get_hwnd());
}
WM_LBUTTONUP => {
mouse_down = false;
release_capture();
}
_ => {}
};
} else {
// There's no pending window message.
let do_not_render = data.borrow().is_minimized();
if do_not_render {
// MSDN suggest to use MsgWaitForMultipleObjects here, but 10ms sleep shouldn't create problems
std::thread::sleep(Duration::from_millis(10));
} else {
let cur_frame_time = time::precise_time_s();
let frame_dt = cur_frame_time - prev_frame_time;
prev_frame_time = cur_frame_time;
// data is Rc<RefCell<cubes::AppData>>
// Rc is not really needed. I didn't pass it around.
// Take a mutable reference to cubes::AppData
let mut data = data.borrow_mut();
{
// Process WASD keys
let camera = data.camera();
camera.restore_up((180. * frame_dt) as f32);
let step = match (keys.pressed(VK_SHIFT), keys.pressed(VK_CONTROL)) {
(false, false) => 0.1,
(true, false) => 1.0,
(false, true) => 0.01,
(true, true) => 0.001,
};
if keys.pressed(VK_W) {
camera.go(step, 0., 0.);
};
if keys.pressed(VK_S) {
camera.go(-step, 0., 0.);
};
if keys.pressed(VK_A) {
camera.go(0., -step, 0.);
};
if keys.pressed(VK_D) {
camera.go(0., step, 0.);
};
if keys.pressed(VK_R) {
camera.go(0., 0., step);
};
if keys.pressed(VK_F) {
camera.go(0., 0., -step);
};
// Process Q and E keys. They control camera's roll
if keys.pressed(VK_Q) {
camera.rotz(-step);
| {
let n = s.parse()?;
if n > 0 {
Ok(n)
} else {
Err(err_msg("number shouldn't be zero"))
}
} | identifier_body |
|
main.rs | else {
Err(err_msg("number shouldn't be zero"))
}
}
#[derive(Debug, Copy, Clone)]
enum IncExc {
Inc(f32),
Exc(f32),
}
use IncExc::{Exc, Inc};
fn parse_f32_range(s: &str, mn: IncExc, mx: IncExc) -> Result<f32, Error> {
let x = s.parse()?;
let in_range = match (mn, mx) {
(Inc(mn), Inc(mx)) => mn <= x && x <= mx,
(Exc(mn), Inc(mx)) => mn < x && x <= mx,
(Inc(mn), Exc(mx)) => mn <= x && x < mx,
(Exc(mn), Exc(mx)) => mn < x && x < mx,
};
if in_range {
Ok(x)
} else {
Err(err_msg(format!(
"number {} not in range {:?}.. {:?}",
x, mn, mx
)))
}
}
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} [0] [1] [N] [options]", program);
print!("{}", opts.usage(&brief));
}
fn main() {
match mainr() {
Err(err) => {
for c in err.causes() {
println!("{}", c);
}
//println!("{}", err.backtrace());
process::exit(101);
}
_ => (),
}
}
fn mainr() -> Result<(), Error> {
// install SEH guard to continue on continuable structured exception
let _seh_guard = seh::SehGuard::new();
// install ctrl-C handler to shutdown gracefully
seh::install_ctrl_c_handler();
// Initialize logger
env_logger::init().unwrap();
println!("sound::init()");
sound::init();
// Set default values of cubes module parameters
let mut parms = CubeParms {
thread_count: 2,
object_count: 2_000,
backbuffer_count: FRAME_COUNT,
speed_mult: 1.0,
concurrent_state_update: true, // unused
debug_layer: cfg!(debug_assertions),
rt_format: DXGI_FORMAT_R16G16B16A16_FLOAT,
enable_srgb: false,
render_trace: false,
fovy_deg: 60.,
render_text: true,
};
let mut adapters_to_test = vec![];
let mut adapters_info = false;
let program_name = env::args().next().unwrap().clone();
let mut opts = Options::new();
opts.optflag("h", "help", "Usage");
opts.optflag("i", "info", "Adapters information");
opts.optflag("", "f32-color", "Render using R32G32B32_FLOAT");
opts.optflag("d", "debug", "Enable debug layer");
opts.optflag("", "nodebug", "Disable debug layer");
opts.optflag("", "notext", "Disable DWrite text output");
opts.optflag("", "trace", "Enable logging in render cycle");
opts.optflag(
"",
"srgb",
"Pretend that backbuffer uses sRGB (doesn't seem to affect anything)",
);
opts.optopt("o", "objects", "Number of cubes", "NUM");
opts.optopt("t", "", "Number of state update threads", "NUM");
opts.optopt("b", "", "Number of backbuffers", "NUM");
opts.optopt("s", "", "Time speed multiplier", "FLOAT");
opts.optopt("", "fov", "Vertical FOV", "FLOAT");
let ms = opts.parse(env::args().skip(1))?;
// help
if ms.opt_present("h") {
print_usage(&program_name, opts);
return Ok(());
};
// adapters to use
for anum in &ms.free {
let n = anum.parse::<u32>()
.with_context(|_| format!("Unrecognized command line argument \"{}\"", anum))?;
adapters_to_test.push(n);
}
// adapters info
if ms.opt_present("i") {
adapters_info = true;
};
// use f32 color
if ms.opt_present("f32-color") {
parms.rt_format = DXGI_FORMAT_R32G32B32A32_FLOAT;
};
// enable sRGB
if ms.opt_present("srgb") {
parms.enable_srgb = true;
};
if ms.opt_present("d") {
parms.debug_layer = true;
};
if ms.opt_present("nodebug") {
parms.debug_layer = false;
};
if ms.opt_present("notext") {
parms.render_text = false;
};
if ms.opt_present("trace") {
parms.render_trace = true;
};
// object count
if let Some(num) = ms.opt_str("o") {
parms.object_count = parse_non_zero_u32(&num).with_context(|_| {
format!(
"Object count in -o should be positive non-zero integer, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("t") {
parms.thread_count = parse_non_zero_u32(&num).with_context(|_| {
format!(
"Thread count in -t should be positive non-zero integer, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("b") {
parms.backbuffer_count = parse_non_zero_u32(&num).with_context(|_| {
format!(
"Backbuffer count in -b should be positive non-zero integer, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("s") {
parms.speed_mult = parse_f32_range(&num, Inc(0.), Inc(::std::f32::INFINITY))
.with_context(|_| {
format!(
"Time speed multiplier in -s should be non-negative floating number, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("fov") {
parms.fovy_deg = parse_f32_range(&num, Exc(0.), Exc(180.)).with_context(|_| {
format!(
"FOV in --fov should be floating number in (0., 180.) range, not '{}'",
num
)
})?;
};
let factory: DXGIFactory4 = match create_dxgi_factory2(parms.debug_layer) {
Ok(fact) => fact,
Err(hr) => bail!("Cannot create DXGIFactory4 (0x{:x}). No can do. {}", hr, utils::hr2msg(hr)),
};
let mut adapters = vec![];
// Iterate over available GPUs
for (i, adapter) in &factory {
let descr = adapter.get_desc1().unwrap();
println!(
"Adapter {}: {}",
i,
wchar_array_to_string_lossy(&descr.Description)
);
println!(
" Dedicated video memory: {}MiB",
descr.DedicatedVideoMemory / 1024 / 1024
);
if adapters_to_test.is_empty() || adapters_to_test[..].contains(&i) {
// If there's no numbers in command line add each and every available adapter,
// otherwise use adapter numbers from command line
if adapters_info {
print_adapter_info(&adapter);
}
adapters.push(adapter);
}
}
if adapters_info {
return Ok(());
}
// I used this mutex to sync console output.
let mutex = Arc::new(Mutex::new(()));
crossbeam::scope(|scope| {
//// d2d1 test window
// scope.spawn(|| {
// if let Err(err) = d2d1test::main() {
// error!("d2d1test::main() error 0x{:x}", err);
// }
// });
for (id, a) in adapters.into_iter().enumerate() {
let mutex = mutex.clone();
let parms = &parms;
let dxgi_factory = factory.clone();
scope.spawn(move || {
// Spawn a thread for each adapter
match main_prime(id, dxgi_factory, a, mutex, parms) {
Err(err) => {
let stdout = ::std::io::stdout();
let mut handle = stdout.lock();
for c in err.causes() {
let _ = writeln!(handle, "{}", c);
}
}
_ => (),
}
});
}
});
seh::uninstall_ctrl_c_handler();
Ok(())
}
fn print_adapter_info(adapter: &DXGIAdapter1) {
if let Ok(dev) = d3d12_create_device(Some(adapter), D3D_FEATURE_LEVEL_11_0) {
if let Ok(data) = dev.check_feature_support_virtual_address() {
println!(" {:#?}", data);
}
if let Ok(data) = dev.check_feature_support_options() {
println!(" {:#?}", data);
}
let rdesc = D3D12_RESOURCE_DESC {
Dimension: D3D12_RESOURCE_DIMENSION_TEXTURE1D,
Alignment: 0,
Width: 1024,
Height: 1,
DepthOrArraySize: 1,
MipLevels: 10,
Format: DXGI_FORMAT_R32G32B32A32_FLOAT,
SampleDesc: DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
Layout: D3D12_TEXTURE_LAYOUT_UNKNOWN,
Flags: D3D12_RESOURCE_FLAG_NONE,
};
let rai = dev.get_resource_allocation_info(0, &[rdesc]);
println!(" {:#?}", rai);
}
}
const VK_A: i32 = b'A' as i32;
const VK_S: i32 = b'S' as i32;
const VK_D: i32 = b'D' as i32;
const VK_F: i32 = b'F' as i32;
const VK_Q: i32 = b'Q' as i32;
const VK_W: i32 = b'W' as i32;
const VK_E: i32 = b'E' as i32;
const VK_R: i32 = b'R' as i32;
const VK_P: i32 = b'P' as i32;
const KEYS_LEN: usize = 256;
struct KeyState {
keys: [bool; KEYS_LEN],
}
impl KeyState {
pub fn set(&mut self, vk: i32) {
if vk >= 0 && vk < self.keys.len() as i32 {
self.keys[vk as usize] = true;
}
}
pub fn unset(&mut self, vk: i32) {
if vk >= 0 && vk < self.keys.len() as i32 {
self.keys[vk as usize] = false;
}
}
pub fn pressed(&self, vk: i32) -> bool {
if vk >= 0 && vk < self.keys.len() as i32 {
self.keys[vk as usize]
} else {
false
}
}
}
impl Default for KeyState {
fn default() -> KeyState {
KeyState {
keys: [false; KEYS_LEN],
}
}
}
fn main_prime(
id: usize,
dxgi_factory: DXGIFactory4,
adapter: DXGIAdapter1,
_mutex: Arc<Mutex<()>>,
parms: &CubeParms,
) -> Result<(), Error>
{
// Setup window. Currently window module supports only one window per thread.
let descr = wchar_array_to_string_lossy(&adapter.get_desc1().into_error_context("Cannot get adapter description")?.Description);
let title = format!("D3D12 Hello, rusty world! ({})", descr);
let wnd = create_window(&title, 512, 256);
let mut fps = 0.0;
// Initialization of cubes module data required to render all stuff
let data =
RefCell::new(
cubes::AppData::on_init(&wnd, &dxgi_factory, &adapter, parms)
.with_context(|_| format!("Adapter '{}' failed initialization", descr))?);
// x and y store last mouse coords from WM_MOUSEMOVE
let mut x: i32 = 0;
let mut y: i32 = 0;
let mut keys = KeyState::default();
// state of left mouse button
let mut mouse_down = false;
let mut pause = false;
// Profiling stuff
let mut start = time::precise_time_s();
let mut prev_frame_time = time::precise_time_s();
// Window::poll_events() returns non-blocking iterator, which returns Option<MSG>
for mmsg in wnd.poll_events() {
// "if let Some(msg)" extracts msg from mmsg
// if mmsg is None, then 'else' branch is taken
if let Some(msg) = mmsg {
//trace!("{:?} {} {:x} {:x}", msg.time, wmtext::wm_str(msg.message), msg.wParam, msg.lParam);
// Instead of passing messages into cubes module, I process them here
// It is not well-thought-out design decision, it's just slightly simpler now, and cost of changing it is low.
match msg.message {
// Usual message processing stuff
WM_SIZE => {
// Normally this message goes to wndproc, in window.rs I repost it into message queue to prevent reentrancy problems
debug!("WM_SIZE {}, {} ", msg.wParam, msg.lParam);
data.borrow_mut().on_resize(
LOWORD(msg.lParam as u32) as u32,
HIWORD(msg.lParam as u32) as u32,
msg.wParam as u32,
);
}
WM_MOUSEMOVE => {
let x1 = GET_X_LPARAM(msg.lParam) as i32;
let y1 = GET_Y_LPARAM(msg.lParam) as i32;
let (dx, dy) = (x1 - x, y - y1);
x = x1;
y = y1;
if mouse_down {
let mut data = data.borrow_mut();
let camera = data.camera();
let fov = camera.fov();
camera.roty(dy as f32 * fov / 300.);
camera.rotx(-dx as f32 * fov / 300.);
}
}
WM_MOUSEWHEEL => {
let dz = (GET_WHEEL_DELTA_WPARAM(msg.wParam) as f32) / (WHEEL_DELTA as f32);
let mut data = data.borrow_mut();
let camera = data.camera();
let mut vfov = camera.fov();
if vfov < 5.0 {
vfov -= dz * 0.2;
} else {
vfov -= dz * 2.0;
};
vfov = f32::max(vfov, 0.2);
vfov = f32::min(vfov, 120.);
camera.set_fov(vfov);
}
WM_KEYDOWN => {
keys.set(msg.wParam as i32);
}
WM_KEYUP => {
let vk = msg.wParam as i32;
keys.unset(vk);
if vk == VK_P {
pause =!pause;
};
}
WM_LBUTTONDOWN => {
mouse_down = true;
set_capture(wnd.get_hwnd());
}
WM_LBUTTONUP => {
mouse_down = false;
release_capture();
}
_ => {}
};
} else {
// There's no pending window message.
let do_not_render = data.borrow().is_minimized();
if do_not_render {
// MSDN suggest to use MsgWaitForMultipleObjects here, but 10ms sleep shouldn't create problems
std::thread::sleep(Duration::from_millis(10));
} else {
let cur_frame_time = time::precise_time_s();
let frame_dt = cur_frame_time - prev_frame_time;
prev_frame_time = cur_frame_time;
// data is Rc<RefCell<cubes::AppData>>
// Rc is not really needed. I didn't pass it around.
// Take a mutable reference to cubes::AppData
let mut data = data.borrow_mut();
{
// Process WASD keys
let camera = data.camera();
camera.restore_up((180. * frame_dt) as f32);
let step = match (keys.pressed(VK_SHIFT), keys.pressed(VK_CONTROL)) {
(false, false) => 0.1,
(true, false) => 1.0,
(false, true) => 0.01,
(true, true) => 0.001,
};
if keys.pressed(VK_W) {
camera.go(step, 0., 0.);
};
if keys.pressed(VK_S) {
camera.go(-step, 0., 0.);
};
if keys.pressed(VK_A) {
camera.go(0., -step, 0.);
};
if keys.pressed(VK_D) {
camera.go(0., step, 0.);
};
if keys.pressed(VK_R) {
camera.go(0., 0., step);
};
if keys.pressed(VK_F) {
camera.go(0., 0., -step);
};
// Process Q and E keys. They control camera's roll
if keys.pressed(VK_Q) {
camera.rotz(-step);
};
| {
Ok(n)
} | conditional_block |
|
main.rs | '",
num
)
})?;
};
if let Some(num) = ms.opt_str("t") {
parms.thread_count = parse_non_zero_u32(&num).with_context(|_| {
format!(
"Thread count in -t should be positive non-zero integer, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("b") {
parms.backbuffer_count = parse_non_zero_u32(&num).with_context(|_| {
format!(
"Backbuffer count in -b should be positive non-zero integer, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("s") {
parms.speed_mult = parse_f32_range(&num, Inc(0.), Inc(::std::f32::INFINITY))
.with_context(|_| {
format!(
"Time speed multiplier in -s should be non-negative floating number, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("fov") {
parms.fovy_deg = parse_f32_range(&num, Exc(0.), Exc(180.)).with_context(|_| {
format!(
"FOV in --fov should be floating number in (0., 180.) range, not '{}'",
num
)
})?;
};
let factory: DXGIFactory4 = match create_dxgi_factory2(parms.debug_layer) {
Ok(fact) => fact,
Err(hr) => bail!("Cannot create DXGIFactory4 (0x{:x}). No can do. {}", hr, utils::hr2msg(hr)),
};
let mut adapters = vec![];
// Iterate over available GPUs
for (i, adapter) in &factory {
let descr = adapter.get_desc1().unwrap();
println!(
"Adapter {}: {}",
i,
wchar_array_to_string_lossy(&descr.Description)
);
println!(
" Dedicated video memory: {}MiB",
descr.DedicatedVideoMemory / 1024 / 1024
);
if adapters_to_test.is_empty() || adapters_to_test[..].contains(&i) {
// If there's no numbers in command line add each and every available adapter,
// otherwise use adapter numbers from command line
if adapters_info {
print_adapter_info(&adapter);
}
adapters.push(adapter);
}
}
if adapters_info {
return Ok(());
}
// I used this mutex to sync console output.
let mutex = Arc::new(Mutex::new(()));
crossbeam::scope(|scope| {
//// d2d1 test window
// scope.spawn(|| {
// if let Err(err) = d2d1test::main() {
// error!("d2d1test::main() error 0x{:x}", err);
// }
// });
for (id, a) in adapters.into_iter().enumerate() {
let mutex = mutex.clone();
let parms = &parms;
let dxgi_factory = factory.clone();
scope.spawn(move || {
// Spawn a thread for each adapter
match main_prime(id, dxgi_factory, a, mutex, parms) {
Err(err) => {
let stdout = ::std::io::stdout();
let mut handle = stdout.lock();
for c in err.causes() {
let _ = writeln!(handle, "{}", c);
}
}
_ => (),
}
});
}
});
seh::uninstall_ctrl_c_handler();
Ok(())
}
fn print_adapter_info(adapter: &DXGIAdapter1) {
if let Ok(dev) = d3d12_create_device(Some(adapter), D3D_FEATURE_LEVEL_11_0) {
if let Ok(data) = dev.check_feature_support_virtual_address() {
println!(" {:#?}", data);
}
if let Ok(data) = dev.check_feature_support_options() {
println!(" {:#?}", data);
}
let rdesc = D3D12_RESOURCE_DESC {
Dimension: D3D12_RESOURCE_DIMENSION_TEXTURE1D,
Alignment: 0,
Width: 1024,
Height: 1,
DepthOrArraySize: 1,
MipLevels: 10,
Format: DXGI_FORMAT_R32G32B32A32_FLOAT,
SampleDesc: DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
Layout: D3D12_TEXTURE_LAYOUT_UNKNOWN,
Flags: D3D12_RESOURCE_FLAG_NONE,
};
let rai = dev.get_resource_allocation_info(0, &[rdesc]);
println!(" {:#?}", rai);
}
}
const VK_A: i32 = b'A' as i32;
const VK_S: i32 = b'S' as i32;
const VK_D: i32 = b'D' as i32;
const VK_F: i32 = b'F' as i32;
const VK_Q: i32 = b'Q' as i32;
const VK_W: i32 = b'W' as i32;
const VK_E: i32 = b'E' as i32;
const VK_R: i32 = b'R' as i32;
const VK_P: i32 = b'P' as i32;
const KEYS_LEN: usize = 256;
struct KeyState {
keys: [bool; KEYS_LEN],
}
impl KeyState {
pub fn set(&mut self, vk: i32) {
if vk >= 0 && vk < self.keys.len() as i32 {
self.keys[vk as usize] = true;
}
}
pub fn unset(&mut self, vk: i32) {
if vk >= 0 && vk < self.keys.len() as i32 {
self.keys[vk as usize] = false;
}
}
pub fn pressed(&self, vk: i32) -> bool {
if vk >= 0 && vk < self.keys.len() as i32 {
self.keys[vk as usize]
} else {
false
}
}
}
impl Default for KeyState {
fn default() -> KeyState {
KeyState {
keys: [false; KEYS_LEN],
}
}
}
fn main_prime(
id: usize,
dxgi_factory: DXGIFactory4,
adapter: DXGIAdapter1,
_mutex: Arc<Mutex<()>>,
parms: &CubeParms,
) -> Result<(), Error>
{
// Setup window. Currently window module supports only one window per thread.
let descr = wchar_array_to_string_lossy(&adapter.get_desc1().into_error_context("Cannot get adapter description")?.Description);
let title = format!("D3D12 Hello, rusty world! ({})", descr);
let wnd = create_window(&title, 512, 256);
let mut fps = 0.0;
// Initialization of cubes module data required to render all stuff
let data =
RefCell::new(
cubes::AppData::on_init(&wnd, &dxgi_factory, &adapter, parms)
.with_context(|_| format!("Adapter '{}' failed initialization", descr))?);
// x and y store last mouse coords from WM_MOUSEMOVE
let mut x: i32 = 0;
let mut y: i32 = 0;
let mut keys = KeyState::default();
// state of left mouse button
let mut mouse_down = false;
let mut pause = false;
// Profiling stuff
let mut start = time::precise_time_s();
let mut prev_frame_time = time::precise_time_s();
// Window::poll_events() returns non-blocking iterator, which returns Option<MSG>
for mmsg in wnd.poll_events() {
// "if let Some(msg)" extracts msg from mmsg
// if mmsg is None, then 'else' branch is taken
if let Some(msg) = mmsg {
//trace!("{:?} {} {:x} {:x}", msg.time, wmtext::wm_str(msg.message), msg.wParam, msg.lParam);
// Instead of passing messages into cubes module, I process them here
// It is not well-thought-out design decision, it's just slightly simpler now, and cost of changing it is low.
match msg.message {
// Usual message processing stuff
WM_SIZE => {
// Normally this message goes to wndproc, in window.rs I repost it into message queue to prevent reentrancy problems
debug!("WM_SIZE {}, {} ", msg.wParam, msg.lParam);
data.borrow_mut().on_resize(
LOWORD(msg.lParam as u32) as u32,
HIWORD(msg.lParam as u32) as u32,
msg.wParam as u32,
);
}
WM_MOUSEMOVE => {
let x1 = GET_X_LPARAM(msg.lParam) as i32;
let y1 = GET_Y_LPARAM(msg.lParam) as i32;
let (dx, dy) = (x1 - x, y - y1);
x = x1;
y = y1;
if mouse_down {
let mut data = data.borrow_mut();
let camera = data.camera();
let fov = camera.fov();
camera.roty(dy as f32 * fov / 300.);
camera.rotx(-dx as f32 * fov / 300.);
}
}
WM_MOUSEWHEEL => {
let dz = (GET_WHEEL_DELTA_WPARAM(msg.wParam) as f32) / (WHEEL_DELTA as f32);
let mut data = data.borrow_mut();
let camera = data.camera();
let mut vfov = camera.fov();
if vfov < 5.0 {
vfov -= dz * 0.2;
} else {
vfov -= dz * 2.0;
};
vfov = f32::max(vfov, 0.2);
vfov = f32::min(vfov, 120.);
camera.set_fov(vfov);
}
WM_KEYDOWN => {
keys.set(msg.wParam as i32);
}
WM_KEYUP => {
let vk = msg.wParam as i32;
keys.unset(vk);
if vk == VK_P {
pause =!pause;
};
}
WM_LBUTTONDOWN => {
mouse_down = true;
set_capture(wnd.get_hwnd());
}
WM_LBUTTONUP => {
mouse_down = false;
release_capture();
}
_ => {}
};
} else {
// There's no pending window message.
let do_not_render = data.borrow().is_minimized();
if do_not_render {
// MSDN suggest to use MsgWaitForMultipleObjects here, but 10ms sleep shouldn't create problems
std::thread::sleep(Duration::from_millis(10));
} else {
let cur_frame_time = time::precise_time_s();
let frame_dt = cur_frame_time - prev_frame_time;
prev_frame_time = cur_frame_time;
// data is Rc<RefCell<cubes::AppData>>
// Rc is not really needed. I didn't pass it around.
// Take a mutable reference to cubes::AppData
let mut data = data.borrow_mut();
{
// Process WASD keys
let camera = data.camera();
camera.restore_up((180. * frame_dt) as f32);
let step = match (keys.pressed(VK_SHIFT), keys.pressed(VK_CONTROL)) {
(false, false) => 0.1,
(true, false) => 1.0,
(false, true) => 0.01,
(true, true) => 0.001,
};
if keys.pressed(VK_W) {
camera.go(step, 0., 0.);
};
if keys.pressed(VK_S) {
camera.go(-step, 0., 0.);
};
if keys.pressed(VK_A) {
camera.go(0., -step, 0.);
};
if keys.pressed(VK_D) {
camera.go(0., step, 0.);
};
if keys.pressed(VK_R) {
camera.go(0., 0., step);
};
if keys.pressed(VK_F) {
camera.go(0., 0., -step);
};
// Process Q and E keys. They control camera's roll
if keys.pressed(VK_Q) {
camera.rotz(-step);
};
if keys.pressed(VK_E) {
camera.rotz(step);
};
}
// For this simple program I don't separate update and render steps.
// State change and rendering is done inside on_render.
::perf_start("total");
let render_dt = if frame_dt > 0.1 { 0.1 } else { frame_dt as f32 };
data.on_render(pause, fps, render_dt)?;
::perf_end("total");
// register rendered frame in performance collector
::perf_frame();
// fps counting stuff
let now = time::precise_time_s();
let frames = PERFDATA.with(|p_data| p_data.borrow().frames);
if frames > 0 && now < start || now >= (start + 1.0) {
// Once per second show stats
print!("Adapter {} FPS: {:4}", id, frames);
fps = frames as f32;
PERFDATA.with(|p_data| {
let p_data = p_data.borrow();
let frames = p_data.frames as f64;
for (&pname, val) in p_data
.perf
.iter()
.sorted_by(|&(&n1, _), &(&n2, _)| n1.cmp(n2))
{
print!(" {}:{:.2}", pname, val * 1000. / frames);
}
println!("");
});
let _ = ::std::io::Write::flush(&mut ::std::io::stdout());
perf_reset();
start = now;
} // stats
} // do not render
} // no window message
if seh::ctrl_c_is_triggered() {
break;
};
} // message loop
// Application should exit fullscreen state before terminating.
data.borrow()
.set_fullscreen(false)
.expect("Cannot exit fullscreen mode");
// wait for all GPU processing to stop
data.borrow().wait_frame();
// Save info_queue before final release of resources
let maybe_iq = data.borrow_mut().take_info_queue();
// release resources
drop(data);
// Let debug layer say its last words
if let Some(iq) = maybe_iq {
core::dump_info_queue(&iq);
};
Ok(())
}
// Simple render statistics collector
pub struct PerfData {
frames: u64,
perf: HashMap<&'static str, f64>,
}
impl PerfData {
fn new() -> PerfData {
PerfData {
frames: 0,
perf: HashMap::new(),
}
}
}
thread_local!(static PERFDATA: RefCell<PerfData> =
RefCell::new(PerfData::new()));
pub fn perf_frame() {
PERFDATA.with(|pd| {
pd.borrow_mut().frames += 1;
});
}
pub fn perf_reset() {
PERFDATA.with(|pd| {
let mut pd = pd.borrow_mut();
*pd = PerfData::new();
});
}
pub fn perf_start(name: &'static str) {
PERFDATA.with(|pd| {
*pd.borrow_mut().perf.entry(name).or_insert(0.) -= time::precise_time_s();
});
}
pub fn perf_end(name: &'static str) {
PERFDATA.with(|pd| {
*pd.borrow_mut().perf.get_mut(name).unwrap() += time::precise_time_s();
});
}
pub fn perf_clear_start() {
perf_start("clear");
}
pub fn perf_clear_end() {
perf_end("clear");
}
pub fn perf_fillbuf_start() {
perf_start("fillbuf");
}
pub fn perf_fillbuf_end() {
perf_end("fillbuf");
}
pub fn perf_exec_start() {
perf_start("exec");
}
pub fn perf_exec_end() {
perf_end("exec");
}
pub fn | perf_present_start | identifier_name |
|
main.rs | } else {
Err(err_msg("number shouldn't be zero"))
}
}
#[derive(Debug, Copy, Clone)]
enum IncExc {
Inc(f32),
Exc(f32),
}
use IncExc::{Exc, Inc};
fn parse_f32_range(s: &str, mn: IncExc, mx: IncExc) -> Result<f32, Error> {
let x = s.parse()?;
let in_range = match (mn, mx) {
(Inc(mn), Inc(mx)) => mn <= x && x <= mx,
(Exc(mn), Inc(mx)) => mn < x && x <= mx,
(Inc(mn), Exc(mx)) => mn <= x && x < mx,
(Exc(mn), Exc(mx)) => mn < x && x < mx,
};
if in_range {
Ok(x)
} else {
Err(err_msg(format!(
"number {} not in range {:?}.. {:?}",
x, mn, mx
)))
}
}
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} [0] [1] [N] [options]", program);
print!("{}", opts.usage(&brief));
}
fn main() {
match mainr() {
Err(err) => {
for c in err.causes() {
println!("{}", c);
}
//println!("{}", err.backtrace());
process::exit(101);
}
_ => (),
}
}
fn mainr() -> Result<(), Error> {
// install SEH guard to continue on continuable structured exception
let _seh_guard = seh::SehGuard::new();
// install ctrl-C handler to shutdown gracefully
seh::install_ctrl_c_handler();
// Initialize logger
env_logger::init().unwrap();
println!("sound::init()");
sound::init();
// Set default values of cubes module parameters
let mut parms = CubeParms {
thread_count: 2,
object_count: 2_000,
backbuffer_count: FRAME_COUNT,
speed_mult: 1.0,
concurrent_state_update: true, // unused
debug_layer: cfg!(debug_assertions),
rt_format: DXGI_FORMAT_R16G16B16A16_FLOAT,
enable_srgb: false,
render_trace: false,
fovy_deg: 60.,
render_text: true,
};
let mut adapters_to_test = vec![];
let mut adapters_info = false;
let program_name = env::args().next().unwrap().clone();
let mut opts = Options::new();
opts.optflag("h", "help", "Usage");
opts.optflag("i", "info", "Adapters information");
opts.optflag("", "f32-color", "Render using R32G32B32_FLOAT");
opts.optflag("d", "debug", "Enable debug layer");
opts.optflag("", "nodebug", "Disable debug layer");
opts.optflag("", "notext", "Disable DWrite text output");
opts.optflag("", "trace", "Enable logging in render cycle");
opts.optflag(
"",
"srgb",
"Pretend that backbuffer uses sRGB (doesn't seem to affect anything)",
);
opts.optopt("o", "objects", "Number of cubes", "NUM");
opts.optopt("t", "", "Number of state update threads", "NUM");
opts.optopt("b", "", "Number of backbuffers", "NUM");
opts.optopt("s", "", "Time speed multiplier", "FLOAT");
opts.optopt("", "fov", "Vertical FOV", "FLOAT");
let ms = opts.parse(env::args().skip(1))?;
// help
if ms.opt_present("h") {
print_usage(&program_name, opts);
return Ok(());
};
// adapters to use
for anum in &ms.free {
let n = anum.parse::<u32>()
.with_context(|_| format!("Unrecognized command line argument \"{}\"", anum))?;
adapters_to_test.push(n);
}
// adapters info
if ms.opt_present("i") {
adapters_info = true;
};
// use f32 color
if ms.opt_present("f32-color") {
parms.rt_format = DXGI_FORMAT_R32G32B32A32_FLOAT;
};
// enable sRGB
if ms.opt_present("srgb") {
parms.enable_srgb = true;
};
if ms.opt_present("d") {
parms.debug_layer = true;
};
if ms.opt_present("nodebug") {
parms.debug_layer = false;
};
if ms.opt_present("notext") {
parms.render_text = false;
};
if ms.opt_present("trace") {
parms.render_trace = true;
};
// object count
if let Some(num) = ms.opt_str("o") {
parms.object_count = parse_non_zero_u32(&num).with_context(|_| {
format!(
"Object count in -o should be positive non-zero integer, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("t") {
parms.thread_count = parse_non_zero_u32(&num).with_context(|_| {
format!(
"Thread count in -t should be positive non-zero integer, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("b") {
parms.backbuffer_count = parse_non_zero_u32(&num).with_context(|_| {
format!(
"Backbuffer count in -b should be positive non-zero integer, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("s") {
parms.speed_mult = parse_f32_range(&num, Inc(0.), Inc(::std::f32::INFINITY))
.with_context(|_| {
format!(
"Time speed multiplier in -s should be non-negative floating number, not '{}'",
num
)
})?;
};
if let Some(num) = ms.opt_str("fov") {
parms.fovy_deg = parse_f32_range(&num, Exc(0.), Exc(180.)).with_context(|_| {
format!(
"FOV in --fov should be floating number in (0., 180.) range, not '{}'",
num
)
})?;
};
let factory: DXGIFactory4 = match create_dxgi_factory2(parms.debug_layer) {
Ok(fact) => fact,
Err(hr) => bail!("Cannot create DXGIFactory4 (0x{:x}). No can do. {}", hr, utils::hr2msg(hr)),
};
let mut adapters = vec![];
// Iterate over available GPUs
for (i, adapter) in &factory {
let descr = adapter.get_desc1().unwrap();
println!(
"Adapter {}: {}",
i,
wchar_array_to_string_lossy(&descr.Description)
);
println!(
" Dedicated video memory: {}MiB",
descr.DedicatedVideoMemory / 1024 / 1024
);
if adapters_to_test.is_empty() || adapters_to_test[..].contains(&i) {
// If there's no numbers in command line add each and every available adapter,
// otherwise use adapter numbers from command line
if adapters_info {
print_adapter_info(&adapter);
}
adapters.push(adapter);
}
}
if adapters_info {
return Ok(());
}
// I used this mutex to sync console output.
let mutex = Arc::new(Mutex::new(()));
crossbeam::scope(|scope| {
//// d2d1 test window
// scope.spawn(|| {
// if let Err(err) = d2d1test::main() {
// error!("d2d1test::main() error 0x{:x}", err);
// }
// });
for (id, a) in adapters.into_iter().enumerate() {
let mutex = mutex.clone();
let parms = &parms;
let dxgi_factory = factory.clone();
scope.spawn(move || {
// Spawn a thread for each adapter
match main_prime(id, dxgi_factory, a, mutex, parms) {
Err(err) => {
let stdout = ::std::io::stdout();
let mut handle = stdout.lock();
for c in err.causes() {
let _ = writeln!(handle, "{}", c);
}
}
_ => (),
}
});
}
});
seh::uninstall_ctrl_c_handler();
Ok(())
}
fn print_adapter_info(adapter: &DXGIAdapter1) {
if let Ok(dev) = d3d12_create_device(Some(adapter), D3D_FEATURE_LEVEL_11_0) {
if let Ok(data) = dev.check_feature_support_virtual_address() {
println!(" {:#?}", data);
}
if let Ok(data) = dev.check_feature_support_options() {
println!(" {:#?}", data);
}
let rdesc = D3D12_RESOURCE_DESC {
Dimension: D3D12_RESOURCE_DIMENSION_TEXTURE1D,
Alignment: 0,
Width: 1024,
Height: 1,
DepthOrArraySize: 1,
MipLevels: 10,
Format: DXGI_FORMAT_R32G32B32A32_FLOAT,
SampleDesc: DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
Layout: D3D12_TEXTURE_LAYOUT_UNKNOWN,
Flags: D3D12_RESOURCE_FLAG_NONE,
};
let rai = dev.get_resource_allocation_info(0, &[rdesc]);
println!(" {:#?}", rai);
}
}
const VK_A: i32 = b'A' as i32;
const VK_S: i32 = b'S' as i32;
const VK_D: i32 = b'D' as i32;
const VK_F: i32 = b'F' as i32;
const VK_Q: i32 = b'Q' as i32;
const VK_W: i32 = b'W' as i32;
const VK_E: i32 = b'E' as i32;
const VK_R: i32 = b'R' as i32;
const VK_P: i32 = b'P' as i32;
const KEYS_LEN: usize = 256;
struct KeyState {
keys: [bool; KEYS_LEN],
}
impl KeyState {
pub fn set(&mut self, vk: i32) {
if vk >= 0 && vk < self.keys.len() as i32 {
self.keys[vk as usize] = true;
}
}
pub fn unset(&mut self, vk: i32) {
if vk >= 0 && vk < self.keys.len() as i32 {
self.keys[vk as usize] = false;
}
}
pub fn pressed(&self, vk: i32) -> bool {
if vk >= 0 && vk < self.keys.len() as i32 {
self.keys[vk as usize]
} else {
false
}
}
}
impl Default for KeyState {
fn default() -> KeyState {
KeyState {
keys: [false; KEYS_LEN],
}
}
}
fn main_prime(
id: usize,
dxgi_factory: DXGIFactory4,
adapter: DXGIAdapter1,
_mutex: Arc<Mutex<()>>,
parms: &CubeParms,
) -> Result<(), Error>
{
// Setup window. Currently window module supports only one window per thread.
let descr = wchar_array_to_string_lossy(&adapter.get_desc1().into_error_context("Cannot get adapter description")?.Description);
let title = format!("D3D12 Hello, rusty world! ({})", descr);
let wnd = create_window(&title, 512, 256);
let mut fps = 0.0;
// Initialization of cubes module data required to render all stuff
let data =
RefCell::new(
cubes::AppData::on_init(&wnd, &dxgi_factory, &adapter, parms)
.with_context(|_| format!("Adapter '{}' failed initialization", descr))?);
// x and y store last mouse coords from WM_MOUSEMOVE
let mut x: i32 = 0;
let mut y: i32 = 0;
let mut keys = KeyState::default();
// state of left mouse button
let mut mouse_down = false;
let mut pause = false;
// Profiling stuff
let mut start = time::precise_time_s();
let mut prev_frame_time = time::precise_time_s();
// Window::poll_events() returns non-blocking iterator, which returns Option<MSG>
for mmsg in wnd.poll_events() {
// "if let Some(msg)" extracts msg from mmsg
// if mmsg is None, then 'else' branch is taken
if let Some(msg) = mmsg {
//trace!("{:?} {} {:x} {:x}", msg.time, wmtext::wm_str(msg.message), msg.wParam, msg.lParam);
// Instead of passing messages into cubes module, I process them here
// It is not well-thought-out design decision, it's just slightly simpler now, and cost of changing it is low.
match msg.message {
// Usual message processing stuff
WM_SIZE => {
// Normally this message goes to wndproc, in window.rs I repost it into message queue to prevent reentrancy problems
debug!("WM_SIZE {}, {} ", msg.wParam, msg.lParam);
data.borrow_mut().on_resize(
LOWORD(msg.lParam as u32) as u32,
HIWORD(msg.lParam as u32) as u32,
msg.wParam as u32,
);
}
WM_MOUSEMOVE => {
let x1 = GET_X_LPARAM(msg.lParam) as i32;
let y1 = GET_Y_LPARAM(msg.lParam) as i32;
let (dx, dy) = (x1 - x, y - y1);
x = x1;
y = y1;
if mouse_down {
let mut data = data.borrow_mut();
let camera = data.camera();
let fov = camera.fov();
camera.roty(dy as f32 * fov / 300.);
camera.rotx(-dx as f32 * fov / 300.);
}
}
WM_MOUSEWHEEL => {
let dz = (GET_WHEEL_DELTA_WPARAM(msg.wParam) as f32) / (WHEEL_DELTA as f32);
let mut data = data.borrow_mut();
let camera = data.camera();
let mut vfov = camera.fov();
if vfov < 5.0 {
vfov -= dz * 0.2;
} else {
vfov -= dz * 2.0;
};
vfov = f32::max(vfov, 0.2);
vfov = f32::min(vfov, 120.);
camera.set_fov(vfov);
}
WM_KEYDOWN => {
keys.set(msg.wParam as i32);
}
WM_KEYUP => {
let vk = msg.wParam as i32;
keys.unset(vk);
if vk == VK_P {
pause =!pause;
};
}
WM_LBUTTONDOWN => {
mouse_down = true;
set_capture(wnd.get_hwnd());
}
WM_LBUTTONUP => {
mouse_down = false;
release_capture();
}
_ => {}
};
} else {
// There's no pending window message.
let do_not_render = data.borrow().is_minimized();
if do_not_render {
// MSDN suggest to use MsgWaitForMultipleObjects here, but 10ms sleep shouldn't create problems
std::thread::sleep(Duration::from_millis(10));
} else {
let cur_frame_time = time::precise_time_s();
let frame_dt = cur_frame_time - prev_frame_time;
prev_frame_time = cur_frame_time;
// data is Rc<RefCell<cubes::AppData>>
// Rc is not really needed. I didn't pass it around.
// Take a mutable reference to cubes::AppData
let mut data = data.borrow_mut();
{
// Process WASD keys
let camera = data.camera();
camera.restore_up((180. * frame_dt) as f32);
let step = match (keys.pressed(VK_SHIFT), keys.pressed(VK_CONTROL)) {
(false, false) => 0.1,
(true, false) => 1.0,
(false, true) => 0.01,
(true, true) => 0.001,
};
if keys.pressed(VK_W) {
camera.go(step, 0., 0.);
};
if keys.pressed(VK_S) {
camera.go(-step, 0., 0.);
};
if keys.pressed(VK_A) {
camera.go(0., -step, 0.);
};
if keys.pressed(VK_D) {
camera.go(0., step, 0.);
};
if keys.pressed(VK_R) {
camera.go(0., 0., step);
};
if keys.pressed(VK_F) {
camera.go(0., 0., -step);
};
// Process Q and E keys. They control camera's roll
if keys.pressed(VK_Q) {
camera.ro | fn parse_non_zero_u32(s: &str) -> Result<u32, Error> {
let n = s.parse()?;
if n > 0 {
Ok(n) | random_line_split |
|
integrator.rs | use core::camera::Camera;
use core::color::Color;
use core::image::Image;
use core::interaction::Intersectable;
use core::ray::Ray;
use core::scene::Scene;
use core::spectrum::Spectrum;
// ------------------------
// Integrator
// ------------------------
pub trait Integrator {
// TODO: Should return a Film?
fn render(&self, scene: &Scene, camera: &Camera) -> Box<Image>;
}
// ------------------------
// SamplerIntegrator
// ------------------------
pub struct SamplerIntegrator {
// TODO: Aggregates a sampler!
}
impl SamplerIntegrator {
pub fn new() -> SamplerIntegrator {
SamplerIntegrator {}
}
fn | (ray: &Ray, scene: &Scene) -> Spectrum {
match scene.intersect(ray) {
Some(x) => {
// let y = x / 8.0;
let y = 1.0;
Spectrum::new(&Color::new(y, y, y, y))
},
// No intersection, return background radiance.
None => scene.lights().iter().fold(
Spectrum::default(),
|memo, ref x| memo + x.le(ray)
)
}
}
}
impl Integrator for SamplerIntegrator {
fn render(&self, scene: &Scene, camera: &Camera) -> Box<Image> {
let width = 800;
let height = 600;
// multithreaded for each tile
// for each sample, ray & differentials
// let camera_sample = sampler.get_camera_sample()
// let ray & diff = camera.gen(camera_sample)
let mut img = Box::new(Image::new(width, height));
for row in 0..img.height() {
for col in 0..img.width() {
let ray = camera.generate_ray(col, row);
let res = Self::li(&ray, scene);
img.set(col, row, &res.to_color());
}
}
img
}
}
| li | identifier_name |
integrator.rs | use core::camera::Camera;
use core::color::Color;
use core::image::Image;
use core::interaction::Intersectable;
use core::ray::Ray;
use core::scene::Scene;
use core::spectrum::Spectrum;
// ------------------------
// Integrator
// ------------------------
pub trait Integrator {
// TODO: Should return a Film?
fn render(&self, scene: &Scene, camera: &Camera) -> Box<Image>;
}
// ------------------------
// SamplerIntegrator
// ------------------------
pub struct SamplerIntegrator {
// TODO: Aggregates a sampler!
}
impl SamplerIntegrator {
pub fn new() -> SamplerIntegrator {
SamplerIntegrator {}
}
fn li(ray: &Ray, scene: &Scene) -> Spectrum {
match scene.intersect(ray) {
Some(x) => {
// let y = x / 8.0;
let y = 1.0;
Spectrum::new(&Color::new(y, y, y, y))
},
// No intersection, return background radiance.
None => scene.lights().iter().fold(
Spectrum::default(),
|memo, ref x| memo + x.le(ray)
)
}
}
}
impl Integrator for SamplerIntegrator {
fn render(&self, scene: &Scene, camera: &Camera) -> Box<Image> {
let width = 800;
let height = 600;
// multithreaded for each tile
// for each sample, ray & differentials
// let camera_sample = sampler.get_camera_sample()
// let ray & diff = camera.gen(camera_sample)
let mut img = Box::new(Image::new(width, height));
for row in 0..img.height() {
for col in 0..img.width() {
let ray = camera.generate_ray(col, row);
let res = Self::li(&ray, scene);
img.set(col, row, &res.to_color());
}
} |
img
}
} | random_line_split |
|
integrator.rs | use core::camera::Camera;
use core::color::Color;
use core::image::Image;
use core::interaction::Intersectable;
use core::ray::Ray;
use core::scene::Scene;
use core::spectrum::Spectrum;
// ------------------------
// Integrator
// ------------------------
pub trait Integrator {
// TODO: Should return a Film?
fn render(&self, scene: &Scene, camera: &Camera) -> Box<Image>;
}
// ------------------------
// SamplerIntegrator
// ------------------------
pub struct SamplerIntegrator {
// TODO: Aggregates a sampler!
}
impl SamplerIntegrator {
pub fn new() -> SamplerIntegrator |
fn li(ray: &Ray, scene: &Scene) -> Spectrum {
match scene.intersect(ray) {
Some(x) => {
// let y = x / 8.0;
let y = 1.0;
Spectrum::new(&Color::new(y, y, y, y))
},
// No intersection, return background radiance.
None => scene.lights().iter().fold(
Spectrum::default(),
|memo, ref x| memo + x.le(ray)
)
}
}
}
impl Integrator for SamplerIntegrator {
fn render(&self, scene: &Scene, camera: &Camera) -> Box<Image> {
let width = 800;
let height = 600;
// multithreaded for each tile
// for each sample, ray & differentials
// let camera_sample = sampler.get_camera_sample()
// let ray & diff = camera.gen(camera_sample)
let mut img = Box::new(Image::new(width, height));
for row in 0..img.height() {
for col in 0..img.width() {
let ray = camera.generate_ray(col, row);
let res = Self::li(&ray, scene);
img.set(col, row, &res.to_color());
}
}
img
}
}
| {
SamplerIntegrator {}
} | identifier_body |
packed-struct-generic-size.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
use std::mem;
#[repr(packed)]
struct S<T, S> {
a: T,
b: u8,
c: S
}
pub fn | () {
assert_eq!(mem::size_of::<S<u8, u8>>(), 3);
assert_eq!(mem::size_of::<S<u64, u16>>(), 11);
}
| main | identifier_name |
packed-struct-generic-size.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
use std::mem;
#[repr(packed)]
struct S<T, S> {
a: T,
b: u8,
c: S
}
pub fn main() | {
assert_eq!(mem::size_of::<S<u8, u8>>(), 3);
assert_eq!(mem::size_of::<S<u64, u16>>(), 11);
} | identifier_body |
|
packed-struct-generic-size.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
use std::mem;
#[repr(packed)]
struct S<T, S> {
a: T,
b: u8, |
assert_eq!(mem::size_of::<S<u64, u16>>(), 11);
} | c: S
}
pub fn main() {
assert_eq!(mem::size_of::<S<u8, u8>>(), 3); | random_line_split |
debug.rs | /*
* Rust BareBones OS
* - By John Hodge (Mutabah/thePowersGang)
*
* arch/x86/debug.rs
* - Debug output channel
*
* Writes debug to the standard PC serial port (0x3F8.. 0x3FF)
*
* == LICENCE ==
* This code has been put into the public domain, there are no restrictions on
* its use, and the author takes no liability.
*/
| {
for b in s.bytes()
{
putb(b);
}
}
/// Write a single byte to the output channel
///
/// This method is unsafe because it does port accesses without synchronisation
pub unsafe fn putb(b: u8)
{
// Wait for the serial port's fifo to not be empty
while (::arch::x86_io::inb(0x3F8+5) & 0x20) == 0
{
// Do nothing
}
// Send the byte out the serial port
::arch::x86_io::outb(0x3F8, b);
// Also send to the bochs 0xe9 hack
::arch::x86_io::outb(0xe9, b);
} | /// Write a string to the output channel
///
/// This method is unsafe because it does port accesses without synchronisation
pub unsafe fn puts(s: &str) | random_line_split |
debug.rs | /*
* Rust BareBones OS
* - By John Hodge (Mutabah/thePowersGang)
*
* arch/x86/debug.rs
* - Debug output channel
*
* Writes debug to the standard PC serial port (0x3F8.. 0x3FF)
*
* == LICENCE ==
* This code has been put into the public domain, there are no restrictions on
* its use, and the author takes no liability.
*/
/// Write a string to the output channel
///
/// This method is unsafe because it does port accesses without synchronisation
pub unsafe fn | (s: &str)
{
for b in s.bytes()
{
putb(b);
}
}
/// Write a single byte to the output channel
///
/// This method is unsafe because it does port accesses without synchronisation
pub unsafe fn putb(b: u8)
{
// Wait for the serial port's fifo to not be empty
while (::arch::x86_io::inb(0x3F8+5) & 0x20) == 0
{
// Do nothing
}
// Send the byte out the serial port
::arch::x86_io::outb(0x3F8, b);
// Also send to the bochs 0xe9 hack
::arch::x86_io::outb(0xe9, b);
}
| puts | identifier_name |
debug.rs | /*
* Rust BareBones OS
* - By John Hodge (Mutabah/thePowersGang)
*
* arch/x86/debug.rs
* - Debug output channel
*
* Writes debug to the standard PC serial port (0x3F8.. 0x3FF)
*
* == LICENCE ==
* This code has been put into the public domain, there are no restrictions on
* its use, and the author takes no liability.
*/
/// Write a string to the output channel
///
/// This method is unsafe because it does port accesses without synchronisation
pub unsafe fn puts(s: &str)
|
/// Write a single byte to the output channel
///
/// This method is unsafe because it does port accesses without synchronisation
pub unsafe fn putb(b: u8)
{
// Wait for the serial port's fifo to not be empty
while (::arch::x86_io::inb(0x3F8+5) & 0x20) == 0
{
// Do nothing
}
// Send the byte out the serial port
::arch::x86_io::outb(0x3F8, b);
// Also send to the bochs 0xe9 hack
::arch::x86_io::outb(0xe9, b);
}
| {
for b in s.bytes()
{
putb(b);
}
} | identifier_body |
error.rs | use std;
use std::fmt;
use std::ops::Deref;
/// Error type
#[derive(Debug)]
pub struct Error {
// Description
what: String, | // Cause
cause: Option<Box<std::error::Error + Sized>>,
}
impl Error {
/// Construct `Error` with message only
pub fn new<S>(what: S) -> Self
where S: Into<String> {
Error {
what: what.into(),
cause: None,
}
}
/// Consumes self and constructs `Error` with message and cause
pub fn because<E>(self, cause: E) -> Self
where E:'static + std::error::Error + Sized {
Error {
what: self.what,
cause: Some(Box::new(cause)),
}
}
/// Consumes self and retuns `Err` variant of `Result<T, Error>`.
pub fn result<T>(self) -> Result<T, Self> {
Err(self)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.what
}
fn cause(&self) -> Option<&std::error::Error> {
match self.cause {
None => None,
Some(ref boxed) => Some(boxed.deref())
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.cause {
None => write!(f, "{}", self.what),
Some(ref boxed) => write!(f, "{} because {}", self.what, &boxed),
}
}
} | random_line_split |
|
error.rs | use std;
use std::fmt;
use std::ops::Deref;
/// Error type
#[derive(Debug)]
pub struct | {
// Description
what: String,
// Cause
cause: Option<Box<std::error::Error + Sized>>,
}
impl Error {
/// Construct `Error` with message only
pub fn new<S>(what: S) -> Self
where S: Into<String> {
Error {
what: what.into(),
cause: None,
}
}
/// Consumes self and constructs `Error` with message and cause
pub fn because<E>(self, cause: E) -> Self
where E:'static + std::error::Error + Sized {
Error {
what: self.what,
cause: Some(Box::new(cause)),
}
}
/// Consumes self and retuns `Err` variant of `Result<T, Error>`.
pub fn result<T>(self) -> Result<T, Self> {
Err(self)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.what
}
fn cause(&self) -> Option<&std::error::Error> {
match self.cause {
None => None,
Some(ref boxed) => Some(boxed.deref())
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.cause {
None => write!(f, "{}", self.what),
Some(ref boxed) => write!(f, "{} because {}", self.what, &boxed),
}
}
}
| Error | identifier_name |
thread.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use boxed::Box;
use mem;
use uint;
use libc;
use thunk::Thunk;
use sys_common::stack;
use sys::{thread, stack_overflow};
// This is the starting point of rust os threads. The first thing we do
// is make sure that we don't trigger __morestack (also why this has a
// no_stack_check annotation), and then we extract the main function
// and invoke it.
#[no_stack_check]
pub fn | (main: *mut libc::c_void) -> thread::rust_thread_return {
unsafe {
stack::record_os_managed_stack_bounds(0, uint::MAX);
let handler = stack_overflow::Handler::new();
let f: Box<Thunk> = mem::transmute(main);
f.invoke(());
drop(handler);
mem::transmute(0 as thread::rust_thread_return)
}
}
| start_thread | identifier_name |
thread.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use boxed::Box;
use mem;
use uint;
use libc;
use thunk::Thunk;
use sys_common::stack;
use sys::{thread, stack_overflow};
// This is the starting point of rust os threads. The first thing we do
// is make sure that we don't trigger __morestack (also why this has a
// no_stack_check annotation), and then we extract the main function
// and invoke it.
#[no_stack_check]
pub fn start_thread(main: *mut libc::c_void) -> thread::rust_thread_return { | f.invoke(());
drop(handler);
mem::transmute(0 as thread::rust_thread_return)
}
} | unsafe {
stack::record_os_managed_stack_bounds(0, uint::MAX);
let handler = stack_overflow::Handler::new();
let f: Box<Thunk> = mem::transmute(main); | random_line_split |
thread.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use boxed::Box;
use mem;
use uint;
use libc;
use thunk::Thunk;
use sys_common::stack;
use sys::{thread, stack_overflow};
// This is the starting point of rust os threads. The first thing we do
// is make sure that we don't trigger __morestack (also why this has a
// no_stack_check annotation), and then we extract the main function
// and invoke it.
#[no_stack_check]
pub fn start_thread(main: *mut libc::c_void) -> thread::rust_thread_return | {
unsafe {
stack::record_os_managed_stack_bounds(0, uint::MAX);
let handler = stack_overflow::Handler::new();
let f: Box<Thunk> = mem::transmute(main);
f.invoke(());
drop(handler);
mem::transmute(0 as thread::rust_thread_return)
}
} | identifier_body |
|
main.rs | extern crate toml;
use std::{env, process};
use std::io::prelude::*;
use std::io::{BufReader, ErrorKind};
use std::collections::BTreeMap;
use std::fs::File;
use toml::Value;
static OVERRIDES_PATH : &'static str = ".multirust/overrides";
static SETTINGS_PATH : &'static str = ".rustup/settings.toml";
static OLD_SETTINGS_PATH : &'static str = ".multirust/settings.toml";
enum OverridesDatabase {
Plain(BTreeMap<String, String>),
Toml(BTreeMap<String, toml::Value>),
}
impl OverridesDatabase {
pub fn get(&self, key: &str) -> Option<&str> {
use OverridesDatabase::*;
match *self {
Plain(ref db) => db.get(key).map(|s| &s[..]),
Toml(ref db) => {
db.get(key).map(|v| v.as_str().expect("Expected value is not a string."))
}
}
}
}
fn with_date<'a>(short: &'a str, toolchain: &'a str) -> Option<&'a str> {
let date_start = short.len() + 1;
let date_end = short.len() + 3 + 4 + 2 + 2;
let char_range = toolchain.chars()
.skip(date_start)
.take(4)
.all(char::is_numeric);
if toolchain.len() > date_start && char_range {
Some(&toolchain[0..date_end])
} else {
None
}
}
fn clean_toolchain_name(toolchain: &str) -> &str {
static SHORTNAMES : &'static [&'static str] = &["stable", "nightly", "beta"];
for short in SHORTNAMES {
if toolchain.starts_with(short) {
return match with_date(short, toolchain) {
Some(s) => s,
None => short
}
}
}
toolchain
}
fn plain_overrides_file(f: File) {
let overrides = BufReader::new(f);
let mut overrides_map = BTreeMap::new();
for line in overrides.lines() {
let line = line.expect("No valid line found");
let mut s = line.split(';');
let path = s.next().expect("No path in line");
let toolchain = s.next().expect("No toolchain in line");
overrides_map.insert(path.into(), toolchain.into());
}
let database = OverridesDatabase::Plain(overrides_map);
toolchain(database);
}
fn settings_toml(mut settings: File) -> Result<(), ()> {
let mut content = String::new();
settings.read_to_string(&mut content).expect("Can't read settings file");
let database = content.parse::<Value>().map_err(|_| ())?;
let database = database.get("overrides").cloned()
.and_then(|overrides| overrides.as_table().cloned())
.and_then(|database| Some(OverridesDatabase::Toml(database)))
.ok_or(())?;
toolchain(database);
Ok(())
}
fn toolchain(database: OverridesDatabase) {
let mut cwd = match env::current_dir() {
Ok(cwd) => cwd,
Err(_) => return,
};
loop {
let path = format!("{}", cwd.display());
| println!("{}", clean_toolchain_name(toolchain));
return;
}
if!cwd.pop() {
break;
}
}
println!("default");
}
fn main() {
let home = env::home_dir().expect("Impossible to get your home dir!");
let mut overrides_path = home.clone();
overrides_path.push(OVERRIDES_PATH);
match File::open(&overrides_path) {
Ok(f) => {
plain_overrides_file(f);
process::exit(0);
},
Err(ref e) if e.kind() == ErrorKind::NotFound => { /* ignored */ },
Err(_) => {
println!("default");
process::exit(0);
}
}
let mut settings_path = home.clone();
settings_path.push(SETTINGS_PATH);
if let Ok(f) = File::open(&settings_path) {
settings_toml(f).unwrap_or_else(|_| println!("default"));
process::exit(0);
}
let mut settings_path = home.clone();
settings_path.push(OLD_SETTINGS_PATH);
if let Ok(f) = File::open(&settings_path) {
settings_toml(f).unwrap_or_else(|_| println!("default"));
process::exit(0);
}
println!("default");
}
#[cfg(test)]
mod test {
use super::clean_toolchain_name;
#[test]
fn simple_name() {
assert_eq!("nightly", clean_toolchain_name("nightly-x86_64-unknown-linux-gnu"));
assert_eq!("nightly", clean_toolchain_name("nightly"));
}
#[test]
fn name_with_date() {
assert_eq!("nightly-2016-06-05", clean_toolchain_name("nightly-2016-06-05-x86_64-unknown-linux-gnu"));
}
} | if let Some(toolchain) = database.get(&path) { | random_line_split |
main.rs | extern crate toml;
use std::{env, process};
use std::io::prelude::*;
use std::io::{BufReader, ErrorKind};
use std::collections::BTreeMap;
use std::fs::File;
use toml::Value;
static OVERRIDES_PATH : &'static str = ".multirust/overrides";
static SETTINGS_PATH : &'static str = ".rustup/settings.toml";
static OLD_SETTINGS_PATH : &'static str = ".multirust/settings.toml";
enum OverridesDatabase {
Plain(BTreeMap<String, String>),
Toml(BTreeMap<String, toml::Value>),
}
impl OverridesDatabase {
pub fn | (&self, key: &str) -> Option<&str> {
use OverridesDatabase::*;
match *self {
Plain(ref db) => db.get(key).map(|s| &s[..]),
Toml(ref db) => {
db.get(key).map(|v| v.as_str().expect("Expected value is not a string."))
}
}
}
}
fn with_date<'a>(short: &'a str, toolchain: &'a str) -> Option<&'a str> {
let date_start = short.len() + 1;
let date_end = short.len() + 3 + 4 + 2 + 2;
let char_range = toolchain.chars()
.skip(date_start)
.take(4)
.all(char::is_numeric);
if toolchain.len() > date_start && char_range {
Some(&toolchain[0..date_end])
} else {
None
}
}
fn clean_toolchain_name(toolchain: &str) -> &str {
static SHORTNAMES : &'static [&'static str] = &["stable", "nightly", "beta"];
for short in SHORTNAMES {
if toolchain.starts_with(short) {
return match with_date(short, toolchain) {
Some(s) => s,
None => short
}
}
}
toolchain
}
fn plain_overrides_file(f: File) {
let overrides = BufReader::new(f);
let mut overrides_map = BTreeMap::new();
for line in overrides.lines() {
let line = line.expect("No valid line found");
let mut s = line.split(';');
let path = s.next().expect("No path in line");
let toolchain = s.next().expect("No toolchain in line");
overrides_map.insert(path.into(), toolchain.into());
}
let database = OverridesDatabase::Plain(overrides_map);
toolchain(database);
}
fn settings_toml(mut settings: File) -> Result<(), ()> {
let mut content = String::new();
settings.read_to_string(&mut content).expect("Can't read settings file");
let database = content.parse::<Value>().map_err(|_| ())?;
let database = database.get("overrides").cloned()
.and_then(|overrides| overrides.as_table().cloned())
.and_then(|database| Some(OverridesDatabase::Toml(database)))
.ok_or(())?;
toolchain(database);
Ok(())
}
fn toolchain(database: OverridesDatabase) {
let mut cwd = match env::current_dir() {
Ok(cwd) => cwd,
Err(_) => return,
};
loop {
let path = format!("{}", cwd.display());
if let Some(toolchain) = database.get(&path) {
println!("{}", clean_toolchain_name(toolchain));
return;
}
if!cwd.pop() {
break;
}
}
println!("default");
}
fn main() {
let home = env::home_dir().expect("Impossible to get your home dir!");
let mut overrides_path = home.clone();
overrides_path.push(OVERRIDES_PATH);
match File::open(&overrides_path) {
Ok(f) => {
plain_overrides_file(f);
process::exit(0);
},
Err(ref e) if e.kind() == ErrorKind::NotFound => { /* ignored */ },
Err(_) => {
println!("default");
process::exit(0);
}
}
let mut settings_path = home.clone();
settings_path.push(SETTINGS_PATH);
if let Ok(f) = File::open(&settings_path) {
settings_toml(f).unwrap_or_else(|_| println!("default"));
process::exit(0);
}
let mut settings_path = home.clone();
settings_path.push(OLD_SETTINGS_PATH);
if let Ok(f) = File::open(&settings_path) {
settings_toml(f).unwrap_or_else(|_| println!("default"));
process::exit(0);
}
println!("default");
}
#[cfg(test)]
mod test {
use super::clean_toolchain_name;
#[test]
fn simple_name() {
assert_eq!("nightly", clean_toolchain_name("nightly-x86_64-unknown-linux-gnu"));
assert_eq!("nightly", clean_toolchain_name("nightly"));
}
#[test]
fn name_with_date() {
assert_eq!("nightly-2016-06-05", clean_toolchain_name("nightly-2016-06-05-x86_64-unknown-linux-gnu"));
}
}
| get | identifier_name |
main.rs | extern crate toml;
use std::{env, process};
use std::io::prelude::*;
use std::io::{BufReader, ErrorKind};
use std::collections::BTreeMap;
use std::fs::File;
use toml::Value;
static OVERRIDES_PATH : &'static str = ".multirust/overrides";
static SETTINGS_PATH : &'static str = ".rustup/settings.toml";
static OLD_SETTINGS_PATH : &'static str = ".multirust/settings.toml";
enum OverridesDatabase {
Plain(BTreeMap<String, String>),
Toml(BTreeMap<String, toml::Value>),
}
impl OverridesDatabase {
pub fn get(&self, key: &str) -> Option<&str> {
use OverridesDatabase::*;
match *self {
Plain(ref db) => db.get(key).map(|s| &s[..]),
Toml(ref db) => {
db.get(key).map(|v| v.as_str().expect("Expected value is not a string."))
}
}
}
}
fn with_date<'a>(short: &'a str, toolchain: &'a str) -> Option<&'a str> |
fn clean_toolchain_name(toolchain: &str) -> &str {
static SHORTNAMES : &'static [&'static str] = &["stable", "nightly", "beta"];
for short in SHORTNAMES {
if toolchain.starts_with(short) {
return match with_date(short, toolchain) {
Some(s) => s,
None => short
}
}
}
toolchain
}
fn plain_overrides_file(f: File) {
let overrides = BufReader::new(f);
let mut overrides_map = BTreeMap::new();
for line in overrides.lines() {
let line = line.expect("No valid line found");
let mut s = line.split(';');
let path = s.next().expect("No path in line");
let toolchain = s.next().expect("No toolchain in line");
overrides_map.insert(path.into(), toolchain.into());
}
let database = OverridesDatabase::Plain(overrides_map);
toolchain(database);
}
fn settings_toml(mut settings: File) -> Result<(), ()> {
let mut content = String::new();
settings.read_to_string(&mut content).expect("Can't read settings file");
let database = content.parse::<Value>().map_err(|_| ())?;
let database = database.get("overrides").cloned()
.and_then(|overrides| overrides.as_table().cloned())
.and_then(|database| Some(OverridesDatabase::Toml(database)))
.ok_or(())?;
toolchain(database);
Ok(())
}
fn toolchain(database: OverridesDatabase) {
let mut cwd = match env::current_dir() {
Ok(cwd) => cwd,
Err(_) => return,
};
loop {
let path = format!("{}", cwd.display());
if let Some(toolchain) = database.get(&path) {
println!("{}", clean_toolchain_name(toolchain));
return;
}
if!cwd.pop() {
break;
}
}
println!("default");
}
fn main() {
let home = env::home_dir().expect("Impossible to get your home dir!");
let mut overrides_path = home.clone();
overrides_path.push(OVERRIDES_PATH);
match File::open(&overrides_path) {
Ok(f) => {
plain_overrides_file(f);
process::exit(0);
},
Err(ref e) if e.kind() == ErrorKind::NotFound => { /* ignored */ },
Err(_) => {
println!("default");
process::exit(0);
}
}
let mut settings_path = home.clone();
settings_path.push(SETTINGS_PATH);
if let Ok(f) = File::open(&settings_path) {
settings_toml(f).unwrap_or_else(|_| println!("default"));
process::exit(0);
}
let mut settings_path = home.clone();
settings_path.push(OLD_SETTINGS_PATH);
if let Ok(f) = File::open(&settings_path) {
settings_toml(f).unwrap_or_else(|_| println!("default"));
process::exit(0);
}
println!("default");
}
#[cfg(test)]
mod test {
use super::clean_toolchain_name;
#[test]
fn simple_name() {
assert_eq!("nightly", clean_toolchain_name("nightly-x86_64-unknown-linux-gnu"));
assert_eq!("nightly", clean_toolchain_name("nightly"));
}
#[test]
fn name_with_date() {
assert_eq!("nightly-2016-06-05", clean_toolchain_name("nightly-2016-06-05-x86_64-unknown-linux-gnu"));
}
}
| {
let date_start = short.len() + 1;
let date_end = short.len() + 3 + 4 + 2 + 2;
let char_range = toolchain.chars()
.skip(date_start)
.take(4)
.all(char::is_numeric);
if toolchain.len() > date_start && char_range {
Some(&toolchain[0..date_end])
} else {
None
}
} | identifier_body |
htmlfontelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use dom::attr::AttrValue;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding::HTMLFontElementMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{LayoutJS, Root};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, RawLayoutElementHelpers};
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use dom::virtualmethods::VirtualMethods;
use string_cache::Atom;
use style::values::specified;
use util::str::{HTML_SPACE_CHARACTERS, read_numbers};
#[dom_struct]
pub struct HTMLFontElement {
htmlelement: HTMLElement,
}
impl HTMLFontElement {
fn new_inherited(localName: Atom, prefix: Option<DOMString>, document: &Document) -> HTMLFontElement {
HTMLFontElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLFontElement> {
let element = HTMLFontElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLFontElementBinding::Wrap)
}
}
impl HTMLFontElementMethods for HTMLFontElement {
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_getter!(Color, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_legacy_color_setter!(SetColor, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-face | // https://html.spec.whatwg.org/multipage/#dom-font-face
make_atomic_setter!(SetFace, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-size
make_getter!(Size, "size");
// https://html.spec.whatwg.org/multipage/#dom-font-size
fn SetSize(&self, value: DOMString) {
let element = self.upcast::<Element>();
let length = parse_length(&value);
element.set_attribute(&atom!("size"), AttrValue::Length(value.into(), length));
}
}
impl VirtualMethods for HTMLFontElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("face") => AttrValue::from_atomic(value.into()),
&atom!("color") => AttrValue::from_legacy_color(value.into()),
&atom!("size") => {
let length = parse_length(&value);
AttrValue::Length(value.into(), length)
},
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
pub trait HTMLFontElementLayoutHelpers {
fn get_color(&self) -> Option<RGBA>;
fn get_face(&self) -> Option<Atom>;
fn get_size(&self) -> Option<specified::Length>;
}
impl HTMLFontElementLayoutHelpers for LayoutJS<HTMLFontElement> {
#[allow(unsafe_code)]
fn get_color(&self) -> Option<RGBA> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("color"))
.and_then(AttrValue::as_color)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_face(&self) -> Option<Atom> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("face"))
.map(AttrValue::as_atom)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_size(&self) -> Option<specified::Length> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("size"))
.and_then(AttrValue::as_length)
.cloned()
}
}
}
/// https://html.spec.whatwg.org/multipage/#rules-for-parsing-a-legacy-font-size
pub fn parse_legacy_font_size(mut input: &str) -> Option<&'static str> {
// Steps 1 & 2 are not relevant
// Step 3
input = input.trim_matches(HTML_SPACE_CHARACTERS);
enum ParseMode {
RelativePlus,
RelativeMinus,
Absolute,
}
let mut input_chars = input.chars().peekable();
let parse_mode = match input_chars.peek() {
// Step 4
None => return None,
// Step 5
Some(&'+') => {
let _ = input_chars.next(); // consume the '+'
ParseMode::RelativePlus
}
Some(&'-') => {
let _ = input_chars.next(); // consume the '-'
ParseMode::RelativeMinus
}
Some(_) => ParseMode::Absolute,
};
// Steps 6, 7, 8
let mut value = match read_numbers(input_chars) {
(Some(v), _) => v,
(None, _) => return None,
};
// Step 9
match parse_mode {
ParseMode::RelativePlus => value = 3 + value,
ParseMode::RelativeMinus => value = 3 - value,
ParseMode::Absolute => (),
}
// Steps 10, 11, 12
Some(match value {
n if n >= 7 => "xxx-large",
6 => "xx-large",
5 => "x-large",
4 => "large",
3 => "medium",
2 => "small",
n if n <= 1 => "x-small",
_ => unreachable!(),
})
}
fn parse_length(value: &str) -> Option<specified::Length> {
parse_legacy_font_size(&value).and_then(|parsed| specified::Length::from_str(&parsed))
} | make_getter!(Face, "face");
| random_line_split |
htmlfontelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use dom::attr::AttrValue;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding::HTMLFontElementMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{LayoutJS, Root};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, RawLayoutElementHelpers};
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use dom::virtualmethods::VirtualMethods;
use string_cache::Atom;
use style::values::specified;
use util::str::{HTML_SPACE_CHARACTERS, read_numbers};
#[dom_struct]
pub struct | {
htmlelement: HTMLElement,
}
impl HTMLFontElement {
fn new_inherited(localName: Atom, prefix: Option<DOMString>, document: &Document) -> HTMLFontElement {
HTMLFontElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLFontElement> {
let element = HTMLFontElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLFontElementBinding::Wrap)
}
}
impl HTMLFontElementMethods for HTMLFontElement {
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_getter!(Color, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_legacy_color_setter!(SetColor, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_getter!(Face, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_atomic_setter!(SetFace, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-size
make_getter!(Size, "size");
// https://html.spec.whatwg.org/multipage/#dom-font-size
fn SetSize(&self, value: DOMString) {
let element = self.upcast::<Element>();
let length = parse_length(&value);
element.set_attribute(&atom!("size"), AttrValue::Length(value.into(), length));
}
}
impl VirtualMethods for HTMLFontElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("face") => AttrValue::from_atomic(value.into()),
&atom!("color") => AttrValue::from_legacy_color(value.into()),
&atom!("size") => {
let length = parse_length(&value);
AttrValue::Length(value.into(), length)
},
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
pub trait HTMLFontElementLayoutHelpers {
fn get_color(&self) -> Option<RGBA>;
fn get_face(&self) -> Option<Atom>;
fn get_size(&self) -> Option<specified::Length>;
}
impl HTMLFontElementLayoutHelpers for LayoutJS<HTMLFontElement> {
#[allow(unsafe_code)]
fn get_color(&self) -> Option<RGBA> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("color"))
.and_then(AttrValue::as_color)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_face(&self) -> Option<Atom> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("face"))
.map(AttrValue::as_atom)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_size(&self) -> Option<specified::Length> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("size"))
.and_then(AttrValue::as_length)
.cloned()
}
}
}
/// https://html.spec.whatwg.org/multipage/#rules-for-parsing-a-legacy-font-size
pub fn parse_legacy_font_size(mut input: &str) -> Option<&'static str> {
// Steps 1 & 2 are not relevant
// Step 3
input = input.trim_matches(HTML_SPACE_CHARACTERS);
enum ParseMode {
RelativePlus,
RelativeMinus,
Absolute,
}
let mut input_chars = input.chars().peekable();
let parse_mode = match input_chars.peek() {
// Step 4
None => return None,
// Step 5
Some(&'+') => {
let _ = input_chars.next(); // consume the '+'
ParseMode::RelativePlus
}
Some(&'-') => {
let _ = input_chars.next(); // consume the '-'
ParseMode::RelativeMinus
}
Some(_) => ParseMode::Absolute,
};
// Steps 6, 7, 8
let mut value = match read_numbers(input_chars) {
(Some(v), _) => v,
(None, _) => return None,
};
// Step 9
match parse_mode {
ParseMode::RelativePlus => value = 3 + value,
ParseMode::RelativeMinus => value = 3 - value,
ParseMode::Absolute => (),
}
// Steps 10, 11, 12
Some(match value {
n if n >= 7 => "xxx-large",
6 => "xx-large",
5 => "x-large",
4 => "large",
3 => "medium",
2 => "small",
n if n <= 1 => "x-small",
_ => unreachable!(),
})
}
fn parse_length(value: &str) -> Option<specified::Length> {
parse_legacy_font_size(&value).and_then(|parsed| specified::Length::from_str(&parsed))
}
| HTMLFontElement | identifier_name |
htmlfontelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use dom::attr::AttrValue;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding::HTMLFontElementMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{LayoutJS, Root};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, RawLayoutElementHelpers};
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use dom::virtualmethods::VirtualMethods;
use string_cache::Atom;
use style::values::specified;
use util::str::{HTML_SPACE_CHARACTERS, read_numbers};
#[dom_struct]
pub struct HTMLFontElement {
htmlelement: HTMLElement,
}
impl HTMLFontElement {
fn new_inherited(localName: Atom, prefix: Option<DOMString>, document: &Document) -> HTMLFontElement {
HTMLFontElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLFontElement> {
let element = HTMLFontElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLFontElementBinding::Wrap)
}
}
impl HTMLFontElementMethods for HTMLFontElement {
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_getter!(Color, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_legacy_color_setter!(SetColor, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_getter!(Face, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_atomic_setter!(SetFace, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-size
make_getter!(Size, "size");
// https://html.spec.whatwg.org/multipage/#dom-font-size
fn SetSize(&self, value: DOMString) {
let element = self.upcast::<Element>();
let length = parse_length(&value);
element.set_attribute(&atom!("size"), AttrValue::Length(value.into(), length));
}
}
impl VirtualMethods for HTMLFontElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("face") => AttrValue::from_atomic(value.into()),
&atom!("color") => AttrValue::from_legacy_color(value.into()),
&atom!("size") => {
let length = parse_length(&value);
AttrValue::Length(value.into(), length)
},
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
pub trait HTMLFontElementLayoutHelpers {
fn get_color(&self) -> Option<RGBA>;
fn get_face(&self) -> Option<Atom>;
fn get_size(&self) -> Option<specified::Length>;
}
impl HTMLFontElementLayoutHelpers for LayoutJS<HTMLFontElement> {
#[allow(unsafe_code)]
fn get_color(&self) -> Option<RGBA> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("color"))
.and_then(AttrValue::as_color)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_face(&self) -> Option<Atom> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("face"))
.map(AttrValue::as_atom)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_size(&self) -> Option<specified::Length> |
}
/// https://html.spec.whatwg.org/multipage/#rules-for-parsing-a-legacy-font-size
pub fn parse_legacy_font_size(mut input: &str) -> Option<&'static str> {
// Steps 1 & 2 are not relevant
// Step 3
input = input.trim_matches(HTML_SPACE_CHARACTERS);
enum ParseMode {
RelativePlus,
RelativeMinus,
Absolute,
}
let mut input_chars = input.chars().peekable();
let parse_mode = match input_chars.peek() {
// Step 4
None => return None,
// Step 5
Some(&'+') => {
let _ = input_chars.next(); // consume the '+'
ParseMode::RelativePlus
}
Some(&'-') => {
let _ = input_chars.next(); // consume the '-'
ParseMode::RelativeMinus
}
Some(_) => ParseMode::Absolute,
};
// Steps 6, 7, 8
let mut value = match read_numbers(input_chars) {
(Some(v), _) => v,
(None, _) => return None,
};
// Step 9
match parse_mode {
ParseMode::RelativePlus => value = 3 + value,
ParseMode::RelativeMinus => value = 3 - value,
ParseMode::Absolute => (),
}
// Steps 10, 11, 12
Some(match value {
n if n >= 7 => "xxx-large",
6 => "xx-large",
5 => "x-large",
4 => "large",
3 => "medium",
2 => "small",
n if n <= 1 => "x-small",
_ => unreachable!(),
})
}
fn parse_length(value: &str) -> Option<specified::Length> {
parse_legacy_font_size(&value).and_then(|parsed| specified::Length::from_str(&parsed))
}
| {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("size"))
.and_then(AttrValue::as_length)
.cloned()
}
} | identifier_body |
more.rs | #![crate_name = "uu_more"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Martin Kysel <[email protected]>
*
* For the full copyright and license information, please view the LICENSE file
* that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use getopts::Options;
use std::io::{stdout, Write, Read};
use std::fs::File;
#[cfg(unix)]
extern crate nix;
#[cfg(unix)]
use nix::sys::termios;
#[derive(Clone, Eq, PartialEq)]
pub enum Mode {
More,
Help,
Version,
}
static NAME: &'static str = "more";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("v", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
show_error!("{}", e);
panic!()
},
};
let usage = opts.usage("more TARGET.");
let mode = if matches.opt_present("version") {
Mode::Version
} else if matches.opt_present("help") {
Mode::Help
} else | ;
match mode {
Mode::More => more(matches),
Mode::Help => help(&usage),
Mode::Version => version(),
}
0
}
fn version() {
println!("{} {}", NAME, VERSION);
}
fn help(usage: &str) {
let msg = format!("{0} {1}\n\n\
Usage: {0} TARGET\n \
\n\
{2}", NAME, VERSION, usage);
println!("{}", msg);
}
#[cfg(unix)]
fn setup_term() -> termios::Termios {
let mut term = termios::tcgetattr(0).unwrap();
// Unset canonical mode, so we get characters immediately
term.c_lflag.remove(termios::ICANON);
// Disable local echo
term.c_lflag.remove(termios::ECHO);
termios::tcsetattr(0, termios::TCSADRAIN, &term).unwrap();
term
}
#[cfg(windows)]
fn setup_term() -> usize {
0
}
#[cfg(unix)]
fn reset_term(term: &mut termios::Termios) {
term.c_lflag.insert(termios::ICANON);
term.c_lflag.insert(termios::ECHO);
termios::tcsetattr(0, termios::TCSADRAIN, &term).unwrap();
}
#[cfg(windows)]
fn reset_term(_: &mut usize) {
}
fn more(matches: getopts::Matches) {
let files = matches.free;
let mut f = File::open(files.first().unwrap()).unwrap();
let mut buffer = [0; 1024];
let mut term = setup_term();
let mut end = false;
while let Ok(sz) = f.read(&mut buffer) {
if sz == 0 { break; }
stdout().write(&buffer[0..sz]).unwrap();
for byte in std::io::stdin().bytes() {
match byte.unwrap() {
b''=> break,
b'q' | 27 => {
end = true;
break;
},
_ => ()
}
}
if end { break;}
}
reset_term(&mut term);
println!("");
}
| {
Mode::More
} | conditional_block |
more.rs | #![crate_name = "uu_more"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Martin Kysel <[email protected]>
*
* For the full copyright and license information, please view the LICENSE file
* that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use getopts::Options;
use std::io::{stdout, Write, Read};
use std::fs::File;
#[cfg(unix)]
extern crate nix;
#[cfg(unix)]
use nix::sys::termios;
#[derive(Clone, Eq, PartialEq)]
pub enum Mode {
More,
Help,
Version,
}
static NAME: &'static str = "more";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("v", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
show_error!("{}", e);
panic!()
},
};
let usage = opts.usage("more TARGET.");
let mode = if matches.opt_present("version") {
Mode::Version
} else if matches.opt_present("help") {
Mode::Help
} else {
Mode::More
};
match mode {
Mode::More => more(matches),
Mode::Help => help(&usage),
Mode::Version => version(),
}
0
}
fn version() {
println!("{} {}", NAME, VERSION);
}
fn help(usage: &str) {
let msg = format!("{0} {1}\n\n\
Usage: {0} TARGET\n \
\n\
{2}", NAME, VERSION, usage);
println!("{}", msg);
}
#[cfg(unix)]
fn setup_term() -> termios::Termios |
#[cfg(windows)]
fn setup_term() -> usize {
0
}
#[cfg(unix)]
fn reset_term(term: &mut termios::Termios) {
term.c_lflag.insert(termios::ICANON);
term.c_lflag.insert(termios::ECHO);
termios::tcsetattr(0, termios::TCSADRAIN, &term).unwrap();
}
#[cfg(windows)]
fn reset_term(_: &mut usize) {
}
fn more(matches: getopts::Matches) {
let files = matches.free;
let mut f = File::open(files.first().unwrap()).unwrap();
let mut buffer = [0; 1024];
let mut term = setup_term();
let mut end = false;
while let Ok(sz) = f.read(&mut buffer) {
if sz == 0 { break; }
stdout().write(&buffer[0..sz]).unwrap();
for byte in std::io::stdin().bytes() {
match byte.unwrap() {
b''=> break,
b'q' | 27 => {
end = true;
break;
},
_ => ()
}
}
if end { break;}
}
reset_term(&mut term);
println!("");
}
| {
let mut term = termios::tcgetattr(0).unwrap();
// Unset canonical mode, so we get characters immediately
term.c_lflag.remove(termios::ICANON);
// Disable local echo
term.c_lflag.remove(termios::ECHO);
termios::tcsetattr(0, termios::TCSADRAIN, &term).unwrap();
term
} | identifier_body |
more.rs | #![crate_name = "uu_more"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Martin Kysel <[email protected]>
*
* For the full copyright and license information, please view the LICENSE file
* that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use getopts::Options;
use std::io::{stdout, Write, Read};
use std::fs::File;
#[cfg(unix)]
extern crate nix;
#[cfg(unix)]
use nix::sys::termios;
#[derive(Clone, Eq, PartialEq)]
pub enum | {
More,
Help,
Version,
}
static NAME: &'static str = "more";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("v", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
show_error!("{}", e);
panic!()
},
};
let usage = opts.usage("more TARGET.");
let mode = if matches.opt_present("version") {
Mode::Version
} else if matches.opt_present("help") {
Mode::Help
} else {
Mode::More
};
match mode {
Mode::More => more(matches),
Mode::Help => help(&usage),
Mode::Version => version(),
}
0
}
fn version() {
println!("{} {}", NAME, VERSION);
}
fn help(usage: &str) {
let msg = format!("{0} {1}\n\n\
Usage: {0} TARGET\n \
\n\
{2}", NAME, VERSION, usage);
println!("{}", msg);
}
#[cfg(unix)]
fn setup_term() -> termios::Termios {
let mut term = termios::tcgetattr(0).unwrap();
// Unset canonical mode, so we get characters immediately
term.c_lflag.remove(termios::ICANON);
// Disable local echo
term.c_lflag.remove(termios::ECHO);
termios::tcsetattr(0, termios::TCSADRAIN, &term).unwrap();
term
}
#[cfg(windows)]
fn setup_term() -> usize {
0
}
#[cfg(unix)]
fn reset_term(term: &mut termios::Termios) {
term.c_lflag.insert(termios::ICANON);
term.c_lflag.insert(termios::ECHO);
termios::tcsetattr(0, termios::TCSADRAIN, &term).unwrap();
}
#[cfg(windows)]
fn reset_term(_: &mut usize) {
}
fn more(matches: getopts::Matches) {
let files = matches.free;
let mut f = File::open(files.first().unwrap()).unwrap();
let mut buffer = [0; 1024];
let mut term = setup_term();
let mut end = false;
while let Ok(sz) = f.read(&mut buffer) {
if sz == 0 { break; }
stdout().write(&buffer[0..sz]).unwrap();
for byte in std::io::stdin().bytes() {
match byte.unwrap() {
b''=> break,
b'q' | 27 => {
end = true;
break;
},
_ => ()
}
}
if end { break;}
}
reset_term(&mut term);
println!("");
}
| Mode | identifier_name |
more.rs | #![crate_name = "uu_more"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Martin Kysel <[email protected]>
*
* For the full copyright and license information, please view the LICENSE file
* that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use getopts::Options;
use std::io::{stdout, Write, Read};
use std::fs::File;
#[cfg(unix)]
extern crate nix;
#[cfg(unix)]
use nix::sys::termios;
#[derive(Clone, Eq, PartialEq)]
pub enum Mode {
More,
Help,
Version,
}
static NAME: &'static str = "more";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("v", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
show_error!("{}", e);
panic!()
},
};
let usage = opts.usage("more TARGET.");
let mode = if matches.opt_present("version") {
Mode::Version
} else if matches.opt_present("help") {
Mode::Help
} else {
Mode::More
};
| Mode::Version => version(),
}
0
}
fn version() {
println!("{} {}", NAME, VERSION);
}
fn help(usage: &str) {
let msg = format!("{0} {1}\n\n\
Usage: {0} TARGET\n \
\n\
{2}", NAME, VERSION, usage);
println!("{}", msg);
}
#[cfg(unix)]
fn setup_term() -> termios::Termios {
let mut term = termios::tcgetattr(0).unwrap();
// Unset canonical mode, so we get characters immediately
term.c_lflag.remove(termios::ICANON);
// Disable local echo
term.c_lflag.remove(termios::ECHO);
termios::tcsetattr(0, termios::TCSADRAIN, &term).unwrap();
term
}
#[cfg(windows)]
fn setup_term() -> usize {
0
}
#[cfg(unix)]
fn reset_term(term: &mut termios::Termios) {
term.c_lflag.insert(termios::ICANON);
term.c_lflag.insert(termios::ECHO);
termios::tcsetattr(0, termios::TCSADRAIN, &term).unwrap();
}
#[cfg(windows)]
fn reset_term(_: &mut usize) {
}
fn more(matches: getopts::Matches) {
let files = matches.free;
let mut f = File::open(files.first().unwrap()).unwrap();
let mut buffer = [0; 1024];
let mut term = setup_term();
let mut end = false;
while let Ok(sz) = f.read(&mut buffer) {
if sz == 0 { break; }
stdout().write(&buffer[0..sz]).unwrap();
for byte in std::io::stdin().bytes() {
match byte.unwrap() {
b''=> break,
b'q' | 27 => {
end = true;
break;
},
_ => ()
}
}
if end { break;}
}
reset_term(&mut term);
println!("");
} | match mode {
Mode::More => more(matches),
Mode::Help => help(&usage), | random_line_split |
q_timer_quit.rs | use cpp_core::NullPtr;
use qt_core::{QCoreApplication, QTimer, SlotNoArgs};
use std::cell::RefCell;
use std::rc::Rc;
#[test]
fn timer_quit() | {
QCoreApplication::init(|app| unsafe {
let value = Rc::new(RefCell::new(Some(42)));
let value2 = Rc::clone(&value);
let slot1 = SlotNoArgs::new(NullPtr, move || {
assert_eq!(value2.borrow_mut().take(), Some(42));
});
let c = app.about_to_quit().connect(&slot1);
assert!(c.is_valid());
let timer = QTimer::new_0a();
let c = timer.timeout().connect(app.slot_quit());
assert!(c.is_valid());
timer.start_1a(1000);
let r = QCoreApplication::exec();
assert!(value.borrow().is_none());
r
})
} | identifier_body |
|
q_timer_quit.rs | use cpp_core::NullPtr;
use qt_core::{QCoreApplication, QTimer, SlotNoArgs};
use std::cell::RefCell;
use std::rc::Rc;
#[test]
fn | () {
QCoreApplication::init(|app| unsafe {
let value = Rc::new(RefCell::new(Some(42)));
let value2 = Rc::clone(&value);
let slot1 = SlotNoArgs::new(NullPtr, move || {
assert_eq!(value2.borrow_mut().take(), Some(42));
});
let c = app.about_to_quit().connect(&slot1);
assert!(c.is_valid());
let timer = QTimer::new_0a();
let c = timer.timeout().connect(app.slot_quit());
assert!(c.is_valid());
timer.start_1a(1000);
let r = QCoreApplication::exec();
assert!(value.borrow().is_none());
r
})
}
| timer_quit | identifier_name |
q_timer_quit.rs | use cpp_core::NullPtr;
use qt_core::{QCoreApplication, QTimer, SlotNoArgs};
use std::cell::RefCell;
use std::rc::Rc;
#[test]
fn timer_quit() {
QCoreApplication::init(|app| unsafe {
let value = Rc::new(RefCell::new(Some(42)));
let value2 = Rc::clone(&value);
let slot1 = SlotNoArgs::new(NullPtr, move || { | assert_eq!(value2.borrow_mut().take(), Some(42));
});
let c = app.about_to_quit().connect(&slot1);
assert!(c.is_valid());
let timer = QTimer::new_0a();
let c = timer.timeout().connect(app.slot_quit());
assert!(c.is_valid());
timer.start_1a(1000);
let r = QCoreApplication::exec();
assert!(value.borrow().is_none());
r
})
} | random_line_split |
|
structfields.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// @has structfields/Foo.t.html
// @has - struct.Foo.html
// @has structfields/struct.Foo.html
pub struct Foo {
// @has - //pre "pub a: ()"
pub a: (),
// @has - //pre "// some fields omitted"
// @!has - //pre "b: ()"
b: (),
// @!has - //pre "c: usize"
#[doc(hidden)]
c: usize,
// @has - //pre "pub d: usize"
pub d: usize,
}
// @has structfields/Bar.t.html
// @has - struct.Bar.html
// @has structfields/struct.Bar.html
pub struct Bar {
// @has - //pre "pub a: ()"
pub a: (),
// @!has - //pre "// some fields omitted"
}
// @has structfields/Qux.t.html
// @has - enum.Qux.html
// @has structfields/enum.Qux.html
pub enum Qux {
Quz {
// @has - //pre "a: ()"
a: (),
// @!has - //pre "b: ()"
#[doc(hidden)]
b: (), | }
// @has structfields/struct.Baz.html //pre "pub struct Baz { /* fields omitted */ }"
pub struct Baz {
x: u8,
#[doc(hidden)]
pub y: u8,
}
// @has structfields/struct.Quux.html //pre "pub struct Quux {}"
pub struct Quux {} | // @has - //pre "c: usize"
c: usize,
// @has - //pre "// some fields omitted"
}, | random_line_split |
structfields.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// @has structfields/Foo.t.html
// @has - struct.Foo.html
// @has structfields/struct.Foo.html
pub struct Foo {
// @has - //pre "pub a: ()"
pub a: (),
// @has - //pre "// some fields omitted"
// @!has - //pre "b: ()"
b: (),
// @!has - //pre "c: usize"
#[doc(hidden)]
c: usize,
// @has - //pre "pub d: usize"
pub d: usize,
}
// @has structfields/Bar.t.html
// @has - struct.Bar.html
// @has structfields/struct.Bar.html
pub struct Bar {
// @has - //pre "pub a: ()"
pub a: (),
// @!has - //pre "// some fields omitted"
}
// @has structfields/Qux.t.html
// @has - enum.Qux.html
// @has structfields/enum.Qux.html
pub enum Qux {
Quz {
// @has - //pre "a: ()"
a: (),
// @!has - //pre "b: ()"
#[doc(hidden)]
b: (),
// @has - //pre "c: usize"
c: usize,
// @has - //pre "// some fields omitted"
},
}
// @has structfields/struct.Baz.html //pre "pub struct Baz { /* fields omitted */ }"
pub struct | {
x: u8,
#[doc(hidden)]
pub y: u8,
}
// @has structfields/struct.Quux.html //pre "pub struct Quux {}"
pub struct Quux {}
| Baz | identifier_name |
target.rs | use crate::associations::{HasTable, Identifiable};
use crate::dsl::Find;
use crate::query_dsl::methods::FindDsl;
use crate::query_source::Table;
#[doc(hidden)]
#[derive(Debug)]
pub struct UpdateTarget<Table, WhereClause> {
pub table: Table,
pub where_clause: WhereClause,
}
/// A type which can be passed to [`update`] or [`delete`].
///
/// Apps will never need to implement this type directly. There are three kinds
/// which implement this trait. Tables, queries which have only had `filter`
/// called on them, and types which implement `Identifiable`.
///
/// When a table is passed to `update`, every row in the table will be updated.
/// You can scope this down by calling [`filter`] which will
/// result in `UPDATE your_table SET... WHERE args_to_filter`. Passing a type
/// which implements `Identifiable` is the same as passing
/// `SomeStruct::table().find(some_struct)`.
///
/// [`update`]: crate::update()
/// [`delete`]: crate::delete()
/// [`filter`]: crate::query_builder::UpdateStatement::filter()
pub trait IntoUpdateTarget: HasTable {
/// What is the `WHERE` clause of this target?
type WhereClause;
/// Decomposes `self` into the table and where clause.
fn into_update_target(self) -> UpdateTarget<Self::Table, Self::WhereClause>;
}
impl<T, Tab, V> IntoUpdateTarget for T
where
T: Identifiable<Table = Tab>,
Tab: Table + FindDsl<T::Id>,
Find<Tab, T::Id>: IntoUpdateTarget<Table = Tab, WhereClause = V>,
{
type WhereClause = V;
fn into_update_target(self) -> UpdateTarget<Self::Table, Self::WhereClause> |
}
| {
T::table().find(self.id()).into_update_target()
} | identifier_body |
target.rs | use crate::associations::{HasTable, Identifiable};
use crate::dsl::Find;
use crate::query_dsl::methods::FindDsl;
use crate::query_source::Table;
#[doc(hidden)]
#[derive(Debug)]
pub struct UpdateTarget<Table, WhereClause> {
pub table: Table,
pub where_clause: WhereClause,
}
/// A type which can be passed to [`update`] or [`delete`].
///
/// Apps will never need to implement this type directly. There are three kinds
/// which implement this trait. Tables, queries which have only had `filter`
/// called on them, and types which implement `Identifiable`.
///
/// When a table is passed to `update`, every row in the table will be updated.
/// You can scope this down by calling [`filter`] which will
/// result in `UPDATE your_table SET... WHERE args_to_filter`. Passing a type
/// which implements `Identifiable` is the same as passing
/// `SomeStruct::table().find(some_struct)`.
///
/// [`update`]: crate::update()
/// [`delete`]: crate::delete()
/// [`filter`]: crate::query_builder::UpdateStatement::filter()
pub trait IntoUpdateTarget: HasTable {
/// What is the `WHERE` clause of this target?
type WhereClause;
/// Decomposes `self` into the table and where clause.
fn into_update_target(self) -> UpdateTarget<Self::Table, Self::WhereClause>;
}
impl<T, Tab, V> IntoUpdateTarget for T
where
T: Identifiable<Table = Tab>,
Tab: Table + FindDsl<T::Id>,
Find<Tab, T::Id>: IntoUpdateTarget<Table = Tab, WhereClause = V>,
{
type WhereClause = V;
fn | (self) -> UpdateTarget<Self::Table, Self::WhereClause> {
T::table().find(self.id()).into_update_target()
}
}
| into_update_target | identifier_name |
target.rs | use crate::associations::{HasTable, Identifiable};
use crate::dsl::Find;
use crate::query_dsl::methods::FindDsl;
use crate::query_source::Table;
#[doc(hidden)]
#[derive(Debug)] | pub where_clause: WhereClause,
}
/// A type which can be passed to [`update`] or [`delete`].
///
/// Apps will never need to implement this type directly. There are three kinds
/// which implement this trait. Tables, queries which have only had `filter`
/// called on them, and types which implement `Identifiable`.
///
/// When a table is passed to `update`, every row in the table will be updated.
/// You can scope this down by calling [`filter`] which will
/// result in `UPDATE your_table SET... WHERE args_to_filter`. Passing a type
/// which implements `Identifiable` is the same as passing
/// `SomeStruct::table().find(some_struct)`.
///
/// [`update`]: crate::update()
/// [`delete`]: crate::delete()
/// [`filter`]: crate::query_builder::UpdateStatement::filter()
pub trait IntoUpdateTarget: HasTable {
/// What is the `WHERE` clause of this target?
type WhereClause;
/// Decomposes `self` into the table and where clause.
fn into_update_target(self) -> UpdateTarget<Self::Table, Self::WhereClause>;
}
impl<T, Tab, V> IntoUpdateTarget for T
where
T: Identifiable<Table = Tab>,
Tab: Table + FindDsl<T::Id>,
Find<Tab, T::Id>: IntoUpdateTarget<Table = Tab, WhereClause = V>,
{
type WhereClause = V;
fn into_update_target(self) -> UpdateTarget<Self::Table, Self::WhereClause> {
T::table().find(self.id()).into_update_target()
}
} | pub struct UpdateTarget<Table, WhereClause> {
pub table: Table, | random_line_split |
websocket_loader.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::connector::create_ssl_connector_builder;
use crate::cookie::Cookie;
use crate::fetch::methods::should_be_blocked_due_to_bad_port;
use crate::hosts::replace_host;
use crate::http_loader::HttpState;
use embedder_traits::resources::{self, Resource};
use headers_ext::Host;
use http::header::{self, HeaderMap, HeaderName, HeaderValue};
use http::uri::Authority;
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use net_traits::request::{RequestInit, RequestMode};
use net_traits::{CookieSource, MessageData};
use net_traits::{WebSocketDomAction, WebSocketNetworkEvent};
use openssl::ssl::SslStream;
use servo_config::opts;
use servo_url::ServoUrl;
use std::fs;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use url::Url;
use ws::util::TcpStream;
use ws::{
CloseCode, Factory, Handler, Handshake, Message, Request, Response as WsResponse, Sender,
WebSocket,
};
use ws::{Error as WebSocketError, ErrorKind as WebSocketErrorKind, Result as WebSocketResult};
/// A client for connecting to a websocket server
#[derive(Clone)]
struct Client<'a> {
origin: &'a str,
host: &'a Host,
protocols: &'a [String],
http_state: &'a Arc<HttpState>,
resource_url: &'a ServoUrl,
event_sender: &'a IpcSender<WebSocketNetworkEvent>,
protocol_in_use: Option<String>,
}
impl<'a> Factory for Client<'a> {
type Handler = Self;
fn connection_made(&mut self, _: Sender) -> Self::Handler {
self.clone()
}
fn connection_lost(&mut self, _: Self::Handler) {
let _ = self.event_sender.send(WebSocketNetworkEvent::Fail);
}
}
impl<'a> Handler for Client<'a> {
fn build_request(&mut self, url: &Url) -> WebSocketResult<Request> {
let mut req = Request::from_url(url)?;
req.headers_mut()
.push(("Origin".to_string(), self.origin.as_bytes().to_owned()));
req.headers_mut().push((
"Host".to_string(),
format!("{}", self.host).as_bytes().to_owned(),
));
for protocol in self.protocols {
req.add_protocol(protocol);
}
let mut cookie_jar = self.http_state.cookie_jar.write().unwrap();
if let Some(cookie_list) = cookie_jar.cookies_for_url(self.resource_url, CookieSource::HTTP)
{
req.headers_mut()
.push(("Cookie".into(), cookie_list.as_bytes().to_owned()))
}
Ok(req)
}
fn | (&mut self, shake: Handshake) -> WebSocketResult<()> {
let mut headers = HeaderMap::new();
for &(ref name, ref value) in shake.response.headers().iter() {
let name = HeaderName::from_bytes(name.as_bytes()).unwrap();
let value = HeaderValue::from_bytes(&value).unwrap();
headers.insert(name, value);
}
let mut jar = self.http_state.cookie_jar.write().unwrap();
// TODO(eijebong): Replace thise once typed headers settled on a cookie impl
for cookie in headers.get_all(header::SET_COOKIE) {
if let Ok(s) = cookie.to_str() {
if let Some(cookie) =
Cookie::from_cookie_string(s.into(), self.resource_url, CookieSource::HTTP)
{
jar.push(cookie, self.resource_url, CookieSource::HTTP);
}
}
}
let _ = self
.event_sender
.send(WebSocketNetworkEvent::ConnectionEstablished {
protocol_in_use: self.protocol_in_use.clone(),
});
Ok(())
}
fn on_message(&mut self, message: Message) -> WebSocketResult<()> {
let message = match message {
Message::Text(message) => MessageData::Text(message),
Message::Binary(message) => MessageData::Binary(message),
};
let _ = self
.event_sender
.send(WebSocketNetworkEvent::MessageReceived(message));
Ok(())
}
fn on_error(&mut self, err: WebSocketError) {
debug!("Error in WebSocket communication: {:?}", err);
let _ = self.event_sender.send(WebSocketNetworkEvent::Fail);
}
fn on_response(&mut self, res: &WsResponse) -> WebSocketResult<()> {
let protocol_in_use = res.protocol()?;
if let Some(protocol_name) = protocol_in_use {
if!self.protocols.is_empty() &&!self.protocols.iter().any(|p| protocol_name == (*p)) {
let error = WebSocketError::new(
WebSocketErrorKind::Protocol,
"Protocol in Use not in client-supplied protocol list",
);
return Err(error);
}
self.protocol_in_use = Some(protocol_name.into());
}
Ok(())
}
fn on_close(&mut self, code: CloseCode, reason: &str) {
debug!("Connection closing due to ({:?}) {}", code, reason);
let _ = self.event_sender.send(WebSocketNetworkEvent::Close(
Some(code.into()),
reason.to_owned(),
));
}
fn upgrade_ssl_client(
&mut self,
stream: TcpStream,
url: &Url,
) -> WebSocketResult<SslStream<TcpStream>> {
let certs = match opts::get().certificate_path {
Some(ref path) => fs::read_to_string(path).expect("Couldn't not find certificate file"),
None => resources::read_string(Resource::SSLCertificates),
};
let domain = self
.resource_url
.as_url()
.domain()
.ok_or(WebSocketError::new(
WebSocketErrorKind::Protocol,
format!("Unable to parse domain from {}. Needed for SSL.", url),
))?;
let connector = create_ssl_connector_builder(&certs).build();
connector
.connect(domain, stream)
.map_err(WebSocketError::from)
}
}
pub fn init(
req_init: RequestInit,
resource_event_sender: IpcSender<WebSocketNetworkEvent>,
dom_action_receiver: IpcReceiver<WebSocketDomAction>,
http_state: Arc<HttpState>,
) {
thread::Builder::new()
.name(format!("WebSocket connection to {}", req_init.url))
.spawn(move || {
let protocols = match req_init.mode {
RequestMode::WebSocket { protocols } => protocols.clone(),
_ => panic!("Received a RequestInit with a non-websocket mode in websocket_loader"),
};
let scheme = req_init.url.scheme();
let mut req_url = req_init.url.clone();
if scheme == "ws" {
req_url.as_mut_url().set_scheme("http").unwrap();
} else if scheme == "wss" {
req_url.as_mut_url().set_scheme("https").unwrap();
}
if should_be_blocked_due_to_bad_port(&req_url) {
debug!("Failed to establish a WebSocket connection: port blocked");
let _ = resource_event_sender.send(WebSocketNetworkEvent::Fail);
return;
}
let host = replace_host(req_init.url.host_str().unwrap());
let mut net_url = req_init.url.clone().into_url();
net_url.set_host(Some(&host)).unwrap();
let host = Host::from(
format!(
"{}{}",
req_init.url.host_str().unwrap(),
req_init
.url
.port_or_known_default()
.map(|v| format!(":{}", v))
.unwrap_or("".into())
)
.parse::<Authority>()
.unwrap(),
);
let client = Client {
origin: &req_init.origin.ascii_serialization(),
host: &host,
protocols: &protocols,
http_state: &http_state,
resource_url: &req_init.url,
event_sender: &resource_event_sender,
protocol_in_use: None,
};
let mut ws = WebSocket::new(client).unwrap();
if let Err(e) = ws.connect(net_url) {
debug!("Failed to establish a WebSocket connection: {:?}", e);
return;
};
let ws_sender = ws.broadcaster();
let initiated_close = Arc::new(AtomicBool::new(false));
thread::spawn(move || {
while let Ok(dom_action) = dom_action_receiver.recv() {
match dom_action {
WebSocketDomAction::SendMessage(MessageData::Text(data)) => {
ws_sender.send(Message::text(data)).unwrap();
},
WebSocketDomAction::SendMessage(MessageData::Binary(data)) => {
ws_sender.send(Message::binary(data)).unwrap();
},
WebSocketDomAction::Close(code, reason) => {
if!initiated_close.fetch_or(true, Ordering::SeqCst) {
match code {
Some(code) => ws_sender
.close_with_reason(
code.into(),
reason.unwrap_or("".to_owned()),
)
.unwrap(),
None => ws_sender.close(CloseCode::Status).unwrap(),
};
}
},
}
}
});
if let Err(e) = ws.run() {
debug!("Failed to run WebSocket: {:?}", e);
let _ = resource_event_sender.send(WebSocketNetworkEvent::Fail);
};
})
.expect("Thread spawning failed");
}
| on_open | identifier_name |
websocket_loader.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::connector::create_ssl_connector_builder;
use crate::cookie::Cookie;
use crate::fetch::methods::should_be_blocked_due_to_bad_port;
use crate::hosts::replace_host;
use crate::http_loader::HttpState;
use embedder_traits::resources::{self, Resource};
use headers_ext::Host;
use http::header::{self, HeaderMap, HeaderName, HeaderValue};
use http::uri::Authority;
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use net_traits::request::{RequestInit, RequestMode};
use net_traits::{CookieSource, MessageData};
use net_traits::{WebSocketDomAction, WebSocketNetworkEvent};
use openssl::ssl::SslStream;
use servo_config::opts;
use servo_url::ServoUrl;
use std::fs;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use url::Url;
use ws::util::TcpStream;
use ws::{
CloseCode, Factory, Handler, Handshake, Message, Request, Response as WsResponse, Sender,
WebSocket,
};
use ws::{Error as WebSocketError, ErrorKind as WebSocketErrorKind, Result as WebSocketResult};
/// A client for connecting to a websocket server
#[derive(Clone)]
struct Client<'a> {
origin: &'a str,
host: &'a Host,
protocols: &'a [String],
http_state: &'a Arc<HttpState>,
resource_url: &'a ServoUrl,
event_sender: &'a IpcSender<WebSocketNetworkEvent>,
protocol_in_use: Option<String>,
}
impl<'a> Factory for Client<'a> {
type Handler = Self;
fn connection_made(&mut self, _: Sender) -> Self::Handler {
self.clone()
}
fn connection_lost(&mut self, _: Self::Handler) {
let _ = self.event_sender.send(WebSocketNetworkEvent::Fail);
}
}
impl<'a> Handler for Client<'a> {
fn build_request(&mut self, url: &Url) -> WebSocketResult<Request> {
let mut req = Request::from_url(url)?;
req.headers_mut()
.push(("Origin".to_string(), self.origin.as_bytes().to_owned()));
req.headers_mut().push((
"Host".to_string(),
format!("{}", self.host).as_bytes().to_owned(),
));
for protocol in self.protocols {
req.add_protocol(protocol);
}
let mut cookie_jar = self.http_state.cookie_jar.write().unwrap();
if let Some(cookie_list) = cookie_jar.cookies_for_url(self.resource_url, CookieSource::HTTP)
{
req.headers_mut()
.push(("Cookie".into(), cookie_list.as_bytes().to_owned()))
}
Ok(req)
}
fn on_open(&mut self, shake: Handshake) -> WebSocketResult<()> {
let mut headers = HeaderMap::new();
for &(ref name, ref value) in shake.response.headers().iter() {
let name = HeaderName::from_bytes(name.as_bytes()).unwrap();
let value = HeaderValue::from_bytes(&value).unwrap();
headers.insert(name, value);
}
let mut jar = self.http_state.cookie_jar.write().unwrap();
// TODO(eijebong): Replace thise once typed headers settled on a cookie impl
for cookie in headers.get_all(header::SET_COOKIE) {
if let Ok(s) = cookie.to_str() {
if let Some(cookie) =
Cookie::from_cookie_string(s.into(), self.resource_url, CookieSource::HTTP)
{
jar.push(cookie, self.resource_url, CookieSource::HTTP);
}
}
}
let _ = self
.event_sender
.send(WebSocketNetworkEvent::ConnectionEstablished {
protocol_in_use: self.protocol_in_use.clone(),
});
Ok(())
}
fn on_message(&mut self, message: Message) -> WebSocketResult<()> {
let message = match message {
Message::Text(message) => MessageData::Text(message),
Message::Binary(message) => MessageData::Binary(message),
};
let _ = self
.event_sender
.send(WebSocketNetworkEvent::MessageReceived(message));
Ok(())
}
fn on_error(&mut self, err: WebSocketError) {
debug!("Error in WebSocket communication: {:?}", err);
let _ = self.event_sender.send(WebSocketNetworkEvent::Fail);
}
fn on_response(&mut self, res: &WsResponse) -> WebSocketResult<()> {
let protocol_in_use = res.protocol()?;
if let Some(protocol_name) = protocol_in_use {
if!self.protocols.is_empty() &&!self.protocols.iter().any(|p| protocol_name == (*p)) {
let error = WebSocketError::new(
WebSocketErrorKind::Protocol,
"Protocol in Use not in client-supplied protocol list",
);
return Err(error);
}
self.protocol_in_use = Some(protocol_name.into());
}
Ok(())
}
fn on_close(&mut self, code: CloseCode, reason: &str) {
debug!("Connection closing due to ({:?}) {}", code, reason);
let _ = self.event_sender.send(WebSocketNetworkEvent::Close(
Some(code.into()),
reason.to_owned(),
));
}
fn upgrade_ssl_client(
&mut self,
stream: TcpStream,
url: &Url,
) -> WebSocketResult<SslStream<TcpStream>> {
let certs = match opts::get().certificate_path {
Some(ref path) => fs::read_to_string(path).expect("Couldn't not find certificate file"),
None => resources::read_string(Resource::SSLCertificates),
};
let domain = self
.resource_url
.as_url()
.domain()
.ok_or(WebSocketError::new(
WebSocketErrorKind::Protocol,
format!("Unable to parse domain from {}. Needed for SSL.", url),
))?;
let connector = create_ssl_connector_builder(&certs).build();
connector
.connect(domain, stream)
.map_err(WebSocketError::from)
}
}
pub fn init(
req_init: RequestInit,
resource_event_sender: IpcSender<WebSocketNetworkEvent>,
dom_action_receiver: IpcReceiver<WebSocketDomAction>,
http_state: Arc<HttpState>,
) | return;
}
let host = replace_host(req_init.url.host_str().unwrap());
let mut net_url = req_init.url.clone().into_url();
net_url.set_host(Some(&host)).unwrap();
let host = Host::from(
format!(
"{}{}",
req_init.url.host_str().unwrap(),
req_init
.url
.port_or_known_default()
.map(|v| format!(":{}", v))
.unwrap_or("".into())
)
.parse::<Authority>()
.unwrap(),
);
let client = Client {
origin: &req_init.origin.ascii_serialization(),
host: &host,
protocols: &protocols,
http_state: &http_state,
resource_url: &req_init.url,
event_sender: &resource_event_sender,
protocol_in_use: None,
};
let mut ws = WebSocket::new(client).unwrap();
if let Err(e) = ws.connect(net_url) {
debug!("Failed to establish a WebSocket connection: {:?}", e);
return;
};
let ws_sender = ws.broadcaster();
let initiated_close = Arc::new(AtomicBool::new(false));
thread::spawn(move || {
while let Ok(dom_action) = dom_action_receiver.recv() {
match dom_action {
WebSocketDomAction::SendMessage(MessageData::Text(data)) => {
ws_sender.send(Message::text(data)).unwrap();
},
WebSocketDomAction::SendMessage(MessageData::Binary(data)) => {
ws_sender.send(Message::binary(data)).unwrap();
},
WebSocketDomAction::Close(code, reason) => {
if!initiated_close.fetch_or(true, Ordering::SeqCst) {
match code {
Some(code) => ws_sender
.close_with_reason(
code.into(),
reason.unwrap_or("".to_owned()),
)
.unwrap(),
None => ws_sender.close(CloseCode::Status).unwrap(),
};
}
},
}
}
});
if let Err(e) = ws.run() {
debug!("Failed to run WebSocket: {:?}", e);
let _ = resource_event_sender.send(WebSocketNetworkEvent::Fail);
};
})
.expect("Thread spawning failed");
}
| {
thread::Builder::new()
.name(format!("WebSocket connection to {}", req_init.url))
.spawn(move || {
let protocols = match req_init.mode {
RequestMode::WebSocket { protocols } => protocols.clone(),
_ => panic!("Received a RequestInit with a non-websocket mode in websocket_loader"),
};
let scheme = req_init.url.scheme();
let mut req_url = req_init.url.clone();
if scheme == "ws" {
req_url.as_mut_url().set_scheme("http").unwrap();
} else if scheme == "wss" {
req_url.as_mut_url().set_scheme("https").unwrap();
}
if should_be_blocked_due_to_bad_port(&req_url) {
debug!("Failed to establish a WebSocket connection: port blocked");
let _ = resource_event_sender.send(WebSocketNetworkEvent::Fail); | identifier_body |
websocket_loader.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::connector::create_ssl_connector_builder;
use crate::cookie::Cookie;
use crate::fetch::methods::should_be_blocked_due_to_bad_port;
use crate::hosts::replace_host;
use crate::http_loader::HttpState;
use embedder_traits::resources::{self, Resource};
use headers_ext::Host;
use http::header::{self, HeaderMap, HeaderName, HeaderValue};
use http::uri::Authority;
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use net_traits::request::{RequestInit, RequestMode};
use net_traits::{CookieSource, MessageData};
use net_traits::{WebSocketDomAction, WebSocketNetworkEvent};
use openssl::ssl::SslStream;
use servo_config::opts;
use servo_url::ServoUrl;
use std::fs;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use url::Url;
use ws::util::TcpStream;
use ws::{
CloseCode, Factory, Handler, Handshake, Message, Request, Response as WsResponse, Sender,
WebSocket,
};
use ws::{Error as WebSocketError, ErrorKind as WebSocketErrorKind, Result as WebSocketResult};
/// A client for connecting to a websocket server
#[derive(Clone)]
struct Client<'a> {
origin: &'a str,
host: &'a Host,
protocols: &'a [String],
http_state: &'a Arc<HttpState>,
resource_url: &'a ServoUrl,
event_sender: &'a IpcSender<WebSocketNetworkEvent>,
protocol_in_use: Option<String>,
} | fn connection_made(&mut self, _: Sender) -> Self::Handler {
self.clone()
}
fn connection_lost(&mut self, _: Self::Handler) {
let _ = self.event_sender.send(WebSocketNetworkEvent::Fail);
}
}
impl<'a> Handler for Client<'a> {
fn build_request(&mut self, url: &Url) -> WebSocketResult<Request> {
let mut req = Request::from_url(url)?;
req.headers_mut()
.push(("Origin".to_string(), self.origin.as_bytes().to_owned()));
req.headers_mut().push((
"Host".to_string(),
format!("{}", self.host).as_bytes().to_owned(),
));
for protocol in self.protocols {
req.add_protocol(protocol);
}
let mut cookie_jar = self.http_state.cookie_jar.write().unwrap();
if let Some(cookie_list) = cookie_jar.cookies_for_url(self.resource_url, CookieSource::HTTP)
{
req.headers_mut()
.push(("Cookie".into(), cookie_list.as_bytes().to_owned()))
}
Ok(req)
}
fn on_open(&mut self, shake: Handshake) -> WebSocketResult<()> {
let mut headers = HeaderMap::new();
for &(ref name, ref value) in shake.response.headers().iter() {
let name = HeaderName::from_bytes(name.as_bytes()).unwrap();
let value = HeaderValue::from_bytes(&value).unwrap();
headers.insert(name, value);
}
let mut jar = self.http_state.cookie_jar.write().unwrap();
// TODO(eijebong): Replace thise once typed headers settled on a cookie impl
for cookie in headers.get_all(header::SET_COOKIE) {
if let Ok(s) = cookie.to_str() {
if let Some(cookie) =
Cookie::from_cookie_string(s.into(), self.resource_url, CookieSource::HTTP)
{
jar.push(cookie, self.resource_url, CookieSource::HTTP);
}
}
}
let _ = self
.event_sender
.send(WebSocketNetworkEvent::ConnectionEstablished {
protocol_in_use: self.protocol_in_use.clone(),
});
Ok(())
}
fn on_message(&mut self, message: Message) -> WebSocketResult<()> {
let message = match message {
Message::Text(message) => MessageData::Text(message),
Message::Binary(message) => MessageData::Binary(message),
};
let _ = self
.event_sender
.send(WebSocketNetworkEvent::MessageReceived(message));
Ok(())
}
fn on_error(&mut self, err: WebSocketError) {
debug!("Error in WebSocket communication: {:?}", err);
let _ = self.event_sender.send(WebSocketNetworkEvent::Fail);
}
fn on_response(&mut self, res: &WsResponse) -> WebSocketResult<()> {
let protocol_in_use = res.protocol()?;
if let Some(protocol_name) = protocol_in_use {
if!self.protocols.is_empty() &&!self.protocols.iter().any(|p| protocol_name == (*p)) {
let error = WebSocketError::new(
WebSocketErrorKind::Protocol,
"Protocol in Use not in client-supplied protocol list",
);
return Err(error);
}
self.protocol_in_use = Some(protocol_name.into());
}
Ok(())
}
fn on_close(&mut self, code: CloseCode, reason: &str) {
debug!("Connection closing due to ({:?}) {}", code, reason);
let _ = self.event_sender.send(WebSocketNetworkEvent::Close(
Some(code.into()),
reason.to_owned(),
));
}
fn upgrade_ssl_client(
&mut self,
stream: TcpStream,
url: &Url,
) -> WebSocketResult<SslStream<TcpStream>> {
let certs = match opts::get().certificate_path {
Some(ref path) => fs::read_to_string(path).expect("Couldn't not find certificate file"),
None => resources::read_string(Resource::SSLCertificates),
};
let domain = self
.resource_url
.as_url()
.domain()
.ok_or(WebSocketError::new(
WebSocketErrorKind::Protocol,
format!("Unable to parse domain from {}. Needed for SSL.", url),
))?;
let connector = create_ssl_connector_builder(&certs).build();
connector
.connect(domain, stream)
.map_err(WebSocketError::from)
}
}
pub fn init(
req_init: RequestInit,
resource_event_sender: IpcSender<WebSocketNetworkEvent>,
dom_action_receiver: IpcReceiver<WebSocketDomAction>,
http_state: Arc<HttpState>,
) {
thread::Builder::new()
.name(format!("WebSocket connection to {}", req_init.url))
.spawn(move || {
let protocols = match req_init.mode {
RequestMode::WebSocket { protocols } => protocols.clone(),
_ => panic!("Received a RequestInit with a non-websocket mode in websocket_loader"),
};
let scheme = req_init.url.scheme();
let mut req_url = req_init.url.clone();
if scheme == "ws" {
req_url.as_mut_url().set_scheme("http").unwrap();
} else if scheme == "wss" {
req_url.as_mut_url().set_scheme("https").unwrap();
}
if should_be_blocked_due_to_bad_port(&req_url) {
debug!("Failed to establish a WebSocket connection: port blocked");
let _ = resource_event_sender.send(WebSocketNetworkEvent::Fail);
return;
}
let host = replace_host(req_init.url.host_str().unwrap());
let mut net_url = req_init.url.clone().into_url();
net_url.set_host(Some(&host)).unwrap();
let host = Host::from(
format!(
"{}{}",
req_init.url.host_str().unwrap(),
req_init
.url
.port_or_known_default()
.map(|v| format!(":{}", v))
.unwrap_or("".into())
)
.parse::<Authority>()
.unwrap(),
);
let client = Client {
origin: &req_init.origin.ascii_serialization(),
host: &host,
protocols: &protocols,
http_state: &http_state,
resource_url: &req_init.url,
event_sender: &resource_event_sender,
protocol_in_use: None,
};
let mut ws = WebSocket::new(client).unwrap();
if let Err(e) = ws.connect(net_url) {
debug!("Failed to establish a WebSocket connection: {:?}", e);
return;
};
let ws_sender = ws.broadcaster();
let initiated_close = Arc::new(AtomicBool::new(false));
thread::spawn(move || {
while let Ok(dom_action) = dom_action_receiver.recv() {
match dom_action {
WebSocketDomAction::SendMessage(MessageData::Text(data)) => {
ws_sender.send(Message::text(data)).unwrap();
},
WebSocketDomAction::SendMessage(MessageData::Binary(data)) => {
ws_sender.send(Message::binary(data)).unwrap();
},
WebSocketDomAction::Close(code, reason) => {
if!initiated_close.fetch_or(true, Ordering::SeqCst) {
match code {
Some(code) => ws_sender
.close_with_reason(
code.into(),
reason.unwrap_or("".to_owned()),
)
.unwrap(),
None => ws_sender.close(CloseCode::Status).unwrap(),
};
}
},
}
}
});
if let Err(e) = ws.run() {
debug!("Failed to run WebSocket: {:?}", e);
let _ = resource_event_sender.send(WebSocketNetworkEvent::Fail);
};
})
.expect("Thread spawning failed");
} |
impl<'a> Factory for Client<'a> {
type Handler = Self;
| random_line_split |
websocket_loader.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::connector::create_ssl_connector_builder;
use crate::cookie::Cookie;
use crate::fetch::methods::should_be_blocked_due_to_bad_port;
use crate::hosts::replace_host;
use crate::http_loader::HttpState;
use embedder_traits::resources::{self, Resource};
use headers_ext::Host;
use http::header::{self, HeaderMap, HeaderName, HeaderValue};
use http::uri::Authority;
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use net_traits::request::{RequestInit, RequestMode};
use net_traits::{CookieSource, MessageData};
use net_traits::{WebSocketDomAction, WebSocketNetworkEvent};
use openssl::ssl::SslStream;
use servo_config::opts;
use servo_url::ServoUrl;
use std::fs;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use url::Url;
use ws::util::TcpStream;
use ws::{
CloseCode, Factory, Handler, Handshake, Message, Request, Response as WsResponse, Sender,
WebSocket,
};
use ws::{Error as WebSocketError, ErrorKind as WebSocketErrorKind, Result as WebSocketResult};
/// A client for connecting to a websocket server
#[derive(Clone)]
struct Client<'a> {
origin: &'a str,
host: &'a Host,
protocols: &'a [String],
http_state: &'a Arc<HttpState>,
resource_url: &'a ServoUrl,
event_sender: &'a IpcSender<WebSocketNetworkEvent>,
protocol_in_use: Option<String>,
}
impl<'a> Factory for Client<'a> {
type Handler = Self;
fn connection_made(&mut self, _: Sender) -> Self::Handler {
self.clone()
}
fn connection_lost(&mut self, _: Self::Handler) {
let _ = self.event_sender.send(WebSocketNetworkEvent::Fail);
}
}
impl<'a> Handler for Client<'a> {
fn build_request(&mut self, url: &Url) -> WebSocketResult<Request> {
let mut req = Request::from_url(url)?;
req.headers_mut()
.push(("Origin".to_string(), self.origin.as_bytes().to_owned()));
req.headers_mut().push((
"Host".to_string(),
format!("{}", self.host).as_bytes().to_owned(),
));
for protocol in self.protocols {
req.add_protocol(protocol);
}
let mut cookie_jar = self.http_state.cookie_jar.write().unwrap();
if let Some(cookie_list) = cookie_jar.cookies_for_url(self.resource_url, CookieSource::HTTP)
{
req.headers_mut()
.push(("Cookie".into(), cookie_list.as_bytes().to_owned()))
}
Ok(req)
}
fn on_open(&mut self, shake: Handshake) -> WebSocketResult<()> {
let mut headers = HeaderMap::new();
for &(ref name, ref value) in shake.response.headers().iter() {
let name = HeaderName::from_bytes(name.as_bytes()).unwrap();
let value = HeaderValue::from_bytes(&value).unwrap();
headers.insert(name, value);
}
let mut jar = self.http_state.cookie_jar.write().unwrap();
// TODO(eijebong): Replace thise once typed headers settled on a cookie impl
for cookie in headers.get_all(header::SET_COOKIE) {
if let Ok(s) = cookie.to_str() {
if let Some(cookie) =
Cookie::from_cookie_string(s.into(), self.resource_url, CookieSource::HTTP)
{
jar.push(cookie, self.resource_url, CookieSource::HTTP);
}
}
}
let _ = self
.event_sender
.send(WebSocketNetworkEvent::ConnectionEstablished {
protocol_in_use: self.protocol_in_use.clone(),
});
Ok(())
}
fn on_message(&mut self, message: Message) -> WebSocketResult<()> {
let message = match message {
Message::Text(message) => MessageData::Text(message),
Message::Binary(message) => MessageData::Binary(message),
};
let _ = self
.event_sender
.send(WebSocketNetworkEvent::MessageReceived(message));
Ok(())
}
fn on_error(&mut self, err: WebSocketError) {
debug!("Error in WebSocket communication: {:?}", err);
let _ = self.event_sender.send(WebSocketNetworkEvent::Fail);
}
fn on_response(&mut self, res: &WsResponse) -> WebSocketResult<()> {
let protocol_in_use = res.protocol()?;
if let Some(protocol_name) = protocol_in_use {
if!self.protocols.is_empty() &&!self.protocols.iter().any(|p| protocol_name == (*p)) {
let error = WebSocketError::new(
WebSocketErrorKind::Protocol,
"Protocol in Use not in client-supplied protocol list",
);
return Err(error);
}
self.protocol_in_use = Some(protocol_name.into());
}
Ok(())
}
fn on_close(&mut self, code: CloseCode, reason: &str) {
debug!("Connection closing due to ({:?}) {}", code, reason);
let _ = self.event_sender.send(WebSocketNetworkEvent::Close(
Some(code.into()),
reason.to_owned(),
));
}
fn upgrade_ssl_client(
&mut self,
stream: TcpStream,
url: &Url,
) -> WebSocketResult<SslStream<TcpStream>> {
let certs = match opts::get().certificate_path {
Some(ref path) => fs::read_to_string(path).expect("Couldn't not find certificate file"),
None => resources::read_string(Resource::SSLCertificates),
};
let domain = self
.resource_url
.as_url()
.domain()
.ok_or(WebSocketError::new(
WebSocketErrorKind::Protocol,
format!("Unable to parse domain from {}. Needed for SSL.", url),
))?;
let connector = create_ssl_connector_builder(&certs).build();
connector
.connect(domain, stream)
.map_err(WebSocketError::from)
}
}
pub fn init(
req_init: RequestInit,
resource_event_sender: IpcSender<WebSocketNetworkEvent>,
dom_action_receiver: IpcReceiver<WebSocketDomAction>,
http_state: Arc<HttpState>,
) {
thread::Builder::new()
.name(format!("WebSocket connection to {}", req_init.url))
.spawn(move || {
let protocols = match req_init.mode {
RequestMode::WebSocket { protocols } => protocols.clone(),
_ => panic!("Received a RequestInit with a non-websocket mode in websocket_loader"),
};
let scheme = req_init.url.scheme();
let mut req_url = req_init.url.clone();
if scheme == "ws" {
req_url.as_mut_url().set_scheme("http").unwrap();
} else if scheme == "wss" {
req_url.as_mut_url().set_scheme("https").unwrap();
}
if should_be_blocked_due_to_bad_port(&req_url) {
debug!("Failed to establish a WebSocket connection: port blocked");
let _ = resource_event_sender.send(WebSocketNetworkEvent::Fail);
return;
}
let host = replace_host(req_init.url.host_str().unwrap());
let mut net_url = req_init.url.clone().into_url();
net_url.set_host(Some(&host)).unwrap();
let host = Host::from(
format!(
"{}{}",
req_init.url.host_str().unwrap(),
req_init
.url
.port_or_known_default()
.map(|v| format!(":{}", v))
.unwrap_or("".into())
)
.parse::<Authority>()
.unwrap(),
);
let client = Client {
origin: &req_init.origin.ascii_serialization(),
host: &host,
protocols: &protocols,
http_state: &http_state,
resource_url: &req_init.url,
event_sender: &resource_event_sender,
protocol_in_use: None,
};
let mut ws = WebSocket::new(client).unwrap();
if let Err(e) = ws.connect(net_url) {
debug!("Failed to establish a WebSocket connection: {:?}", e);
return;
};
let ws_sender = ws.broadcaster();
let initiated_close = Arc::new(AtomicBool::new(false));
thread::spawn(move || {
while let Ok(dom_action) = dom_action_receiver.recv() {
match dom_action {
WebSocketDomAction::SendMessage(MessageData::Text(data)) => {
ws_sender.send(Message::text(data)).unwrap();
},
WebSocketDomAction::SendMessage(MessageData::Binary(data)) => {
ws_sender.send(Message::binary(data)).unwrap();
},
WebSocketDomAction::Close(code, reason) => | ,
}
}
});
if let Err(e) = ws.run() {
debug!("Failed to run WebSocket: {:?}", e);
let _ = resource_event_sender.send(WebSocketNetworkEvent::Fail);
};
})
.expect("Thread spawning failed");
}
| {
if !initiated_close.fetch_or(true, Ordering::SeqCst) {
match code {
Some(code) => ws_sender
.close_with_reason(
code.into(),
reason.unwrap_or("".to_owned()),
)
.unwrap(),
None => ws_sender.close(CloseCode::Status).unwrap(),
};
}
} | conditional_block |
host.rs | //! The Host request header, defined in RFC 2616, Section 14.23.
use std::io::Reader;
/// A simple little thing for the host of a request
#[deriving(Clone, Eq)]
pub struct Host {
/// The name of the host that was requested
name: ~str,
/// If unspecified, assume the default port was used (80 for HTTP, 443 for HTTPS).
/// In that case, you shouldn't need to worry about it in URLs that you build, provided you
/// include the scheme.
port: Option<u16>,
}
impl ToStr for Host {
fn to_str(&self) -> ~str {
match self.port {
Some(port) => format!("{}:{}", self.name, port.to_str()),
None => self.name.clone(),
}
}
}
impl super::HeaderConvertible for Host {
fn from_stream<T: Reader>(reader: &mut super::HeaderValueByteIterator<T>) -> Option<Host> {
let s = reader.collect_to_str();
// TODO: this doesn't support IPv6 address access (e.g. "[::1]")
// Do this properly with correct authority parsing.
let mut hi = s.splitn(':', 1);
Some(Host { | None => None,
},
})
}
fn http_value(&self) -> ~str {
self.to_str()
}
} | name: hi.next().unwrap().to_owned(),
port: match hi.next() {
Some(name) => from_str::<u16>(name), | random_line_split |
host.rs | //! The Host request header, defined in RFC 2616, Section 14.23.
use std::io::Reader;
/// A simple little thing for the host of a request
#[deriving(Clone, Eq)]
pub struct | {
/// The name of the host that was requested
name: ~str,
/// If unspecified, assume the default port was used (80 for HTTP, 443 for HTTPS).
/// In that case, you shouldn't need to worry about it in URLs that you build, provided you
/// include the scheme.
port: Option<u16>,
}
impl ToStr for Host {
fn to_str(&self) -> ~str {
match self.port {
Some(port) => format!("{}:{}", self.name, port.to_str()),
None => self.name.clone(),
}
}
}
impl super::HeaderConvertible for Host {
fn from_stream<T: Reader>(reader: &mut super::HeaderValueByteIterator<T>) -> Option<Host> {
let s = reader.collect_to_str();
// TODO: this doesn't support IPv6 address access (e.g. "[::1]")
// Do this properly with correct authority parsing.
let mut hi = s.splitn(':', 1);
Some(Host {
name: hi.next().unwrap().to_owned(),
port: match hi.next() {
Some(name) => from_str::<u16>(name),
None => None,
},
})
}
fn http_value(&self) -> ~str {
self.to_str()
}
}
| Host | identifier_name |
host.rs | //! The Host request header, defined in RFC 2616, Section 14.23.
use std::io::Reader;
/// A simple little thing for the host of a request
#[deriving(Clone, Eq)]
pub struct Host {
/// The name of the host that was requested
name: ~str,
/// If unspecified, assume the default port was used (80 for HTTP, 443 for HTTPS).
/// In that case, you shouldn't need to worry about it in URLs that you build, provided you
/// include the scheme.
port: Option<u16>,
}
impl ToStr for Host {
fn to_str(&self) -> ~str {
match self.port {
Some(port) => format!("{}:{}", self.name, port.to_str()),
None => self.name.clone(),
}
}
}
impl super::HeaderConvertible for Host {
fn from_stream<T: Reader>(reader: &mut super::HeaderValueByteIterator<T>) -> Option<Host> {
let s = reader.collect_to_str();
// TODO: this doesn't support IPv6 address access (e.g. "[::1]")
// Do this properly with correct authority parsing.
let mut hi = s.splitn(':', 1);
Some(Host {
name: hi.next().unwrap().to_owned(),
port: match hi.next() {
Some(name) => from_str::<u16>(name),
None => None,
},
})
}
fn http_value(&self) -> ~str |
}
| {
self.to_str()
} | identifier_body |
lib.rs | //! Weave deltas, inspired by SCCS.
//!
//! The [SCCS](https://en.wikipedia.org/wiki/Source_Code_Control_System) revision control system is
//! one of the oldest source code management systems (1973). Although many of its concepts are
//! quite dated in these days of git, the underlying "weave" delta format it used turns out to be a
//! good way of representing multiple versions of data that differ only in parts.
//!
//! This package implements a weave-based storage of "plain text", where plain text consists of
//! lines of UTF-8 printable characters separated by a newline.
//!
//! The format is similar to SCCS, but with no constraints to keep what are relatively poor design
//! decisions from SCCS, such as putting a checksum at the top of the file, and using limited-sized
//! field for values such as the number of lines in a file, or the use of 2-digit years. However,
//! the main body of the weaved file, that which describes inserts and deletes is the same, and
//! allows us to test this version by comparing with the storage of sccs.
//!
//! Weave files are written using [`NewWeave`], which works like a regular file writer. The file
//! itself has a small amount of surrounding metadata, but is otherwise mostly just the contents of
//! the initial file.
//!
//! Adding a delta to a weave file is done with the [`DeltaWriter`]. This is also written to, as a
//! regular file, and then [`DeltaWriter::close`] method will extract a base revision and use the
//! `diff` command to write a new version of the weave. The `close` method will make several
//! temporary files in the process.
//!
//! The weave data is stored using a [`NamingConvention`], a trait that manages a related
//! collection of files, and temp files. [`SimpleNaming`] is a basic representation of this that
//! has a base name, a backup file, and some temporary files. The data in the file can be
//! compressed.
#![warn(bare_trait_objects)]
mod delta;
mod errors;
mod header;
mod naming;
mod newweave;
mod parse;
pub use crate::{
delta::DeltaWriter,
errors::{Error, Result},
header::{DeltaInfo, Header},
naming::NamingConvention,
naming::SimpleNaming,
naming::Compression,
newweave::NewWeave,
parse::{Entry, Parser, PullParser, Sink},
};
use std::{io::Write, path::PathBuf};
/// Something we can write into, that remembers its name. The writer is boxed because the writer
/// may be compressed.
pub struct WriterInfo {
name: PathBuf,
writer: Box<dyn Write>,
}
/// Read the header from a weave file.
pub fn read_header(naming: &dyn NamingConvention) -> Result<Header> {
Ok(PullParser::new(naming, 1)?.into_header())
}
/// Retrieve the last delta in the weave file. Will panic if the weave file is malformed and
/// contains no revisions.
pub fn | (naming: &dyn NamingConvention) -> Result<usize> {
let header = read_header(naming)?;
Ok(header
.deltas
.iter()
.map(|x| x.number)
.max()
.expect("at least one delta in weave file"))
}
| get_last_delta | identifier_name |
lib.rs | //! Weave deltas, inspired by SCCS.
//!
//! The [SCCS](https://en.wikipedia.org/wiki/Source_Code_Control_System) revision control system is
//! one of the oldest source code management systems (1973). Although many of its concepts are
//! quite dated in these days of git, the underlying "weave" delta format it used turns out to be a
//! good way of representing multiple versions of data that differ only in parts.
//!
//! This package implements a weave-based storage of "plain text", where plain text consists of
//! lines of UTF-8 printable characters separated by a newline.
//!
//! The format is similar to SCCS, but with no constraints to keep what are relatively poor design
//! decisions from SCCS, such as putting a checksum at the top of the file, and using limited-sized
//! field for values such as the number of lines in a file, or the use of 2-digit years. However,
//! the main body of the weaved file, that which describes inserts and deletes is the same, and
//! allows us to test this version by comparing with the storage of sccs.
//!
//! Weave files are written using [`NewWeave`], which works like a regular file writer. The file
//! itself has a small amount of surrounding metadata, but is otherwise mostly just the contents of
//! the initial file.
//!
//! Adding a delta to a weave file is done with the [`DeltaWriter`]. This is also written to, as a
//! regular file, and then [`DeltaWriter::close`] method will extract a base revision and use the
//! `diff` command to write a new version of the weave. The `close` method will make several
//! temporary files in the process.
//!
//! The weave data is stored using a [`NamingConvention`], a trait that manages a related
//! collection of files, and temp files. [`SimpleNaming`] is a basic representation of this that
//! has a base name, a backup file, and some temporary files. The data in the file can be
//! compressed.
#![warn(bare_trait_objects)]
mod delta;
mod errors;
mod header;
mod naming;
mod newweave;
mod parse;
pub use crate::{
delta::DeltaWriter,
errors::{Error, Result},
header::{DeltaInfo, Header},
naming::NamingConvention,
naming::SimpleNaming,
naming::Compression,
newweave::NewWeave,
parse::{Entry, Parser, PullParser, Sink},
};
use std::{io::Write, path::PathBuf};
/// Something we can write into, that remembers its name. The writer is boxed because the writer
/// may be compressed.
pub struct WriterInfo {
name: PathBuf,
writer: Box<dyn Write>,
}
/// Read the header from a weave file.
pub fn read_header(naming: &dyn NamingConvention) -> Result<Header> {
Ok(PullParser::new(naming, 1)?.into_header())
}
/// Retrieve the last delta in the weave file. Will panic if the weave file is malformed and
/// contains no revisions.
pub fn get_last_delta(naming: &dyn NamingConvention) -> Result<usize> | {
let header = read_header(naming)?;
Ok(header
.deltas
.iter()
.map(|x| x.number)
.max()
.expect("at least one delta in weave file"))
} | identifier_body |
|
lib.rs | //! Weave deltas, inspired by SCCS.
//!
//! The [SCCS](https://en.wikipedia.org/wiki/Source_Code_Control_System) revision control system is
//! one of the oldest source code management systems (1973). Although many of its concepts are
//! quite dated in these days of git, the underlying "weave" delta format it used turns out to be a
//! good way of representing multiple versions of data that differ only in parts.
//!
//! This package implements a weave-based storage of "plain text", where plain text consists of
//! lines of UTF-8 printable characters separated by a newline.
//!
//! The format is similar to SCCS, but with no constraints to keep what are relatively poor design
//! decisions from SCCS, such as putting a checksum at the top of the file, and using limited-sized
//! field for values such as the number of lines in a file, or the use of 2-digit years. However,
//! the main body of the weaved file, that which describes inserts and deletes is the same, and
//! allows us to test this version by comparing with the storage of sccs.
//!
//! Weave files are written using [`NewWeave`], which works like a regular file writer. The file
//! itself has a small amount of surrounding metadata, but is otherwise mostly just the contents of
//! the initial file.
//!
//! Adding a delta to a weave file is done with the [`DeltaWriter`]. This is also written to, as a
//! regular file, and then [`DeltaWriter::close`] method will extract a base revision and use the
//! `diff` command to write a new version of the weave. The `close` method will make several
//! temporary files in the process.
//!
//! The weave data is stored using a [`NamingConvention`], a trait that manages a related
//! collection of files, and temp files. [`SimpleNaming`] is a basic representation of this that
//! has a base name, a backup file, and some temporary files. The data in the file can be
//! compressed.
#![warn(bare_trait_objects)]
mod delta;
mod errors;
mod header;
mod naming;
mod newweave;
mod parse;
pub use crate::{
delta::DeltaWriter,
errors::{Error, Result},
header::{DeltaInfo, Header},
naming::NamingConvention,
naming::SimpleNaming,
naming::Compression,
newweave::NewWeave,
parse::{Entry, Parser, PullParser, Sink},
};
use std::{io::Write, path::PathBuf};
/// Something we can write into, that remembers its name. The writer is boxed because the writer
/// may be compressed.
pub struct WriterInfo {
name: PathBuf,
writer: Box<dyn Write>,
}
/// Read the header from a weave file.
pub fn read_header(naming: &dyn NamingConvention) -> Result<Header> {
Ok(PullParser::new(naming, 1)?.into_header())
}
/// Retrieve the last delta in the weave file. Will panic if the weave file is malformed and
/// contains no revisions.
pub fn get_last_delta(naming: &dyn NamingConvention) -> Result<usize> {
let header = read_header(naming)?;
Ok(header
.deltas
.iter() | .map(|x| x.number)
.max()
.expect("at least one delta in weave file"))
} | random_line_split |
|
foo.rs | #![crate_type = "cdylib"]
extern "C" {
fn observe(ptr: *const u8, len: usize);
}
macro_rules! s {
( $( $f:ident -> $t:ty );* $(;)* ) => {
$(
extern "C" {
fn $f() -> $t;
}
let s = $f().to_string();
observe(s.as_ptr(), s.len());
)*
};
}
#[no_mangle]
pub unsafe extern "C" fn foo() {
s! {
get_u8 -> u8; | get_i32 -> i32;
get_u64 -> u64;
get_i64 -> i64;
get_usize -> usize;
get_isize -> isize;
}
} | get_i8 -> i8;
get_u16 -> u16;
get_i16 -> i16;
get_u32 -> u32; | random_line_split |
foo.rs | #![crate_type = "cdylib"]
extern "C" {
fn observe(ptr: *const u8, len: usize);
}
macro_rules! s {
( $( $f:ident -> $t:ty );* $(;)* ) => {
$(
extern "C" {
fn $f() -> $t;
}
let s = $f().to_string();
observe(s.as_ptr(), s.len());
)*
};
}
#[no_mangle]
pub unsafe extern "C" fn | () {
s! {
get_u8 -> u8;
get_i8 -> i8;
get_u16 -> u16;
get_i16 -> i16;
get_u32 -> u32;
get_i32 -> i32;
get_u64 -> u64;
get_i64 -> i64;
get_usize -> usize;
get_isize -> isize;
}
}
| foo | identifier_name |
foo.rs | #![crate_type = "cdylib"]
extern "C" {
fn observe(ptr: *const u8, len: usize);
}
macro_rules! s {
( $( $f:ident -> $t:ty );* $(;)* ) => {
$(
extern "C" {
fn $f() -> $t;
}
let s = $f().to_string();
observe(s.as_ptr(), s.len());
)*
};
}
#[no_mangle]
pub unsafe extern "C" fn foo() | {
s! {
get_u8 -> u8;
get_i8 -> i8;
get_u16 -> u16;
get_i16 -> i16;
get_u32 -> u32;
get_i32 -> i32;
get_u64 -> u64;
get_i64 -> i64;
get_usize -> usize;
get_isize -> isize;
}
} | identifier_body |
|
p129.rs | //! [Problem 129](https://projecteuler.net/problem=129) solver.
#![warn(bad_style,
unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results)]
#![feature(step_by)]
#[macro_use(problem)] extern crate common;
extern crate num;
use num::Integer;
fn a(n: u64) -> u64 {
if n == 1 { return 1 }
let mut x = 1;
let mut k = 1;
loop {
x = (x * 10 + 1) % n;
k += 1;
if x == 0 {
return k;
}
}
}
fn solve() -> String {
let limit = 1000001;
(limit..).step_by(2)
.filter(|&n|!n.is_multiple_of(&5))
.find(|&n| a(n) >= limit)
.unwrap()
.to_string() | #[cfg(test)]
mod tests {
use num::Integer;
mod naive {
use num::{One, Zero, Integer, BigUint, FromPrimitive};
pub fn r(k: u64) -> BigUint {
let mut r: BigUint = Zero::zero();
let ten: BigUint = FromPrimitive::from_u64(10).unwrap();
let one: BigUint = One::one();
for _ in (0.. k) {
r = &r * &ten + &one;
}
r
}
pub fn a(n: u64) -> u64 {
let n = FromPrimitive::from_u64(n).unwrap();
(1..).find(|&k| r(k).is_multiple_of(&n))
.unwrap()
}
}
#[test]
fn naive_r() {
assert_eq!("1".to_string(), naive::r(1).to_string());
assert_eq!("11".to_string(), naive::r(2).to_string());
assert_eq!("111".to_string(), naive::r(3).to_string());
}
#[test]
fn naive_a() {
assert_eq!(6, naive::a(7));
assert_eq!(5, naive::a(41));
}
#[test]
fn cmp_with_naive() {
for n in (1..100).step_by(2) {
if n.is_multiple_of(&5) { continue; }
assert_eq!(naive::a(n), super::a(n));
}
}
#[test]
fn a() {
assert_eq!(6, super::a(7));
assert_eq!(5, super::a(41));
}
} | }
problem!("1000023", solve);
| random_line_split |
p129.rs | //! [Problem 129](https://projecteuler.net/problem=129) solver.
#![warn(bad_style,
unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results)]
#![feature(step_by)]
#[macro_use(problem)] extern crate common;
extern crate num;
use num::Integer;
fn a(n: u64) -> u64 {
if n == 1 { return 1 }
let mut x = 1;
let mut k = 1;
loop {
x = (x * 10 + 1) % n;
k += 1;
if x == 0 {
return k;
}
}
}
fn solve() -> String {
let limit = 1000001;
(limit..).step_by(2)
.filter(|&n|!n.is_multiple_of(&5))
.find(|&n| a(n) >= limit)
.unwrap()
.to_string()
}
problem!("1000023", solve);
#[cfg(test)]
mod tests {
use num::Integer;
mod naive {
use num::{One, Zero, Integer, BigUint, FromPrimitive};
pub fn r(k: u64) -> BigUint {
let mut r: BigUint = Zero::zero();
let ten: BigUint = FromPrimitive::from_u64(10).unwrap();
let one: BigUint = One::one();
for _ in (0.. k) {
r = &r * &ten + &one;
}
r
}
pub fn a(n: u64) -> u64 {
let n = FromPrimitive::from_u64(n).unwrap();
(1..).find(|&k| r(k).is_multiple_of(&n))
.unwrap()
}
}
#[test]
fn naive_r() {
assert_eq!("1".to_string(), naive::r(1).to_string());
assert_eq!("11".to_string(), naive::r(2).to_string());
assert_eq!("111".to_string(), naive::r(3).to_string());
}
#[test]
fn naive_a() {
assert_eq!(6, naive::a(7));
assert_eq!(5, naive::a(41));
}
#[test]
fn cmp_with_naive() {
for n in (1..100).step_by(2) {
if n.is_multiple_of(&5) |
assert_eq!(naive::a(n), super::a(n));
}
}
#[test]
fn a() {
assert_eq!(6, super::a(7));
assert_eq!(5, super::a(41));
}
}
| { continue; } | conditional_block |
p129.rs | //! [Problem 129](https://projecteuler.net/problem=129) solver.
#![warn(bad_style,
unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results)]
#![feature(step_by)]
#[macro_use(problem)] extern crate common;
extern crate num;
use num::Integer;
fn a(n: u64) -> u64 {
if n == 1 { return 1 }
let mut x = 1;
let mut k = 1;
loop {
x = (x * 10 + 1) % n;
k += 1;
if x == 0 {
return k;
}
}
}
fn solve() -> String {
let limit = 1000001;
(limit..).step_by(2)
.filter(|&n|!n.is_multiple_of(&5))
.find(|&n| a(n) >= limit)
.unwrap()
.to_string()
}
problem!("1000023", solve);
#[cfg(test)]
mod tests {
use num::Integer;
mod naive {
use num::{One, Zero, Integer, BigUint, FromPrimitive};
pub fn r(k: u64) -> BigUint {
let mut r: BigUint = Zero::zero();
let ten: BigUint = FromPrimitive::from_u64(10).unwrap();
let one: BigUint = One::one();
for _ in (0.. k) {
r = &r * &ten + &one;
}
r
}
pub fn a(n: u64) -> u64 {
let n = FromPrimitive::from_u64(n).unwrap();
(1..).find(|&k| r(k).is_multiple_of(&n))
.unwrap()
}
}
#[test]
fn naive_r() {
assert_eq!("1".to_string(), naive::r(1).to_string());
assert_eq!("11".to_string(), naive::r(2).to_string());
assert_eq!("111".to_string(), naive::r(3).to_string());
}
#[test]
fn naive_a() {
assert_eq!(6, naive::a(7));
assert_eq!(5, naive::a(41));
}
#[test]
fn cmp_with_naive() {
for n in (1..100).step_by(2) {
if n.is_multiple_of(&5) { continue; }
assert_eq!(naive::a(n), super::a(n));
}
}
#[test]
fn a() |
}
| {
assert_eq!(6, super::a(7));
assert_eq!(5, super::a(41));
} | identifier_body |
p129.rs | //! [Problem 129](https://projecteuler.net/problem=129) solver.
#![warn(bad_style,
unused, unused_extern_crates, unused_import_braces,
unused_qualifications, unused_results)]
#![feature(step_by)]
#[macro_use(problem)] extern crate common;
extern crate num;
use num::Integer;
fn a(n: u64) -> u64 {
if n == 1 { return 1 }
let mut x = 1;
let mut k = 1;
loop {
x = (x * 10 + 1) % n;
k += 1;
if x == 0 {
return k;
}
}
}
fn | () -> String {
let limit = 1000001;
(limit..).step_by(2)
.filter(|&n|!n.is_multiple_of(&5))
.find(|&n| a(n) >= limit)
.unwrap()
.to_string()
}
problem!("1000023", solve);
#[cfg(test)]
mod tests {
use num::Integer;
mod naive {
use num::{One, Zero, Integer, BigUint, FromPrimitive};
pub fn r(k: u64) -> BigUint {
let mut r: BigUint = Zero::zero();
let ten: BigUint = FromPrimitive::from_u64(10).unwrap();
let one: BigUint = One::one();
for _ in (0.. k) {
r = &r * &ten + &one;
}
r
}
pub fn a(n: u64) -> u64 {
let n = FromPrimitive::from_u64(n).unwrap();
(1..).find(|&k| r(k).is_multiple_of(&n))
.unwrap()
}
}
#[test]
fn naive_r() {
assert_eq!("1".to_string(), naive::r(1).to_string());
assert_eq!("11".to_string(), naive::r(2).to_string());
assert_eq!("111".to_string(), naive::r(3).to_string());
}
#[test]
fn naive_a() {
assert_eq!(6, naive::a(7));
assert_eq!(5, naive::a(41));
}
#[test]
fn cmp_with_naive() {
for n in (1..100).step_by(2) {
if n.is_multiple_of(&5) { continue; }
assert_eq!(naive::a(n), super::a(n));
}
}
#[test]
fn a() {
assert_eq!(6, super::a(7));
assert_eq!(5, super::a(41));
}
}
| solve | identifier_name |
app_uart.rs | #![feature(phase)]
#![crate_type="staticlib"]
#![no_std]
extern crate core;
extern crate zinc;
#[phase(plugin)] extern crate macro_platformtree;
platformtree!(
lpc17xx@mcu {
clock {
source = "main-oscillator";
source_frequency = 12_000_000;
pll {
m = 50;
n = 3;
divisor = 4;
}
}
timer {
timer@1 {
counter = 25;
divisor = 4;
}
}
uart {
uart@0 {
baud_rate = 115200;
mode = "8N1";
tx = &uart_tx;
rx = &uart_rx;
}
}
gpio {
0 {
uart_tx@2;
uart_rx@3;
}
1 {
led4@23 { direction = "out"; }
}
}
}
os {
single_task {
loop = "run";
args {
timer = &timer;
txled = &led4;
uart = &uart;
}
}
}
)
#[no_split_stack]
fn run(args: &pt::run_args) | {
use zinc::drivers::chario::CharIO;
use zinc::hal::timer::Timer;
use zinc::hal::pin::GPIO;
args.uart.puts("Hello, world\n");
let mut i = 0;
loop {
args.txled.set_high();
args.uart.puts("Waiting for ");
args.uart.puti(i);
args.uart.puts(" seconds...\n");
i += 1;
args.txled.set_low();
args.timer.wait(1);
}
} | identifier_body |
|
app_uart.rs | #![feature(phase)]
#![crate_type="staticlib"]
#![no_std]
extern crate core;
extern crate zinc;
#[phase(plugin)] extern crate macro_platformtree;
platformtree!(
lpc17xx@mcu {
clock {
source = "main-oscillator";
source_frequency = 12_000_000;
pll {
m = 50;
n = 3;
divisor = 4;
}
}
timer {
timer@1 {
counter = 25;
divisor = 4;
}
}
uart {
uart@0 {
baud_rate = 115200;
mode = "8N1";
tx = &uart_tx;
rx = &uart_rx;
}
}
gpio {
0 {
uart_tx@2;
uart_rx@3;
}
1 {
led4@23 { direction = "out"; }
}
}
}
os {
single_task {
loop = "run";
args {
timer = &timer;
txled = &led4;
uart = &uart;
}
}
}
)
#[no_split_stack]
fn | (args: &pt::run_args) {
use zinc::drivers::chario::CharIO;
use zinc::hal::timer::Timer;
use zinc::hal::pin::GPIO;
args.uart.puts("Hello, world\n");
let mut i = 0;
loop {
args.txled.set_high();
args.uart.puts("Waiting for ");
args.uart.puti(i);
args.uart.puts(" seconds...\n");
i += 1;
args.txled.set_low();
args.timer.wait(1);
}
}
| run | identifier_name |
app_uart.rs | #![feature(phase)]
#![crate_type="staticlib"]
#![no_std]
extern crate core;
extern crate zinc; |
platformtree!(
lpc17xx@mcu {
clock {
source = "main-oscillator";
source_frequency = 12_000_000;
pll {
m = 50;
n = 3;
divisor = 4;
}
}
timer {
timer@1 {
counter = 25;
divisor = 4;
}
}
uart {
uart@0 {
baud_rate = 115200;
mode = "8N1";
tx = &uart_tx;
rx = &uart_rx;
}
}
gpio {
0 {
uart_tx@2;
uart_rx@3;
}
1 {
led4@23 { direction = "out"; }
}
}
}
os {
single_task {
loop = "run";
args {
timer = &timer;
txled = &led4;
uart = &uart;
}
}
}
)
#[no_split_stack]
fn run(args: &pt::run_args) {
use zinc::drivers::chario::CharIO;
use zinc::hal::timer::Timer;
use zinc::hal::pin::GPIO;
args.uart.puts("Hello, world\n");
let mut i = 0;
loop {
args.txled.set_high();
args.uart.puts("Waiting for ");
args.uart.puti(i);
args.uart.puts(" seconds...\n");
i += 1;
args.txled.set_low();
args.timer.wait(1);
}
} | #[phase(plugin)] extern crate macro_platformtree; | random_line_split |
client.rs | //
// Copyright 2022 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use crate::proto::{
streaming_session_client::StreamingSessionClient, StreamingRequest, StreamingResponse,
};
use anyhow::Context;
use oak_remote_attestation::handshaker::{
AttestationBehavior, ClientHandshaker, Encryptor, ServerIdentityVerifier,
};
use tokio::sync::mpsc::Sender;
use tonic::{transport::Channel, Request, Streaming};
const MESSAGE_BUFFER_SIZE: usize = 1;
/// Convenience structure for sending requests to and receiving responses from the server.
struct GrpcChannel {
sender: Sender<StreamingRequest>,
response_stream: Streaming<StreamingResponse>,
}
impl GrpcChannel {
async fn create(uri: &str) -> anyhow::Result<Self> {
let channel = Channel::from_shared(uri.to_string())
.context("Couldn't create gRPC channel")?
.connect()
.await
.context("Couldn't connect via gRPC channel")?;
let mut client = StreamingSessionClient::new(channel);
let (sender, mut receiver) = tokio::sync::mpsc::channel(MESSAGE_BUFFER_SIZE);
let request_stream = async_stream::stream! {
while let Some(message) = receiver.recv().await {
yield message;
}
};
let response = client
.stream(Request::new(request_stream))
.await
.context("Couldn't send request")?;
let response_stream = response.into_inner();
Ok(Self {
sender,
response_stream,
})
}
async fn send(&mut self, request: StreamingRequest) -> anyhow::Result<()> {
self.sender
.send(request)
.await
.context("Couldn't send request")
}
async fn receive(&mut self) -> anyhow::Result<Option<StreamingResponse>> {
self.response_stream
.message()
.await
.context("Couldn't receive response")
}
}
/// gRPC Attestation Service client implementation.
pub struct AttestationClient {
channel: GrpcChannel,
encryptor: Encryptor,
}
impl AttestationClient {
pub async fn | (
uri: &str,
expected_tee_measurement: &[u8],
server_verifier: ServerIdentityVerifier,
) -> anyhow::Result<Self> {
let mut channel = GrpcChannel::create(uri)
.await
.context("Couldn't create gRPC client")?;
let encryptor =
Self::set_up_tunnel(&mut channel, expected_tee_measurement, server_verifier)
.await
.context("Couldn't attest server")?;
Ok(Self { channel, encryptor })
}
/// Performs a key exchange handshake and validates the server attestation information to set up
/// an end-to-end encrypted, attested tunnel to the server.
async fn set_up_tunnel(
channel: &mut GrpcChannel,
expected_tee_measurement: &[u8],
server_verifier: ServerIdentityVerifier,
) -> anyhow::Result<Encryptor> {
let mut handshaker = ClientHandshaker::new(
AttestationBehavior::create_peer_attestation(expected_tee_measurement),
server_verifier,
);
let client_hello = handshaker
.create_client_hello()
.context("Couldn't create client hello message")?;
channel
.send(StreamingRequest { body: client_hello })
.await
.context("Couldn't send client hello message")?;
while!handshaker.is_completed() {
let incoming_message = channel
.receive()
.await
.context("Couldn't receive handshake message")?
.context("Stream stopped preemptively")?;
let outgoing_message = handshaker
.next_step(&incoming_message.body)
.context("Couldn't process handshake message")?;
if let Some(outgoing_message) = outgoing_message {
channel
.send(StreamingRequest {
body: outgoing_message,
})
.await
.context("Couldn't send handshake message")?;
}
}
let encryptor = handshaker
.get_encryptor()
.context("Couldn't get encryptor")?;
Ok(encryptor)
}
/// Sends data encrypted by the [`Encryptor`] to the server and decrypts the server responses.
/// Returns `Ok(None)` to indicate that the corresponding gRPC stream has ended.
pub async fn send(
&mut self,
request: oak_functions_abi::proto::Request,
) -> anyhow::Result<Option<Vec<u8>>> {
let encrypted_request = self
.encryptor
.encrypt(&request.body)
.context("Couldn't encrypt request")?;
self.channel
.send(StreamingRequest {
body: encrypted_request,
})
.await
.context("Couldn't send encrypted data request")?;
let encrypted_response = self
.channel
.receive()
.await
.context("Couldn't send encrypted data request")?;
let response = match encrypted_response {
Some(encrypted_response) => {
let response = self
.encryptor
.decrypt(&encrypted_response.body)
.context("Couldn't decrypt response")?;
Some(response)
}
None => None,
};
Ok(response)
}
}
| create | identifier_name |
client.rs | //
// Copyright 2022 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use crate::proto::{
streaming_session_client::StreamingSessionClient, StreamingRequest, StreamingResponse,
};
use anyhow::Context;
use oak_remote_attestation::handshaker::{
AttestationBehavior, ClientHandshaker, Encryptor, ServerIdentityVerifier,
};
use tokio::sync::mpsc::Sender;
use tonic::{transport::Channel, Request, Streaming};
const MESSAGE_BUFFER_SIZE: usize = 1;
/// Convenience structure for sending requests to and receiving responses from the server.
struct GrpcChannel {
sender: Sender<StreamingRequest>,
response_stream: Streaming<StreamingResponse>,
}
impl GrpcChannel {
async fn create(uri: &str) -> anyhow::Result<Self> {
let channel = Channel::from_shared(uri.to_string())
.context("Couldn't create gRPC channel")?
.connect()
.await
.context("Couldn't connect via gRPC channel")?;
let mut client = StreamingSessionClient::new(channel);
let (sender, mut receiver) = tokio::sync::mpsc::channel(MESSAGE_BUFFER_SIZE);
let request_stream = async_stream::stream! {
while let Some(message) = receiver.recv().await {
yield message;
}
};
let response = client
.stream(Request::new(request_stream))
.await
.context("Couldn't send request")?;
let response_stream = response.into_inner();
Ok(Self {
sender,
response_stream,
})
}
async fn send(&mut self, request: StreamingRequest) -> anyhow::Result<()> {
self.sender
.send(request)
.await | async fn receive(&mut self) -> anyhow::Result<Option<StreamingResponse>> {
self.response_stream
.message()
.await
.context("Couldn't receive response")
}
}
/// gRPC Attestation Service client implementation.
pub struct AttestationClient {
channel: GrpcChannel,
encryptor: Encryptor,
}
impl AttestationClient {
pub async fn create(
uri: &str,
expected_tee_measurement: &[u8],
server_verifier: ServerIdentityVerifier,
) -> anyhow::Result<Self> {
let mut channel = GrpcChannel::create(uri)
.await
.context("Couldn't create gRPC client")?;
let encryptor =
Self::set_up_tunnel(&mut channel, expected_tee_measurement, server_verifier)
.await
.context("Couldn't attest server")?;
Ok(Self { channel, encryptor })
}
/// Performs a key exchange handshake and validates the server attestation information to set up
/// an end-to-end encrypted, attested tunnel to the server.
async fn set_up_tunnel(
channel: &mut GrpcChannel,
expected_tee_measurement: &[u8],
server_verifier: ServerIdentityVerifier,
) -> anyhow::Result<Encryptor> {
let mut handshaker = ClientHandshaker::new(
AttestationBehavior::create_peer_attestation(expected_tee_measurement),
server_verifier,
);
let client_hello = handshaker
.create_client_hello()
.context("Couldn't create client hello message")?;
channel
.send(StreamingRequest { body: client_hello })
.await
.context("Couldn't send client hello message")?;
while!handshaker.is_completed() {
let incoming_message = channel
.receive()
.await
.context("Couldn't receive handshake message")?
.context("Stream stopped preemptively")?;
let outgoing_message = handshaker
.next_step(&incoming_message.body)
.context("Couldn't process handshake message")?;
if let Some(outgoing_message) = outgoing_message {
channel
.send(StreamingRequest {
body: outgoing_message,
})
.await
.context("Couldn't send handshake message")?;
}
}
let encryptor = handshaker
.get_encryptor()
.context("Couldn't get encryptor")?;
Ok(encryptor)
}
/// Sends data encrypted by the [`Encryptor`] to the server and decrypts the server responses.
/// Returns `Ok(None)` to indicate that the corresponding gRPC stream has ended.
pub async fn send(
&mut self,
request: oak_functions_abi::proto::Request,
) -> anyhow::Result<Option<Vec<u8>>> {
let encrypted_request = self
.encryptor
.encrypt(&request.body)
.context("Couldn't encrypt request")?;
self.channel
.send(StreamingRequest {
body: encrypted_request,
})
.await
.context("Couldn't send encrypted data request")?;
let encrypted_response = self
.channel
.receive()
.await
.context("Couldn't send encrypted data request")?;
let response = match encrypted_response {
Some(encrypted_response) => {
let response = self
.encryptor
.decrypt(&encrypted_response.body)
.context("Couldn't decrypt response")?;
Some(response)
}
None => None,
};
Ok(response)
}
} | .context("Couldn't send request")
}
| random_line_split |
client.rs | //
// Copyright 2022 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use crate::proto::{
streaming_session_client::StreamingSessionClient, StreamingRequest, StreamingResponse,
};
use anyhow::Context;
use oak_remote_attestation::handshaker::{
AttestationBehavior, ClientHandshaker, Encryptor, ServerIdentityVerifier,
};
use tokio::sync::mpsc::Sender;
use tonic::{transport::Channel, Request, Streaming};
const MESSAGE_BUFFER_SIZE: usize = 1;
/// Convenience structure for sending requests to and receiving responses from the server.
struct GrpcChannel {
sender: Sender<StreamingRequest>,
response_stream: Streaming<StreamingResponse>,
}
impl GrpcChannel {
async fn create(uri: &str) -> anyhow::Result<Self> {
let channel = Channel::from_shared(uri.to_string())
.context("Couldn't create gRPC channel")?
.connect()
.await
.context("Couldn't connect via gRPC channel")?;
let mut client = StreamingSessionClient::new(channel);
let (sender, mut receiver) = tokio::sync::mpsc::channel(MESSAGE_BUFFER_SIZE);
let request_stream = async_stream::stream! {
while let Some(message) = receiver.recv().await {
yield message;
}
};
let response = client
.stream(Request::new(request_stream))
.await
.context("Couldn't send request")?;
let response_stream = response.into_inner();
Ok(Self {
sender,
response_stream,
})
}
async fn send(&mut self, request: StreamingRequest) -> anyhow::Result<()> {
self.sender
.send(request)
.await
.context("Couldn't send request")
}
async fn receive(&mut self) -> anyhow::Result<Option<StreamingResponse>> {
self.response_stream
.message()
.await
.context("Couldn't receive response")
}
}
/// gRPC Attestation Service client implementation.
pub struct AttestationClient {
channel: GrpcChannel,
encryptor: Encryptor,
}
impl AttestationClient {
pub async fn create(
uri: &str,
expected_tee_measurement: &[u8],
server_verifier: ServerIdentityVerifier,
) -> anyhow::Result<Self> {
let mut channel = GrpcChannel::create(uri)
.await
.context("Couldn't create gRPC client")?;
let encryptor =
Self::set_up_tunnel(&mut channel, expected_tee_measurement, server_verifier)
.await
.context("Couldn't attest server")?;
Ok(Self { channel, encryptor })
}
/// Performs a key exchange handshake and validates the server attestation information to set up
/// an end-to-end encrypted, attested tunnel to the server.
async fn set_up_tunnel(
channel: &mut GrpcChannel,
expected_tee_measurement: &[u8],
server_verifier: ServerIdentityVerifier,
) -> anyhow::Result<Encryptor> | let outgoing_message = handshaker
.next_step(&incoming_message.body)
.context("Couldn't process handshake message")?;
if let Some(outgoing_message) = outgoing_message {
channel
.send(StreamingRequest {
body: outgoing_message,
})
.await
.context("Couldn't send handshake message")?;
}
}
let encryptor = handshaker
.get_encryptor()
.context("Couldn't get encryptor")?;
Ok(encryptor)
}
/// Sends data encrypted by the [`Encryptor`] to the server and decrypts the server responses.
/// Returns `Ok(None)` to indicate that the corresponding gRPC stream has ended.
pub async fn send(
&mut self,
request: oak_functions_abi::proto::Request,
) -> anyhow::Result<Option<Vec<u8>>> {
let encrypted_request = self
.encryptor
.encrypt(&request.body)
.context("Couldn't encrypt request")?;
self.channel
.send(StreamingRequest {
body: encrypted_request,
})
.await
.context("Couldn't send encrypted data request")?;
let encrypted_response = self
.channel
.receive()
.await
.context("Couldn't send encrypted data request")?;
let response = match encrypted_response {
Some(encrypted_response) => {
let response = self
.encryptor
.decrypt(&encrypted_response.body)
.context("Couldn't decrypt response")?;
Some(response)
}
None => None,
};
Ok(response)
}
}
| {
let mut handshaker = ClientHandshaker::new(
AttestationBehavior::create_peer_attestation(expected_tee_measurement),
server_verifier,
);
let client_hello = handshaker
.create_client_hello()
.context("Couldn't create client hello message")?;
channel
.send(StreamingRequest { body: client_hello })
.await
.context("Couldn't send client hello message")?;
while !handshaker.is_completed() {
let incoming_message = channel
.receive()
.await
.context("Couldn't receive handshake message")?
.context("Stream stopped preemptively")?;
| identifier_body |
client.rs | //
// Copyright 2022 The Project Oak Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use crate::proto::{
streaming_session_client::StreamingSessionClient, StreamingRequest, StreamingResponse,
};
use anyhow::Context;
use oak_remote_attestation::handshaker::{
AttestationBehavior, ClientHandshaker, Encryptor, ServerIdentityVerifier,
};
use tokio::sync::mpsc::Sender;
use tonic::{transport::Channel, Request, Streaming};
const MESSAGE_BUFFER_SIZE: usize = 1;
/// Convenience structure for sending requests to and receiving responses from the server.
struct GrpcChannel {
sender: Sender<StreamingRequest>,
response_stream: Streaming<StreamingResponse>,
}
impl GrpcChannel {
async fn create(uri: &str) -> anyhow::Result<Self> {
let channel = Channel::from_shared(uri.to_string())
.context("Couldn't create gRPC channel")?
.connect()
.await
.context("Couldn't connect via gRPC channel")?;
let mut client = StreamingSessionClient::new(channel);
let (sender, mut receiver) = tokio::sync::mpsc::channel(MESSAGE_BUFFER_SIZE);
let request_stream = async_stream::stream! {
while let Some(message) = receiver.recv().await {
yield message;
}
};
let response = client
.stream(Request::new(request_stream))
.await
.context("Couldn't send request")?;
let response_stream = response.into_inner();
Ok(Self {
sender,
response_stream,
})
}
async fn send(&mut self, request: StreamingRequest) -> anyhow::Result<()> {
self.sender
.send(request)
.await
.context("Couldn't send request")
}
async fn receive(&mut self) -> anyhow::Result<Option<StreamingResponse>> {
self.response_stream
.message()
.await
.context("Couldn't receive response")
}
}
/// gRPC Attestation Service client implementation.
pub struct AttestationClient {
channel: GrpcChannel,
encryptor: Encryptor,
}
impl AttestationClient {
pub async fn create(
uri: &str,
expected_tee_measurement: &[u8],
server_verifier: ServerIdentityVerifier,
) -> anyhow::Result<Self> {
let mut channel = GrpcChannel::create(uri)
.await
.context("Couldn't create gRPC client")?;
let encryptor =
Self::set_up_tunnel(&mut channel, expected_tee_measurement, server_verifier)
.await
.context("Couldn't attest server")?;
Ok(Self { channel, encryptor })
}
/// Performs a key exchange handshake and validates the server attestation information to set up
/// an end-to-end encrypted, attested tunnel to the server.
async fn set_up_tunnel(
channel: &mut GrpcChannel,
expected_tee_measurement: &[u8],
server_verifier: ServerIdentityVerifier,
) -> anyhow::Result<Encryptor> {
let mut handshaker = ClientHandshaker::new(
AttestationBehavior::create_peer_attestation(expected_tee_measurement),
server_verifier,
);
let client_hello = handshaker
.create_client_hello()
.context("Couldn't create client hello message")?;
channel
.send(StreamingRequest { body: client_hello })
.await
.context("Couldn't send client hello message")?;
while!handshaker.is_completed() {
let incoming_message = channel
.receive()
.await
.context("Couldn't receive handshake message")?
.context("Stream stopped preemptively")?;
let outgoing_message = handshaker
.next_step(&incoming_message.body)
.context("Couldn't process handshake message")?;
if let Some(outgoing_message) = outgoing_message |
}
let encryptor = handshaker
.get_encryptor()
.context("Couldn't get encryptor")?;
Ok(encryptor)
}
/// Sends data encrypted by the [`Encryptor`] to the server and decrypts the server responses.
/// Returns `Ok(None)` to indicate that the corresponding gRPC stream has ended.
pub async fn send(
&mut self,
request: oak_functions_abi::proto::Request,
) -> anyhow::Result<Option<Vec<u8>>> {
let encrypted_request = self
.encryptor
.encrypt(&request.body)
.context("Couldn't encrypt request")?;
self.channel
.send(StreamingRequest {
body: encrypted_request,
})
.await
.context("Couldn't send encrypted data request")?;
let encrypted_response = self
.channel
.receive()
.await
.context("Couldn't send encrypted data request")?;
let response = match encrypted_response {
Some(encrypted_response) => {
let response = self
.encryptor
.decrypt(&encrypted_response.body)
.context("Couldn't decrypt response")?;
Some(response)
}
None => None,
};
Ok(response)
}
}
| {
channel
.send(StreamingRequest {
body: outgoing_message,
})
.await
.context("Couldn't send handshake message")?;
} | conditional_block |
get_outgoing.rs | use chrono::{UTC, Date, Datelike};
use std::str::FromStr; // Use of #from_str.
use api::client::{TellerClient, ApiServiceResult, Transaction};
use api::client::parse_utc_date_from_transaction;
use api::inform::Money;
pub trait GetOutgoing {
fn get_outgoing(&self, account_id: &str, for_month: &Date<UTC>) -> ApiServiceResult<Money>;
}
impl<'a> GetOutgoing for TellerClient<'a> {
fn get_outgoing(&self, account_id: &str, for_month: &Date<UTC>) -> ApiServiceResult<Money> {
let account = try!(self.get_account(&account_id));
let currency = account.currency;
let from = for_month.with_day(1).unwrap();
let to = if from.month() < 12 {
from.with_month(from.month() + 1).unwrap()
} else {
from.with_year(from.year() + 1).unwrap().with_month(1).unwrap()
};
let transactions: Vec<Transaction> = self.raw_transactions(&account_id, 250, 1)
.unwrap_or(vec![])
.into_iter()
.filter(|t| {
let transaction_date =
parse_utc_date_from_transaction(&t);
from <= transaction_date && transaction_date <= to
})
.collect();
let from_float_string_to_cent_integer = |t: &Transaction| {
(f64::from_str(&t.amount).unwrap() * 100f64).round() as i64
};
let from_cent_integer_to_float_string = |amount: i64| format!("{:.2}", amount as f64 / 100f64);
let outgoing = transactions.iter()
.map(from_float_string_to_cent_integer)
.filter(|ci| *ci < 0)
.fold(0i64, |sum, v| sum + v);
Ok(Money::new(from_cent_integer_to_float_string(outgoing.abs()), currency))
}
}
#[cfg(test)]
mod tests {
use api::client::{TellerClient, generate_utc_date_from_date_str};
use super::GetOutgoing;
use hyper;
mock_connector_in_order!(GetAccountFollowedByGetTransactions {
include_str!("../mocks/get-account.http")
include_str!("../mocks/get-transactions.http")
});
#[test]
fn can_get_outgoing() {
let c = hyper::client::Client::with_connector(GetAccountFollowedByGetTransactions::default());
let teller = TellerClient::new_with_hyper_client("fake-auth-token", c);
let current_month = generate_utc_date_from_date_str("2016-01-01");
let money = teller.get_outgoing("123", ¤t_month).unwrap();
assert_eq!("55.00 GBP", money.get_balance_for_display(&false)); |
} | } | random_line_split |
get_outgoing.rs | use chrono::{UTC, Date, Datelike};
use std::str::FromStr; // Use of #from_str.
use api::client::{TellerClient, ApiServiceResult, Transaction};
use api::client::parse_utc_date_from_transaction;
use api::inform::Money;
pub trait GetOutgoing {
fn get_outgoing(&self, account_id: &str, for_month: &Date<UTC>) -> ApiServiceResult<Money>;
}
impl<'a> GetOutgoing for TellerClient<'a> {
fn get_outgoing(&self, account_id: &str, for_month: &Date<UTC>) -> ApiServiceResult<Money> | let from_float_string_to_cent_integer = |t: &Transaction| {
(f64::from_str(&t.amount).unwrap() * 100f64).round() as i64
};
let from_cent_integer_to_float_string = |amount: i64| format!("{:.2}", amount as f64 / 100f64);
let outgoing = transactions.iter()
.map(from_float_string_to_cent_integer)
.filter(|ci| *ci < 0)
.fold(0i64, |sum, v| sum + v);
Ok(Money::new(from_cent_integer_to_float_string(outgoing.abs()), currency))
}
}
#[cfg(test)]
mod tests {
use api::client::{TellerClient, generate_utc_date_from_date_str};
use super::GetOutgoing;
use hyper;
mock_connector_in_order!(GetAccountFollowedByGetTransactions {
include_str!("../mocks/get-account.http")
include_str!("../mocks/get-transactions.http")
});
#[test]
fn can_get_outgoing() {
let c = hyper::client::Client::with_connector(GetAccountFollowedByGetTransactions::default());
let teller = TellerClient::new_with_hyper_client("fake-auth-token", c);
let current_month = generate_utc_date_from_date_str("2016-01-01");
let money = teller.get_outgoing("123", ¤t_month).unwrap();
assert_eq!("55.00 GBP", money.get_balance_for_display(&false));
}
}
| {
let account = try!(self.get_account(&account_id));
let currency = account.currency;
let from = for_month.with_day(1).unwrap();
let to = if from.month() < 12 {
from.with_month(from.month() + 1).unwrap()
} else {
from.with_year(from.year() + 1).unwrap().with_month(1).unwrap()
};
let transactions: Vec<Transaction> = self.raw_transactions(&account_id, 250, 1)
.unwrap_or(vec![])
.into_iter()
.filter(|t| {
let transaction_date =
parse_utc_date_from_transaction(&t);
from <= transaction_date && transaction_date <= to
})
.collect();
| identifier_body |
get_outgoing.rs | use chrono::{UTC, Date, Datelike};
use std::str::FromStr; // Use of #from_str.
use api::client::{TellerClient, ApiServiceResult, Transaction};
use api::client::parse_utc_date_from_transaction;
use api::inform::Money;
pub trait GetOutgoing {
fn get_outgoing(&self, account_id: &str, for_month: &Date<UTC>) -> ApiServiceResult<Money>;
}
impl<'a> GetOutgoing for TellerClient<'a> {
fn | (&self, account_id: &str, for_month: &Date<UTC>) -> ApiServiceResult<Money> {
let account = try!(self.get_account(&account_id));
let currency = account.currency;
let from = for_month.with_day(1).unwrap();
let to = if from.month() < 12 {
from.with_month(from.month() + 1).unwrap()
} else {
from.with_year(from.year() + 1).unwrap().with_month(1).unwrap()
};
let transactions: Vec<Transaction> = self.raw_transactions(&account_id, 250, 1)
.unwrap_or(vec![])
.into_iter()
.filter(|t| {
let transaction_date =
parse_utc_date_from_transaction(&t);
from <= transaction_date && transaction_date <= to
})
.collect();
let from_float_string_to_cent_integer = |t: &Transaction| {
(f64::from_str(&t.amount).unwrap() * 100f64).round() as i64
};
let from_cent_integer_to_float_string = |amount: i64| format!("{:.2}", amount as f64 / 100f64);
let outgoing = transactions.iter()
.map(from_float_string_to_cent_integer)
.filter(|ci| *ci < 0)
.fold(0i64, |sum, v| sum + v);
Ok(Money::new(from_cent_integer_to_float_string(outgoing.abs()), currency))
}
}
#[cfg(test)]
mod tests {
use api::client::{TellerClient, generate_utc_date_from_date_str};
use super::GetOutgoing;
use hyper;
mock_connector_in_order!(GetAccountFollowedByGetTransactions {
include_str!("../mocks/get-account.http")
include_str!("../mocks/get-transactions.http")
});
#[test]
fn can_get_outgoing() {
let c = hyper::client::Client::with_connector(GetAccountFollowedByGetTransactions::default());
let teller = TellerClient::new_with_hyper_client("fake-auth-token", c);
let current_month = generate_utc_date_from_date_str("2016-01-01");
let money = teller.get_outgoing("123", ¤t_month).unwrap();
assert_eq!("55.00 GBP", money.get_balance_for_display(&false));
}
}
| get_outgoing | identifier_name |
get_outgoing.rs | use chrono::{UTC, Date, Datelike};
use std::str::FromStr; // Use of #from_str.
use api::client::{TellerClient, ApiServiceResult, Transaction};
use api::client::parse_utc_date_from_transaction;
use api::inform::Money;
pub trait GetOutgoing {
fn get_outgoing(&self, account_id: &str, for_month: &Date<UTC>) -> ApiServiceResult<Money>;
}
impl<'a> GetOutgoing for TellerClient<'a> {
fn get_outgoing(&self, account_id: &str, for_month: &Date<UTC>) -> ApiServiceResult<Money> {
let account = try!(self.get_account(&account_id));
let currency = account.currency;
let from = for_month.with_day(1).unwrap();
let to = if from.month() < 12 {
from.with_month(from.month() + 1).unwrap()
} else | ;
let transactions: Vec<Transaction> = self.raw_transactions(&account_id, 250, 1)
.unwrap_or(vec![])
.into_iter()
.filter(|t| {
let transaction_date =
parse_utc_date_from_transaction(&t);
from <= transaction_date && transaction_date <= to
})
.collect();
let from_float_string_to_cent_integer = |t: &Transaction| {
(f64::from_str(&t.amount).unwrap() * 100f64).round() as i64
};
let from_cent_integer_to_float_string = |amount: i64| format!("{:.2}", amount as f64 / 100f64);
let outgoing = transactions.iter()
.map(from_float_string_to_cent_integer)
.filter(|ci| *ci < 0)
.fold(0i64, |sum, v| sum + v);
Ok(Money::new(from_cent_integer_to_float_string(outgoing.abs()), currency))
}
}
#[cfg(test)]
mod tests {
use api::client::{TellerClient, generate_utc_date_from_date_str};
use super::GetOutgoing;
use hyper;
mock_connector_in_order!(GetAccountFollowedByGetTransactions {
include_str!("../mocks/get-account.http")
include_str!("../mocks/get-transactions.http")
});
#[test]
fn can_get_outgoing() {
let c = hyper::client::Client::with_connector(GetAccountFollowedByGetTransactions::default());
let teller = TellerClient::new_with_hyper_client("fake-auth-token", c);
let current_month = generate_utc_date_from_date_str("2016-01-01");
let money = teller.get_outgoing("123", ¤t_month).unwrap();
assert_eq!("55.00 GBP", money.get_balance_for_display(&false));
}
}
| {
from.with_year(from.year() + 1).unwrap().with_month(1).unwrap()
} | conditional_block |
obligations.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Code that handles "type-outlives" constraints like `T: 'a`. This
//! is based on the `push_outlives_components` function defined on the tcx,
//! but it adds a bit of heuristics on top, in particular to deal with
//! associated types and projections.
//!
//! When we process a given `T: 'a` obligation, we may produce two
//! kinds of constraints for the region inferencer:
//!
//! - Relationships between inference variables and other regions.
//! For example, if we have `&'?0 u32: 'a`, then we would produce
//! a constraint that `'a <= '?0`.
//! - "Verifys" that must be checked after inferencing is done.
//! For example, if we know that, for some type parameter `T`,
//! `T: 'a + 'b`, and we have a requirement that `T: '?1`,
//! then we add a "verify" that checks that `'?1 <= 'a || '?1 <= 'b`.
//! - Note the difference with the previous case: here, the region
//! variable must be less than something else, so this doesn't
//! affect how inference works (it finds the smallest region that
//! will do); it's just a post-condition that we have to check.
//!
//! **The key point is that once this function is done, we have
//! reduced all of our "type-region outlives" obligations into relationships
//! between individual regions.**
//!
//! One key input to this function is the set of "region-bound pairs".
//! These are basically the relationships between type parameters and
//! regions that are in scope at the point where the outlives
//! obligation was incurred. **When type-checking a function,
//! particularly in the face of closures, this is not known until
//! regionck runs!** This is because some of those bounds come
//! from things we have yet to infer.
//!
//! Consider:
//!
//! ```
//! fn bar<T>(a: T, b: impl for<'a> Fn(&'a T));
//! fn foo<T>(x: T) {
//! bar(x, |y| {... })
//! // ^ closure arg
//! }
//! ```
//!
//! Here, the type of `y` may involve inference variables and the
//! like, and it may also contain implied bounds that are needed to
//! type-check the closure body (e.g., here it informs us that `T`
//! outlives the late-bound region `'a`).
//!
//! Note that by delaying the gathering of implied bounds until all
//! inference information is known, we may find relationships between
//! bound regions and other regions in the environment. For example,
//! when we first check a closure like the one expected as argument
//! to `foo`:
//!
//! ```
//! fn foo<U, F: for<'a> FnMut(&'a U)>(_f: F) {}
//! ```
//!
//! the type of the closure's first argument would be `&'a?U`. We
//! might later infer `?U` to something like `&'b u32`, which would
//! imply that `'b: 'a`.
use infer::outlives::env::RegionBoundPairs;
use infer::outlives::verify::VerifyBoundCx;
use infer::{self, GenericKind, InferCtxt, RegionObligation, SubregionOrigin, VerifyBound};
use rustc_data_structures::fx::FxHashMap;
use syntax::ast;
use traits::ObligationCause;
use ty::outlives::Component;
use ty::{self, Region, Ty, TyCtxt, TypeFoldable};
impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> {
/// Registers that the given region obligation must be resolved
/// from within the scope of `body_id`. These regions are enqueued
/// and later processed by regionck, when full type information is
/// available (see `region_obligations` field for more
/// information).
pub fn register_region_obligation(
&self,
body_id: ast::NodeId,
obligation: RegionObligation<'tcx>,
) {
debug!(
"register_region_obligation(body_id={:?}, obligation={:?})",
body_id, obligation
);
self.region_obligations
.borrow_mut()
.push((body_id, obligation));
}
pub fn register_region_obligation_with_cause(
&self,
sup_type: Ty<'tcx>,
sub_region: Region<'tcx>,
cause: &ObligationCause<'tcx>,
) {
let origin = SubregionOrigin::from_obligation_cause(cause, || {
infer::RelateParamBound(cause.span, sup_type)
});
self.register_region_obligation(
cause.body_id,
RegionObligation {
sup_type,
sub_region,
origin,
},
);
}
/// Trait queries just want to pass back type obligations "as is"
pub fn take_registered_region_obligations(&self) -> Vec<(ast::NodeId, RegionObligation<'tcx>)> {
::std::mem::replace(&mut *self.region_obligations.borrow_mut(), vec![])
}
/// Process the region obligations that must be proven (during
/// `regionck`) for the given `body_id`, given information about
/// the region bounds in scope and so forth. This function must be
/// invoked for all relevant body-ids before region inference is
/// done (or else an assert will fire).
///
/// See the `region_obligations` field of `InferCtxt` for some
/// comments about how this function fits into the overall expected
/// flow of the inferencer. The key point is that it is
/// invoked after all type-inference variables have been bound --
/// towards the end of regionck. This also ensures that the
/// region-bound-pairs are available (see comments above regarding
/// closures).
///
/// # Parameters
///
/// - `region_bound_pairs`: the set of region bounds implied by
/// the parameters and where-clauses. In particular, each pair
/// `('a, K)` in this list tells us that the bounds in scope
/// indicate that `K: 'a`, where `K` is either a generic
/// parameter like `T` or a projection like `T::Item`.
/// - `implicit_region_bound`: if some, this is a region bound
/// that is considered to hold for all type parameters (the
/// function body).
/// - `param_env` is the parameter environment for the enclosing function.
/// - `body_id` is the body-id whose region obligations are being
/// processed.
///
/// # Returns
///
/// This function may have to perform normalizations, and hence it
/// returns an `InferOk` with subobligations that must be
/// processed.
pub fn process_registered_region_obligations(
&self,
region_bound_pairs_map: &FxHashMap<ast::NodeId, RegionBoundPairs<'tcx>>,
implicit_region_bound: Option<ty::Region<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
) {
assert!(
!self.in_snapshot.get(),
"cannot process registered region obligations in a snapshot"
);
debug!("process_registered_region_obligations()");
let my_region_obligations = self.take_registered_region_obligations();
for (
body_id,
RegionObligation {
sup_type,
sub_region,
origin,
},
) in my_region_obligations
{
debug!(
"process_registered_region_obligations: sup_type={:?} sub_region={:?} origin={:?}",
sup_type, sub_region, origin
);
let sup_type = self.resolve_type_vars_if_possible(&sup_type);
if let Some(region_bound_pairs) = region_bound_pairs_map.get(&body_id) {
let outlives = &mut TypeOutlives::new(
self,
self.tcx,
®ion_bound_pairs,
implicit_region_bound,
param_env,
);
outlives.type_must_outlive(origin, sup_type, sub_region);
} else {
self.tcx.sess.delay_span_bug(
origin.span(),
&format!("no region-bound-pairs for {:?}", body_id),
)
}
}
}
/// Processes a single ad-hoc region obligation that was not
/// registered in advance.
pub fn type_must_outlive(
&self,
region_bound_pairs: &RegionBoundPairs<'tcx>,
implicit_region_bound: Option<ty::Region<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
origin: infer::SubregionOrigin<'tcx>,
ty: Ty<'tcx>,
region: ty::Region<'tcx>,
) {
let outlives = &mut TypeOutlives::new(
self,
self.tcx,
region_bound_pairs,
implicit_region_bound,
param_env,
);
let ty = self.resolve_type_vars_if_possible(&ty);
outlives.type_must_outlive(origin, ty, region);
}
}
/// The `TypeOutlives` struct has the job of "lowering" a `T: 'a`
/// obligation into a series of `'a: 'b` constraints and "verifys", as
/// described on the module comment. The final constraints are emitted
/// via a "delegate" of type `D` -- this is usually the `infcx`, which
/// accrues them into the `region_obligations` code, but for NLL we
/// use something else.
pub struct TypeOutlives<'cx, 'gcx: 'tcx, 'tcx: 'cx, D>
where
D: TypeOutlivesDelegate<'tcx>,
{
// See the comments on `process_registered_region_obligations` for the meaning
// of these fields.
delegate: D,
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
verify_bound: VerifyBoundCx<'cx, 'gcx, 'tcx>,
}
pub trait TypeOutlivesDelegate<'tcx> {
fn push_sub_region_constraint(
&mut self,
origin: SubregionOrigin<'tcx>,
a: ty::Region<'tcx>,
b: ty::Region<'tcx>,
);
fn push_verify(
&mut self,
origin: SubregionOrigin<'tcx>,
kind: GenericKind<'tcx>,
a: ty::Region<'tcx>,
bound: VerifyBound<'tcx>,
);
}
impl<'cx, 'gcx, 'tcx, D> TypeOutlives<'cx, 'gcx, 'tcx, D>
where
D: TypeOutlivesDelegate<'tcx>,
{
pub fn new(
delegate: D,
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
region_bound_pairs: &'cx RegionBoundPairs<'tcx>,
implicit_region_bound: Option<ty::Region<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
) -> Self {
Self {
delegate,
tcx,
verify_bound: VerifyBoundCx::new(
tcx,
region_bound_pairs,
implicit_region_bound,
param_env,
),
}
}
/// Adds constraints to inference such that `T: 'a` holds (or
/// reports an error if it cannot).
///
/// # Parameters
///
/// - `origin`, the reason we need this constraint
/// - `ty`, the type `T`
/// - `region`, the region `'a`
pub fn type_must_outlive(
&mut self,
origin: infer::SubregionOrigin<'tcx>,
ty: Ty<'tcx>,
region: ty::Region<'tcx>,
) {
debug!(
"type_must_outlive(ty={:?}, region={:?}, origin={:?})",
ty, region, origin
);
assert!(!ty.has_escaping_bound_vars());
let mut components = smallvec![];
self.tcx.push_outlives_components(ty, &mut components);
self.components_must_outlive(origin, &components, region);
}
fn components_must_outlive(
&mut self,
origin: infer::SubregionOrigin<'tcx>,
components: &[Component<'tcx>],
region: ty::Region<'tcx>,
) {
for component in components.iter() {
let origin = origin.clone();
match component {
Component::Region(region1) => {
self.delegate
.push_sub_region_constraint(origin, region, region1);
}
Component::Param(param_ty) => {
self.param_ty_must_outlive(origin, region, *param_ty);
}
Component::Projection(projection_ty) => {
self.projection_must_outlive(origin, region, *projection_ty);
}
Component::EscapingProjection(subcomponents) => {
self.components_must_outlive(origin, &subcomponents, region);
}
Component::UnresolvedInferenceVariable(v) => {
// ignore this, we presume it will yield an error
// later, since if a type variable is not resolved by
// this point it never will be
self.tcx.sess.delay_span_bug(
origin.span(),
&format!("unresolved inference variable in outlives: {:?}", v),
);
}
}
}
}
fn param_ty_must_outlive(
&mut self,
origin: infer::SubregionOrigin<'tcx>,
region: ty::Region<'tcx>,
param_ty: ty::ParamTy,
) {
debug!(
"param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})",
region, param_ty, origin
);
let generic = GenericKind::Param(param_ty);
let verify_bound = self.verify_bound.generic_bound(generic);
self.delegate
.push_verify(origin, generic, region, verify_bound);
}
fn projection_must_outlive(
&mut self,
origin: infer::SubregionOrigin<'tcx>,
region: ty::Region<'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
) {
debug!(
"projection_must_outlive(region={:?}, projection_ty={:?}, origin={:?})",
region, projection_ty, origin
);
// This case is thorny for inference. The fundamental problem is
// that there are many cases where we have choice, and inference
// doesn't like choice (the current region inference in
// particular). :) First off, we have to choose between using the
// OutlivesProjectionEnv, OutlivesProjectionTraitDef, and
// OutlivesProjectionComponent rules, any one of which is
// sufficient. If there are no inference variables involved, it's
// not hard to pick the right rule, but if there are, we're in a
// bit of a catch 22: if we picked which rule we were going to
// use, we could add constraints to the region inference graph
// that make it apply, but if we don't add those constraints, the
// rule might not apply (but another rule might). For now, we err
// on the side of adding too few edges into the graph.
// Compute the bounds we can derive from the trait definition.
// These are guaranteed to apply, no matter the inference
// results.
let trait_bounds: Vec<_> = self.verify_bound
.projection_declared_bounds_from_trait(projection_ty)
.collect();
// Compute the bounds we can derive from the environment. This
// is an "approximate" match -- in some cases, these bounds
// may not apply.
let mut approx_env_bounds = self.verify_bound
.projection_approx_declared_bounds_from_env(projection_ty);
debug!(
"projection_must_outlive: approx_env_bounds={:?}",
approx_env_bounds
);
// Remove outlives bounds that we get from the environment but
// which are also deducable from the trait. This arises (cc
// #55756) in cases where you have e.g., `<T as Foo<'a>>::Item:
// 'a` in the environment but `trait Foo<'b> { type Item: 'b
// }` in the trait definition.
approx_env_bounds.retain(|bound| {
match bound.0.sty {
ty::Projection(projection_ty) => {
self.verify_bound.projection_declared_bounds_from_trait(projection_ty)
.all(|r| r!= bound.1)
}
_ => panic!("expected only projection types from env, not {:?}", bound.0),
}
});
// If declared bounds list is empty, the only applicable rule is
// OutlivesProjectionComponent. If there are inference variables,
// then, we can break down the outlives into more primitive
// components without adding unnecessary edges.
//
// If there are *no* inference variables, however, we COULD do
// this, but we choose not to, because the error messages are less
// good. For example, a requirement like `T::Item: 'r` would be
// translated to a requirement that `T: 'r`; when this is reported
// to the user, it will thus say "T: 'r must hold so that T::Item:
// 'r holds". But that makes it sound like the only way to fix
// the problem is to add `T: 'r`, which isn't true. So, if there are no
// inference variables, we use a verify constraint instead of adding
// edges, which winds up enforcing the same condition.
let needs_infer = projection_ty.needs_infer();
if approx_env_bounds.is_empty() && trait_bounds.is_empty() && needs_infer {
debug!("projection_must_outlive: no declared bounds");
for component_ty in projection_ty.substs.types() {
self.type_must_outlive(origin.clone(), component_ty, region);
}
for r in projection_ty.substs.regions() {
self.delegate
.push_sub_region_constraint(origin.clone(), region, r);
}
return;
}
// If we found a unique bound `'b` from the trait, and we
// found nothing else from the environment, then the best
// action is to require that `'b: 'r`, so do that.
//
// This is best no matter what rule we use:
//
// - OutlivesProjectionEnv: these would translate to the requirement that `'b:'r`
// - OutlivesProjectionTraitDef: these would translate to the requirement that `'b:'r`
// - OutlivesProjectionComponent: this would require `'b:'r`
// in addition to other conditions
if!trait_bounds.is_empty()
&& trait_bounds[1..]
.iter()
.chain(approx_env_bounds.iter().map(|b| &b.1))
.all(|b| *b == trait_bounds[0])
{
let unique_bound = trait_bounds[0];
debug!( | .push_sub_region_constraint(origin, region, unique_bound);
return;
}
// Fallback to verifying after the fact that there exists a
// declared bound, or that all the components appearing in the
// projection outlive; in some cases, this may add insufficient
// edges into the inference graph, leading to inference failures
// even though a satisfactory solution exists.
let generic = GenericKind::Projection(projection_ty);
let verify_bound = self.verify_bound.generic_bound(generic);
self.delegate
.push_verify(origin, generic.clone(), region, verify_bound);
}
}
impl<'cx, 'gcx, 'tcx> TypeOutlivesDelegate<'tcx> for &'cx InferCtxt<'cx, 'gcx, 'tcx> {
fn push_sub_region_constraint(
&mut self,
origin: SubregionOrigin<'tcx>,
a: ty::Region<'tcx>,
b: ty::Region<'tcx>,
) {
self.sub_regions(origin, a, b)
}
fn push_verify(
&mut self,
origin: SubregionOrigin<'tcx>,
kind: GenericKind<'tcx>,
a: ty::Region<'tcx>,
bound: VerifyBound<'tcx>,
) {
self.verify_generic_bound(origin, kind, a, bound)
}
} | "projection_must_outlive: unique trait bound = {:?}",
unique_bound
);
debug!("projection_must_outlive: unique declared bound appears in trait ref");
self.delegate | random_line_split |
obligations.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Code that handles "type-outlives" constraints like `T: 'a`. This
//! is based on the `push_outlives_components` function defined on the tcx,
//! but it adds a bit of heuristics on top, in particular to deal with
//! associated types and projections.
//!
//! When we process a given `T: 'a` obligation, we may produce two
//! kinds of constraints for the region inferencer:
//!
//! - Relationships between inference variables and other regions.
//! For example, if we have `&'?0 u32: 'a`, then we would produce
//! a constraint that `'a <= '?0`.
//! - "Verifys" that must be checked after inferencing is done.
//! For example, if we know that, for some type parameter `T`,
//! `T: 'a + 'b`, and we have a requirement that `T: '?1`,
//! then we add a "verify" that checks that `'?1 <= 'a || '?1 <= 'b`.
//! - Note the difference with the previous case: here, the region
//! variable must be less than something else, so this doesn't
//! affect how inference works (it finds the smallest region that
//! will do); it's just a post-condition that we have to check.
//!
//! **The key point is that once this function is done, we have
//! reduced all of our "type-region outlives" obligations into relationships
//! between individual regions.**
//!
//! One key input to this function is the set of "region-bound pairs".
//! These are basically the relationships between type parameters and
//! regions that are in scope at the point where the outlives
//! obligation was incurred. **When type-checking a function,
//! particularly in the face of closures, this is not known until
//! regionck runs!** This is because some of those bounds come
//! from things we have yet to infer.
//!
//! Consider:
//!
//! ```
//! fn bar<T>(a: T, b: impl for<'a> Fn(&'a T));
//! fn foo<T>(x: T) {
//! bar(x, |y| {... })
//! // ^ closure arg
//! }
//! ```
//!
//! Here, the type of `y` may involve inference variables and the
//! like, and it may also contain implied bounds that are needed to
//! type-check the closure body (e.g., here it informs us that `T`
//! outlives the late-bound region `'a`).
//!
//! Note that by delaying the gathering of implied bounds until all
//! inference information is known, we may find relationships between
//! bound regions and other regions in the environment. For example,
//! when we first check a closure like the one expected as argument
//! to `foo`:
//!
//! ```
//! fn foo<U, F: for<'a> FnMut(&'a U)>(_f: F) {}
//! ```
//!
//! the type of the closure's first argument would be `&'a?U`. We
//! might later infer `?U` to something like `&'b u32`, which would
//! imply that `'b: 'a`.
use infer::outlives::env::RegionBoundPairs;
use infer::outlives::verify::VerifyBoundCx;
use infer::{self, GenericKind, InferCtxt, RegionObligation, SubregionOrigin, VerifyBound};
use rustc_data_structures::fx::FxHashMap;
use syntax::ast;
use traits::ObligationCause;
use ty::outlives::Component;
use ty::{self, Region, Ty, TyCtxt, TypeFoldable};
impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> {
/// Registers that the given region obligation must be resolved
/// from within the scope of `body_id`. These regions are enqueued
/// and later processed by regionck, when full type information is
/// available (see `region_obligations` field for more
/// information).
pub fn register_region_obligation(
&self,
body_id: ast::NodeId,
obligation: RegionObligation<'tcx>,
) {
debug!(
"register_region_obligation(body_id={:?}, obligation={:?})",
body_id, obligation
);
self.region_obligations
.borrow_mut()
.push((body_id, obligation));
}
pub fn register_region_obligation_with_cause(
&self,
sup_type: Ty<'tcx>,
sub_region: Region<'tcx>,
cause: &ObligationCause<'tcx>,
) {
let origin = SubregionOrigin::from_obligation_cause(cause, || {
infer::RelateParamBound(cause.span, sup_type)
});
self.register_region_obligation(
cause.body_id,
RegionObligation {
sup_type,
sub_region,
origin,
},
);
}
/// Trait queries just want to pass back type obligations "as is"
pub fn take_registered_region_obligations(&self) -> Vec<(ast::NodeId, RegionObligation<'tcx>)> {
::std::mem::replace(&mut *self.region_obligations.borrow_mut(), vec![])
}
/// Process the region obligations that must be proven (during
/// `regionck`) for the given `body_id`, given information about
/// the region bounds in scope and so forth. This function must be
/// invoked for all relevant body-ids before region inference is
/// done (or else an assert will fire).
///
/// See the `region_obligations` field of `InferCtxt` for some
/// comments about how this function fits into the overall expected
/// flow of the inferencer. The key point is that it is
/// invoked after all type-inference variables have been bound --
/// towards the end of regionck. This also ensures that the
/// region-bound-pairs are available (see comments above regarding
/// closures).
///
/// # Parameters
///
/// - `region_bound_pairs`: the set of region bounds implied by
/// the parameters and where-clauses. In particular, each pair
/// `('a, K)` in this list tells us that the bounds in scope
/// indicate that `K: 'a`, where `K` is either a generic
/// parameter like `T` or a projection like `T::Item`.
/// - `implicit_region_bound`: if some, this is a region bound
/// that is considered to hold for all type parameters (the
/// function body).
/// - `param_env` is the parameter environment for the enclosing function.
/// - `body_id` is the body-id whose region obligations are being
/// processed.
///
/// # Returns
///
/// This function may have to perform normalizations, and hence it
/// returns an `InferOk` with subobligations that must be
/// processed.
pub fn process_registered_region_obligations(
&self,
region_bound_pairs_map: &FxHashMap<ast::NodeId, RegionBoundPairs<'tcx>>,
implicit_region_bound: Option<ty::Region<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
) {
assert!(
!self.in_snapshot.get(),
"cannot process registered region obligations in a snapshot"
);
debug!("process_registered_region_obligations()");
let my_region_obligations = self.take_registered_region_obligations();
for (
body_id,
RegionObligation {
sup_type,
sub_region,
origin,
},
) in my_region_obligations
{
debug!(
"process_registered_region_obligations: sup_type={:?} sub_region={:?} origin={:?}",
sup_type, sub_region, origin
);
let sup_type = self.resolve_type_vars_if_possible(&sup_type);
if let Some(region_bound_pairs) = region_bound_pairs_map.get(&body_id) {
let outlives = &mut TypeOutlives::new(
self,
self.tcx,
®ion_bound_pairs,
implicit_region_bound,
param_env,
);
outlives.type_must_outlive(origin, sup_type, sub_region);
} else {
self.tcx.sess.delay_span_bug(
origin.span(),
&format!("no region-bound-pairs for {:?}", body_id),
)
}
}
}
/// Processes a single ad-hoc region obligation that was not
/// registered in advance.
pub fn type_must_outlive(
&self,
region_bound_pairs: &RegionBoundPairs<'tcx>,
implicit_region_bound: Option<ty::Region<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
origin: infer::SubregionOrigin<'tcx>,
ty: Ty<'tcx>,
region: ty::Region<'tcx>,
) {
let outlives = &mut TypeOutlives::new(
self,
self.tcx,
region_bound_pairs,
implicit_region_bound,
param_env,
);
let ty = self.resolve_type_vars_if_possible(&ty);
outlives.type_must_outlive(origin, ty, region);
}
}
/// The `TypeOutlives` struct has the job of "lowering" a `T: 'a`
/// obligation into a series of `'a: 'b` constraints and "verifys", as
/// described on the module comment. The final constraints are emitted
/// via a "delegate" of type `D` -- this is usually the `infcx`, which
/// accrues them into the `region_obligations` code, but for NLL we
/// use something else.
pub struct TypeOutlives<'cx, 'gcx: 'tcx, 'tcx: 'cx, D>
where
D: TypeOutlivesDelegate<'tcx>,
{
// See the comments on `process_registered_region_obligations` for the meaning
// of these fields.
delegate: D,
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
verify_bound: VerifyBoundCx<'cx, 'gcx, 'tcx>,
}
pub trait TypeOutlivesDelegate<'tcx> {
fn push_sub_region_constraint(
&mut self,
origin: SubregionOrigin<'tcx>,
a: ty::Region<'tcx>,
b: ty::Region<'tcx>,
);
fn push_verify(
&mut self,
origin: SubregionOrigin<'tcx>,
kind: GenericKind<'tcx>,
a: ty::Region<'tcx>,
bound: VerifyBound<'tcx>,
);
}
impl<'cx, 'gcx, 'tcx, D> TypeOutlives<'cx, 'gcx, 'tcx, D>
where
D: TypeOutlivesDelegate<'tcx>,
{
pub fn new(
delegate: D,
tcx: TyCtxt<'cx, 'gcx, 'tcx>,
region_bound_pairs: &'cx RegionBoundPairs<'tcx>,
implicit_region_bound: Option<ty::Region<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
) -> Self {
Self {
delegate,
tcx,
verify_bound: VerifyBoundCx::new(
tcx,
region_bound_pairs,
implicit_region_bound,
param_env,
),
}
}
/// Adds constraints to inference such that `T: 'a` holds (or
/// reports an error if it cannot).
///
/// # Parameters
///
/// - `origin`, the reason we need this constraint
/// - `ty`, the type `T`
/// - `region`, the region `'a`
pub fn type_must_outlive(
&mut self,
origin: infer::SubregionOrigin<'tcx>,
ty: Ty<'tcx>,
region: ty::Region<'tcx>,
) {
debug!(
"type_must_outlive(ty={:?}, region={:?}, origin={:?})",
ty, region, origin
);
assert!(!ty.has_escaping_bound_vars());
let mut components = smallvec![];
self.tcx.push_outlives_components(ty, &mut components);
self.components_must_outlive(origin, &components, region);
}
fn components_must_outlive(
&mut self,
origin: infer::SubregionOrigin<'tcx>,
components: &[Component<'tcx>],
region: ty::Region<'tcx>,
) {
for component in components.iter() {
let origin = origin.clone();
match component {
Component::Region(region1) => {
self.delegate
.push_sub_region_constraint(origin, region, region1);
}
Component::Param(param_ty) => {
self.param_ty_must_outlive(origin, region, *param_ty);
}
Component::Projection(projection_ty) => {
self.projection_must_outlive(origin, region, *projection_ty);
}
Component::EscapingProjection(subcomponents) => {
self.components_must_outlive(origin, &subcomponents, region);
}
Component::UnresolvedInferenceVariable(v) => {
// ignore this, we presume it will yield an error
// later, since if a type variable is not resolved by
// this point it never will be
self.tcx.sess.delay_span_bug(
origin.span(),
&format!("unresolved inference variable in outlives: {:?}", v),
);
}
}
}
}
fn param_ty_must_outlive(
&mut self,
origin: infer::SubregionOrigin<'tcx>,
region: ty::Region<'tcx>,
param_ty: ty::ParamTy,
) {
debug!(
"param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})",
region, param_ty, origin
);
let generic = GenericKind::Param(param_ty);
let verify_bound = self.verify_bound.generic_bound(generic);
self.delegate
.push_verify(origin, generic, region, verify_bound);
}
fn projection_must_outlive(
&mut self,
origin: infer::SubregionOrigin<'tcx>,
region: ty::Region<'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
) {
debug!(
"projection_must_outlive(region={:?}, projection_ty={:?}, origin={:?})",
region, projection_ty, origin
);
// This case is thorny for inference. The fundamental problem is
// that there are many cases where we have choice, and inference
// doesn't like choice (the current region inference in
// particular). :) First off, we have to choose between using the
// OutlivesProjectionEnv, OutlivesProjectionTraitDef, and
// OutlivesProjectionComponent rules, any one of which is
// sufficient. If there are no inference variables involved, it's
// not hard to pick the right rule, but if there are, we're in a
// bit of a catch 22: if we picked which rule we were going to
// use, we could add constraints to the region inference graph
// that make it apply, but if we don't add those constraints, the
// rule might not apply (but another rule might). For now, we err
// on the side of adding too few edges into the graph.
// Compute the bounds we can derive from the trait definition.
// These are guaranteed to apply, no matter the inference
// results.
let trait_bounds: Vec<_> = self.verify_bound
.projection_declared_bounds_from_trait(projection_ty)
.collect();
// Compute the bounds we can derive from the environment. This
// is an "approximate" match -- in some cases, these bounds
// may not apply.
let mut approx_env_bounds = self.verify_bound
.projection_approx_declared_bounds_from_env(projection_ty);
debug!(
"projection_must_outlive: approx_env_bounds={:?}",
approx_env_bounds
);
// Remove outlives bounds that we get from the environment but
// which are also deducable from the trait. This arises (cc
// #55756) in cases where you have e.g., `<T as Foo<'a>>::Item:
// 'a` in the environment but `trait Foo<'b> { type Item: 'b
// }` in the trait definition.
approx_env_bounds.retain(|bound| {
match bound.0.sty {
ty::Projection(projection_ty) => {
self.verify_bound.projection_declared_bounds_from_trait(projection_ty)
.all(|r| r!= bound.1)
}
_ => panic!("expected only projection types from env, not {:?}", bound.0),
}
});
// If declared bounds list is empty, the only applicable rule is
// OutlivesProjectionComponent. If there are inference variables,
// then, we can break down the outlives into more primitive
// components without adding unnecessary edges.
//
// If there are *no* inference variables, however, we COULD do
// this, but we choose not to, because the error messages are less
// good. For example, a requirement like `T::Item: 'r` would be
// translated to a requirement that `T: 'r`; when this is reported
// to the user, it will thus say "T: 'r must hold so that T::Item:
// 'r holds". But that makes it sound like the only way to fix
// the problem is to add `T: 'r`, which isn't true. So, if there are no
// inference variables, we use a verify constraint instead of adding
// edges, which winds up enforcing the same condition.
let needs_infer = projection_ty.needs_infer();
if approx_env_bounds.is_empty() && trait_bounds.is_empty() && needs_infer |
// If we found a unique bound `'b` from the trait, and we
// found nothing else from the environment, then the best
// action is to require that `'b: 'r`, so do that.
//
// This is best no matter what rule we use:
//
// - OutlivesProjectionEnv: these would translate to the requirement that `'b:'r`
// - OutlivesProjectionTraitDef: these would translate to the requirement that `'b:'r`
// - OutlivesProjectionComponent: this would require `'b:'r`
// in addition to other conditions
if!trait_bounds.is_empty()
&& trait_bounds[1..]
.iter()
.chain(approx_env_bounds.iter().map(|b| &b.1))
.all(|b| *b == trait_bounds[0])
{
let unique_bound = trait_bounds[0];
debug!(
"projection_must_outlive: unique trait bound = {:?}",
unique_bound
);
debug!("projection_must_outlive: unique declared bound appears in trait ref");
self.delegate
.push_sub_region_constraint(origin, region, unique_bound);
return;
}
// Fallback to verifying after the fact that there exists a
// declared bound, or that all the components appearing in the
// projection outlive; in some cases, this may add insufficient
// edges into the inference graph, leading to inference failures
// even though a satisfactory solution exists.
let generic = GenericKind::Projection(projection_ty);
let verify_bound = self.verify_bound.generic_bound(generic);
self.delegate
.push_verify(origin, generic.clone(), region, verify_bound);
}
}
/// The inference context itself acts as the default delegate: emitted
/// constraints are recorded directly into the inference machinery.
impl<'cx, 'gcx, 'tcx> TypeOutlivesDelegate<'tcx> for &'cx InferCtxt<'cx, 'gcx, 'tcx> {
    fn push_sub_region_constraint(
        &mut self,
        origin: SubregionOrigin<'tcx>,
        r_a: ty::Region<'tcx>,
        r_b: ty::Region<'tcx>,
    ) {
        // Forward straight to the region-constraint collector.
        self.sub_regions(origin, r_a, r_b);
    }

    fn push_verify(
        &mut self,
        origin: SubregionOrigin<'tcx>,
        kind: GenericKind<'tcx>,
        r: ty::Region<'tcx>,
        bound: VerifyBound<'tcx>,
    ) {
        // Deferred check; validated once region inference completes.
        self.verify_generic_bound(origin, kind, r, bound);
    }
}
| {
debug!("projection_must_outlive: no declared bounds");
for component_ty in projection_ty.substs.types() {
self.type_must_outlive(origin.clone(), component_ty, region);
}
for r in projection_ty.substs.regions() {
self.delegate
.push_sub_region_constraint(origin.clone(), region, r);
}
return;
} | conditional_block |
obligations.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Code that handles "type-outlives" constraints like `T: 'a`. This
//! is based on the `push_outlives_components` function defined on the tcx,
//! but it adds a bit of heuristics on top, in particular to deal with
//! associated types and projections.
//!
//! When we process a given `T: 'a` obligation, we may produce two
//! kinds of constraints for the region inferencer:
//!
//! - Relationships between inference variables and other regions.
//! For example, if we have `&'?0 u32: 'a`, then we would produce
//! a constraint that `'a <= '?0`.
//! - "Verifys" that must be checked after inferencing is done.
//! For example, if we know that, for some type parameter `T`,
//! `T: 'a + 'b`, and we have a requirement that `T: '?1`,
//! then we add a "verify" that checks that `'?1 <= 'a || '?1 <= 'b`.
//! - Note the difference with the previous case: here, the region
//! variable must be less than something else, so this doesn't
//! affect how inference works (it finds the smallest region that
//! will do); it's just a post-condition that we have to check.
//!
//! **The key point is that once this function is done, we have
//! reduced all of our "type-region outlives" obligations into relationships
//! between individual regions.**
//!
//! One key input to this function is the set of "region-bound pairs".
//! These are basically the relationships between type parameters and
//! regions that are in scope at the point where the outlives
//! obligation was incurred. **When type-checking a function,
//! particularly in the face of closures, this is not known until
//! regionck runs!** This is because some of those bounds come
//! from things we have yet to infer.
//!
//! Consider:
//!
//! ```
//! fn bar<T>(a: T, b: impl for<'a> Fn(&'a T));
//! fn foo<T>(x: T) {
//! bar(x, |y| {... })
//! // ^ closure arg
//! }
//! ```
//!
//! Here, the type of `y` may involve inference variables and the
//! like, and it may also contain implied bounds that are needed to
//! type-check the closure body (e.g., here it informs us that `T`
//! outlives the late-bound region `'a`).
//!
//! Note that by delaying the gathering of implied bounds until all
//! inference information is known, we may find relationships between
//! bound regions and other regions in the environment. For example,
//! when we first check a closure like the one expected as argument
//! to `foo`:
//!
//! ```
//! fn foo<U, F: for<'a> FnMut(&'a U)>(_f: F) {}
//! ```
//!
//! the type of the closure's first argument would be `&'a?U`. We
//! might later infer `?U` to something like `&'b u32`, which would
//! imply that `'b: 'a`.
use infer::outlives::env::RegionBoundPairs;
use infer::outlives::verify::VerifyBoundCx;
use infer::{self, GenericKind, InferCtxt, RegionObligation, SubregionOrigin, VerifyBound};
use rustc_data_structures::fx::FxHashMap;
use syntax::ast;
use traits::ObligationCause;
use ty::outlives::Component;
use ty::{self, Region, Ty, TyCtxt, TypeFoldable};
impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> {
/// Registers that the given region obligation must be resolved
/// from within the scope of `body_id`. These regions are enqueued
/// and later processed by regionck, when full type information is
/// available (see `region_obligations` field for more
/// information).
pub fn register_region_obligation(
&self,
body_id: ast::NodeId,
obligation: RegionObligation<'tcx>,
) |
pub fn register_region_obligation_with_cause(
&self,
sup_type: Ty<'tcx>,
sub_region: Region<'tcx>,
cause: &ObligationCause<'tcx>,
) {
let origin = SubregionOrigin::from_obligation_cause(cause, || {
infer::RelateParamBound(cause.span, sup_type)
});
self.register_region_obligation(
cause.body_id,
RegionObligation {
sup_type,
sub_region,
origin,
},
);
}
/// Trait queries just want to pass back type obligations "as is"
pub fn take_registered_region_obligations(&self) -> Vec<(ast::NodeId, RegionObligation<'tcx>)> {
::std::mem::replace(&mut *self.region_obligations.borrow_mut(), vec![])
}
/// Process the region obligations that must be proven (during
/// `regionck`) for the given `body_id`, given information about
/// the region bounds in scope and so forth. This function must be
/// invoked for all relevant body-ids before region inference is
/// done (or else an assert will fire).
///
/// See the `region_obligations` field of `InferCtxt` for some
/// comments about how this function fits into the overall expected
/// flow of the inferencer. The key point is that it is
/// invoked after all type-inference variables have been bound --
/// towards the end of regionck. This also ensures that the
/// region-bound-pairs are available (see comments above regarding
/// closures).
///
/// # Parameters
///
/// - `region_bound_pairs`: the set of region bounds implied by
/// the parameters and where-clauses. In particular, each pair
/// `('a, K)` in this list tells us that the bounds in scope
/// indicate that `K: 'a`, where `K` is either a generic
/// parameter like `T` or a projection like `T::Item`.
/// - `implicit_region_bound`: if some, this is a region bound
/// that is considered to hold for all type parameters (the
/// function body).
/// - `param_env` is the parameter environment for the enclosing function.
/// - `body_id` is the body-id whose region obligations are being
/// processed.
///
/// # Returns
///
/// This function may have to perform normalizations, and hence it
/// returns an `InferOk` with subobligations that must be
/// processed.
pub fn process_registered_region_obligations(
&self,
region_bound_pairs_map: &FxHashMap<ast::NodeId, RegionBoundPairs<'tcx>>,
implicit_region_bound: Option<ty::Region<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
) {
assert!(
!self.in_snapshot.get(),
"cannot process registered region obligations in a snapshot"
);
debug!("process_registered_region_obligations()");
let my_region_obligations = self.take_registered_region_obligations();
for (
body_id,
RegionObligation {
sup_type,
sub_region,
origin,
},
) in my_region_obligations
{
debug!(
"process_registered_region_obligations: sup_type={:?} sub_region={:?} origin={:?}",
sup_type, sub_region, origin
);
let sup_type = self.resolve_type_vars_if_possible(&sup_type);
if let Some(region_bound_pairs) = region_bound_pairs_map.get(&body_id) {
let outlives = &mut TypeOutlives::new(
self,
self.tcx,
®ion_bound_pairs,
implicit_region_bound,
param_env,
);
outlives.type_must_outlive(origin, sup_type, sub_region);
} else {
self.tcx.sess.delay_span_bug(
origin.span(),
&format!("no region-bound-pairs for {:?}", body_id),
)
}
}
}
/// Processes a single ad-hoc region obligation that was not
/// registered in advance.
pub fn type_must_outlive(
&self,
region_bound_pairs: &RegionBoundPairs<'tcx>,
implicit_region_bound: Option<ty::Region<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
origin: infer::SubregionOrigin<'tcx>,
ty: Ty<'tcx>,
region: ty::Region<'tcx>,
) {
let outlives = &mut TypeOutlives::new(
self,
self.tcx,
region_bound_pairs,
implicit_region_bound,
param_env,
);
let ty = self.resolve_type_vars_if_possible(&ty);
outlives.type_must_outlive(origin, ty, region);
}
}
/// The `TypeOutlives` struct has the job of "lowering" a `T: 'a`
/// obligation into a series of `'a: 'b` constraints and "verifys", as
/// described on the module comment. The final constraints are emitted
/// via a "delegate" of type `D` -- this is usually the `infcx`, which
/// accrues them into the `region_obligations` code, but for NLL we
/// use something else.
pub struct TypeOutlives<'cx, 'gcx: 'tcx, 'tcx: 'cx, D>
where
    D: TypeOutlivesDelegate<'tcx>,
{
    // See the comments on `process_registered_region_obligations` for the meaning
    // of these fields.
    // Sink that receives the emitted constraints and deferred verifys.
    delegate: D,
    // Type context; used to decompose types into outlives components.
    tcx: TyCtxt<'cx, 'gcx, 'tcx>,
    // Computes `VerifyBound`s from the in-scope region-bound pairs,
    // the implicit region bound, and the parameter environment.
    verify_bound: VerifyBoundCx<'cx, 'gcx, 'tcx>,
}
/// Consumer of the constraints produced by `TypeOutlives`. Ordinary
/// inference implements this for `&InferCtxt`; NLL supplies its own
/// implementation.
pub trait TypeOutlivesDelegate<'tcx> {
    /// Record a sub-region relationship between `a` and `b` arising at
    /// `origin` (same argument convention as `InferCtxt::sub_regions`).
    fn push_sub_region_constraint(
        &mut self,
        origin: SubregionOrigin<'tcx>,
        a: ty::Region<'tcx>,
        b: ty::Region<'tcx>,
    );
    /// Record a deferred check that `kind: a` is justified by `bound`,
    /// to be validated once region inference has completed.
    fn push_verify(
        &mut self,
        origin: SubregionOrigin<'tcx>,
        kind: GenericKind<'tcx>,
        a: ty::Region<'tcx>,
        bound: VerifyBound<'tcx>,
    );
}
impl<'cx, 'gcx, 'tcx, D> TypeOutlives<'cx, 'gcx, 'tcx, D>
where
    D: TypeOutlivesDelegate<'tcx>,
{
    /// Creates a lowering context that reports constraints to `delegate`,
    /// with verify-bounds computed from `region_bound_pairs`,
    /// `implicit_region_bound`, and `param_env`.
    pub fn new(
        delegate: D,
        tcx: TyCtxt<'cx, 'gcx, 'tcx>,
        region_bound_pairs: &'cx RegionBoundPairs<'tcx>,
        implicit_region_bound: Option<ty::Region<'tcx>>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Self {
        Self {
            delegate,
            tcx,
            verify_bound: VerifyBoundCx::new(
                tcx,
                region_bound_pairs,
                implicit_region_bound,
                param_env,
            ),
        }
    }

    /// Adds constraints to inference such that `T: 'a` holds (or
    /// reports an error if it cannot).
    ///
    /// # Parameters
    ///
    /// - `origin`, the reason we need this constraint
    /// - `ty`, the type `T`
    /// - `region`, the region `'a`
    pub fn type_must_outlive(
        &mut self,
        origin: infer::SubregionOrigin<'tcx>,
        ty: Ty<'tcx>,
        region: ty::Region<'tcx>,
    ) {
        debug!(
            "type_must_outlive(ty={:?}, region={:?}, origin={:?})",
            ty, region, origin
        );
        assert!(!ty.has_escaping_bound_vars());
        // Break `ty` into its outlives components, then constrain each one.
        let mut components = smallvec![];
        self.tcx.push_outlives_components(ty, &mut components);
        self.components_must_outlive(origin, &components, region);
    }

    /// Emits one primitive constraint per outlives component.
    fn components_must_outlive(
        &mut self,
        origin: infer::SubregionOrigin<'tcx>,
        components: &[Component<'tcx>],
        region: ty::Region<'tcx>,
    ) {
        for component in components.iter() {
            let origin = origin.clone();
            match component {
                Component::Region(region1) => {
                    // Direct region-to-region edge.
                    self.delegate
                        .push_sub_region_constraint(origin, region, region1);
                }
                Component::Param(param_ty) => {
                    self.param_ty_must_outlive(origin, region, *param_ty);
                }
                Component::Projection(projection_ty) => {
                    self.projection_must_outlive(origin, region, *projection_ty);
                }
                Component::EscapingProjection(subcomponents) => {
                    // Recurse into the subcomponents of a projection that
                    // captures escaping bound regions.
                    self.components_must_outlive(origin, &subcomponents, region);
                }
                Component::UnresolvedInferenceVariable(v) => {
                    // ignore this, we presume it will yield an error
                    // later, since if a type variable is not resolved by
                    // this point it never will be
                    self.tcx.sess.delay_span_bug(
                        origin.span(),
                        &format!("unresolved inference variable in outlives: {:?}", v),
                    );
                }
            }
        }
    }

    /// Handles `P: 'region` for a type parameter `P` via a deferred
    /// "verify" built from the parameter's declared bounds.
    fn param_ty_must_outlive(
        &mut self,
        origin: infer::SubregionOrigin<'tcx>,
        region: ty::Region<'tcx>,
        param_ty: ty::ParamTy,
    ) {
        debug!(
            "param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})",
            region, param_ty, origin
        );
        let generic = GenericKind::Param(param_ty);
        let verify_bound = self.verify_bound.generic_bound(generic);
        self.delegate
            .push_verify(origin, generic, region, verify_bound);
    }

    /// Handles `<P as Trait>::Item: 'region`, choosing between direct
    /// edges, a unique trait-declared bound, or a deferred verify.
    fn projection_must_outlive(
        &mut self,
        origin: infer::SubregionOrigin<'tcx>,
        region: ty::Region<'tcx>,
        projection_ty: ty::ProjectionTy<'tcx>,
    ) {
        debug!(
            "projection_must_outlive(region={:?}, projection_ty={:?}, origin={:?})",
            region, projection_ty, origin
        );
        // This case is thorny for inference. The fundamental problem is
        // that there are many cases where we have choice, and inference
        // doesn't like choice (the current region inference in
        // particular). :) First off, we have to choose between using the
        // OutlivesProjectionEnv, OutlivesProjectionTraitDef, and
        // OutlivesProjectionComponent rules, any one of which is
        // sufficient. If there are no inference variables involved, it's
        // not hard to pick the right rule, but if there are, we're in a
        // bit of a catch 22: if we picked which rule we were going to
        // use, we could add constraints to the region inference graph
        // that make it apply, but if we don't add those constraints, the
        // rule might not apply (but another rule might). For now, we err
        // on the side of adding too few edges into the graph.

        // Compute the bounds we can derive from the trait definition.
        // These are guaranteed to apply, no matter the inference
        // results.
        let trait_bounds: Vec<_> = self.verify_bound
            .projection_declared_bounds_from_trait(projection_ty)
            .collect();

        // Compute the bounds we can derive from the environment. This
        // is an "approximate" match -- in some cases, these bounds
        // may not apply.
        let mut approx_env_bounds = self.verify_bound
            .projection_approx_declared_bounds_from_env(projection_ty);
        debug!(
            "projection_must_outlive: approx_env_bounds={:?}",
            approx_env_bounds
        );

        // Remove outlives bounds that we get from the environment but
        // which are also deducable from the trait. This arises (cc
        // #55756) in cases where you have e.g., `<T as Foo<'a>>::Item:
        // 'a` in the environment but `trait Foo<'b> { type Item: 'b
        // }` in the trait definition.
        approx_env_bounds.retain(|bound| {
            match bound.0.sty {
                ty::Projection(projection_ty) => {
                    self.verify_bound.projection_declared_bounds_from_trait(projection_ty)
                        .all(|r| r != bound.1)
                }
                _ => panic!("expected only projection types from env, not {:?}", bound.0),
            }
        });

        // If declared bounds list is empty, the only applicable rule is
        // OutlivesProjectionComponent. If there are inference variables,
        // then, we can break down the outlives into more primitive
        // components without adding unnecessary edges.
        //
        // If there are *no* inference variables, however, we COULD do
        // this, but we choose not to, because the error messages are less
        // good. For example, a requirement like `T::Item: 'r` would be
        // translated to a requirement that `T: 'r`; when this is reported
        // to the user, it will thus say "T: 'r must hold so that T::Item:
        // 'r holds". But that makes it sound like the only way to fix
        // the problem is to add `T: 'r`, which isn't true. So, if there are no
        // inference variables, we use a verify constraint instead of adding
        // edges, which winds up enforcing the same condition.
        let needs_infer = projection_ty.needs_infer();
        if approx_env_bounds.is_empty() && trait_bounds.is_empty() && needs_infer {
            debug!("projection_must_outlive: no declared bounds");
            for component_ty in projection_ty.substs.types() {
                self.type_must_outlive(origin.clone(), component_ty, region);
            }
            for r in projection_ty.substs.regions() {
                self.delegate
                    .push_sub_region_constraint(origin.clone(), region, r);
            }
            return;
        }

        // If we found a unique bound `'b` from the trait, and we
        // found nothing else from the environment, then the best
        // action is to require that `'b: 'r`, so do that.
        //
        // This is best no matter what rule we use:
        //
        // - OutlivesProjectionEnv: these would translate to the requirement that `'b:'r`
        // - OutlivesProjectionTraitDef: these would translate to the requirement that `'b:'r`
        // - OutlivesProjectionComponent: this would require `'b:'r`
        //   in addition to other conditions
        if !trait_bounds.is_empty()
            && trait_bounds[1..]
                .iter()
                .chain(approx_env_bounds.iter().map(|b| &b.1))
                .all(|b| *b == trait_bounds[0])
        {
            let unique_bound = trait_bounds[0];
            debug!(
                "projection_must_outlive: unique trait bound = {:?}",
                unique_bound
            );
            debug!("projection_must_outlive: unique declared bound appears in trait ref");
            self.delegate
                .push_sub_region_constraint(origin, region, unique_bound);
            return;
        }

        // Fallback to verifying after the fact that there exists a
        // declared bound, or that all the components appearing in the
        // projection outlive; in some cases, this may add insufficient
        // edges into the inference graph, leading to inference failures
        // even though a satisfactory solution exists.
        let generic = GenericKind::Projection(projection_ty);
        let verify_bound = self.verify_bound.generic_bound(generic);
        self.delegate
            .push_verify(origin, generic.clone(), region, verify_bound);
    }
}
/// The inference context itself acts as the default delegate,
/// recording constraints directly into the inference machinery.
impl<'cx, 'gcx, 'tcx> TypeOutlivesDelegate<'tcx> for &'cx InferCtxt<'cx, 'gcx, 'tcx> {
    fn push_sub_region_constraint(
        &mut self,
        origin: SubregionOrigin<'tcx>,
        a: ty::Region<'tcx>,
        b: ty::Region<'tcx>,
    ) {
        // Forward straight to the region-constraint collector.
        self.sub_regions(origin, a, b)
    }

    fn push_verify(
        &mut self,
        origin: SubregionOrigin<'tcx>,
        kind: GenericKind<'tcx>,
        a: ty::Region<'tcx>,
        bound: VerifyBound<'tcx>,
    ) {
        // Deferred check; validated once region inference completes.
        self.verify_generic_bound(origin, kind, a, bound)
    }
}
| {
debug!(
"register_region_obligation(body_id={:?}, obligation={:?})",
body_id, obligation
);
self.region_obligations
.borrow_mut()
.push((body_id, obligation));
} | identifier_body |
obligations.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Code that handles "type-outlives" constraints like `T: 'a`. This
//! is based on the `push_outlives_components` function defined on the tcx,
//! but it adds a bit of heuristics on top, in particular to deal with
//! associated types and projections.
//!
//! When we process a given `T: 'a` obligation, we may produce two
//! kinds of constraints for the region inferencer:
//!
//! - Relationships between inference variables and other regions.
//! For example, if we have `&'?0 u32: 'a`, then we would produce
//! a constraint that `'a <= '?0`.
//! - "Verifys" that must be checked after inferencing is done.
//! For example, if we know that, for some type parameter `T`,
//! `T: 'a + 'b`, and we have a requirement that `T: '?1`,
//! then we add a "verify" that checks that `'?1 <= 'a || '?1 <= 'b`.
//! - Note the difference with the previous case: here, the region
//! variable must be less than something else, so this doesn't
//! affect how inference works (it finds the smallest region that
//! will do); it's just a post-condition that we have to check.
//!
//! **The key point is that once this function is done, we have
//! reduced all of our "type-region outlives" obligations into relationships
//! between individual regions.**
//!
//! One key input to this function is the set of "region-bound pairs".
//! These are basically the relationships between type parameters and
//! regions that are in scope at the point where the outlives
//! obligation was incurred. **When type-checking a function,
//! particularly in the face of closures, this is not known until
//! regionck runs!** This is because some of those bounds come
//! from things we have yet to infer.
//!
//! Consider:
//!
//! ```
//! fn bar<T>(a: T, b: impl for<'a> Fn(&'a T));
//! fn foo<T>(x: T) {
//! bar(x, |y| {... })
//! // ^ closure arg
//! }
//! ```
//!
//! Here, the type of `y` may involve inference variables and the
//! like, and it may also contain implied bounds that are needed to
//! type-check the closure body (e.g., here it informs us that `T`
//! outlives the late-bound region `'a`).
//!
//! Note that by delaying the gathering of implied bounds until all
//! inference information is known, we may find relationships between
//! bound regions and other regions in the environment. For example,
//! when we first check a closure like the one expected as argument
//! to `foo`:
//!
//! ```
//! fn foo<U, F: for<'a> FnMut(&'a U)>(_f: F) {}
//! ```
//!
//! the type of the closure's first argument would be `&'a?U`. We
//! might later infer `?U` to something like `&'b u32`, which would
//! imply that `'b: 'a`.
use infer::outlives::env::RegionBoundPairs;
use infer::outlives::verify::VerifyBoundCx;
use infer::{self, GenericKind, InferCtxt, RegionObligation, SubregionOrigin, VerifyBound};
use rustc_data_structures::fx::FxHashMap;
use syntax::ast;
use traits::ObligationCause;
use ty::outlives::Component;
use ty::{self, Region, Ty, TyCtxt, TypeFoldable};
impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> {
/// Registers that the given region obligation must be resolved
/// from within the scope of `body_id`. These regions are enqueued
/// and later processed by regionck, when full type information is
/// available (see `region_obligations` field for more
/// information).
pub fn | (
&self,
body_id: ast::NodeId,
obligation: RegionObligation<'tcx>,
) {
debug!(
"register_region_obligation(body_id={:?}, obligation={:?})",
body_id, obligation
);
self.region_obligations
.borrow_mut()
.push((body_id, obligation));
}
pub fn register_region_obligation_with_cause(
&self,
sup_type: Ty<'tcx>,
sub_region: Region<'tcx>,
cause: &ObligationCause<'tcx>,
) {
let origin = SubregionOrigin::from_obligation_cause(cause, || {
infer::RelateParamBound(cause.span, sup_type)
});
self.register_region_obligation(
cause.body_id,
RegionObligation {
sup_type,
sub_region,
origin,
},
);
}
/// Trait queries just want to pass back type obligations "as is"
pub fn take_registered_region_obligations(&self) -> Vec<(ast::NodeId, RegionObligation<'tcx>)> {
::std::mem::replace(&mut *self.region_obligations.borrow_mut(), vec![])
}
/// Process the region obligations that must be proven (during
/// `regionck`) for the given `body_id`, given information about
/// the region bounds in scope and so forth. This function must be
/// invoked for all relevant body-ids before region inference is
/// done (or else an assert will fire).
///
/// See the `region_obligations` field of `InferCtxt` for some
/// comments about how this function fits into the overall expected
/// flow of the inferencer. The key point is that it is
/// invoked after all type-inference variables have been bound --
/// towards the end of regionck. This also ensures that the
/// region-bound-pairs are available (see comments above regarding
/// closures).
///
/// # Parameters
///
/// - `region_bound_pairs`: the set of region bounds implied by
/// the parameters and where-clauses. In particular, each pair
/// `('a, K)` in this list tells us that the bounds in scope
/// indicate that `K: 'a`, where `K` is either a generic
/// parameter like `T` or a projection like `T::Item`.
/// - `implicit_region_bound`: if some, this is a region bound
/// that is considered to hold for all type parameters (the
/// function body).
/// - `param_env` is the parameter environment for the enclosing function.
/// - `body_id` is the body-id whose region obligations are being
/// processed.
///
/// # Returns
///
/// This function may have to perform normalizations, and hence it
/// returns an `InferOk` with subobligations that must be
/// processed.
pub fn process_registered_region_obligations(
&self,
region_bound_pairs_map: &FxHashMap<ast::NodeId, RegionBoundPairs<'tcx>>,
implicit_region_bound: Option<ty::Region<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
) {
assert!(
!self.in_snapshot.get(),
"cannot process registered region obligations in a snapshot"
);
debug!("process_registered_region_obligations()");
let my_region_obligations = self.take_registered_region_obligations();
for (
body_id,
RegionObligation {
sup_type,
sub_region,
origin,
},
) in my_region_obligations
{
debug!(
"process_registered_region_obligations: sup_type={:?} sub_region={:?} origin={:?}",
sup_type, sub_region, origin
);
let sup_type = self.resolve_type_vars_if_possible(&sup_type);
if let Some(region_bound_pairs) = region_bound_pairs_map.get(&body_id) {
let outlives = &mut TypeOutlives::new(
self,
self.tcx,
®ion_bound_pairs,
implicit_region_bound,
param_env,
);
outlives.type_must_outlive(origin, sup_type, sub_region);
} else {
self.tcx.sess.delay_span_bug(
origin.span(),
&format!("no region-bound-pairs for {:?}", body_id),
)
}
}
}
/// Processes a single ad-hoc region obligation that was not
/// registered in advance.
pub fn type_must_outlive(
&self,
region_bound_pairs: &RegionBoundPairs<'tcx>,
implicit_region_bound: Option<ty::Region<'tcx>>,
param_env: ty::ParamEnv<'tcx>,
origin: infer::SubregionOrigin<'tcx>,
ty: Ty<'tcx>,
region: ty::Region<'tcx>,
) {
let outlives = &mut TypeOutlives::new(
self,
self.tcx,
region_bound_pairs,
implicit_region_bound,
param_env,
);
let ty = self.resolve_type_vars_if_possible(&ty);
outlives.type_must_outlive(origin, ty, region);
}
}
/// The `TypeOutlives` struct has the job of "lowering" a `T: 'a`
/// obligation into a series of `'a: 'b` constraints and "verifys", as
/// described on the module comment. The final constraints are emitted
/// via a "delegate" of type `D` -- this is usually the `infcx`, which
/// accrues them into the `region_obligations` code, but for NLL we
/// use something else.
pub struct TypeOutlives<'cx, 'gcx: 'tcx, 'tcx: 'cx, D>
where
    D: TypeOutlivesDelegate<'tcx>,
{
    // See the comments on `process_registered_region_obligations` for the meaning
    // of these fields.
    // Sink that receives the emitted constraints and deferred verifys.
    delegate: D,
    // Type context; used to decompose types into outlives components.
    tcx: TyCtxt<'cx, 'gcx, 'tcx>,
    // Computes `VerifyBound`s from the in-scope region-bound pairs,
    // the implicit region bound, and the parameter environment.
    verify_bound: VerifyBoundCx<'cx, 'gcx, 'tcx>,
}
/// Consumer of the constraints produced by `TypeOutlives`. Ordinary
/// inference implements this for `&InferCtxt`; NLL supplies its own
/// implementation.
pub trait TypeOutlivesDelegate<'tcx> {
    /// Record a sub-region relationship between `a` and `b` arising at
    /// `origin` (same argument convention as `InferCtxt::sub_regions`).
    fn push_sub_region_constraint(
        &mut self,
        origin: SubregionOrigin<'tcx>,
        a: ty::Region<'tcx>,
        b: ty::Region<'tcx>,
    );
    /// Record a deferred check that `kind: a` is justified by `bound`,
    /// to be validated once region inference has completed.
    fn push_verify(
        &mut self,
        origin: SubregionOrigin<'tcx>,
        kind: GenericKind<'tcx>,
        a: ty::Region<'tcx>,
        bound: VerifyBound<'tcx>,
    );
}
impl<'cx, 'gcx, 'tcx, D> TypeOutlives<'cx, 'gcx, 'tcx, D>
where
D: TypeOutlivesDelegate<'tcx>,
{
/// Creates a lowering context that reports constraints to `delegate`,
/// with verify-bounds computed from `region_bound_pairs`,
/// `implicit_region_bound`, and `param_env`.
pub fn new(
    delegate: D,
    tcx: TyCtxt<'cx, 'gcx, 'tcx>,
    region_bound_pairs: &'cx RegionBoundPairs<'tcx>,
    implicit_region_bound: Option<ty::Region<'tcx>>,
    param_env: ty::ParamEnv<'tcx>,
) -> Self {
    Self {
        delegate,
        tcx,
        verify_bound: VerifyBoundCx::new(
            tcx,
            region_bound_pairs,
            implicit_region_bound,
            param_env,
        ),
    }
}
/// Adds constraints to inference such that `T: 'a` holds (or
/// reports an error if it cannot).
///
/// # Parameters
///
/// - `origin`, the reason we need this constraint
/// - `ty`, the type `T`
/// - `region`, the region `'a`
pub fn type_must_outlive(
    &mut self,
    origin: infer::SubregionOrigin<'tcx>,
    ty: Ty<'tcx>,
    region: ty::Region<'tcx>,
) {
    debug!(
        "type_must_outlive(ty={:?}, region={:?}, origin={:?})",
        ty, region, origin
    );
    assert!(!ty.has_escaping_bound_vars());
    // Break `ty` into its outlives components, then constrain each one.
    let mut components = smallvec![];
    self.tcx.push_outlives_components(ty, &mut components);
    self.components_must_outlive(origin, &components, region);
}
/// Emits one primitive constraint per outlives component of an
/// already-decomposed `T: 'region` obligation.
fn components_must_outlive(
    &mut self,
    origin: infer::SubregionOrigin<'tcx>,
    components: &[Component<'tcx>],
    region: ty::Region<'tcx>,
) {
    for component in components.iter() {
        // Each component gets its own copy of the origin for error reporting.
        let origin = origin.clone();
        match component {
            Component::Region(region1) => {
                // Direct region-to-region edge.
                self.delegate
                    .push_sub_region_constraint(origin, region, region1);
            }
            Component::Param(param_ty) => {
                self.param_ty_must_outlive(origin, region, *param_ty);
            }
            Component::Projection(projection_ty) => {
                self.projection_must_outlive(origin, region, *projection_ty);
            }
            Component::EscapingProjection(subcomponents) => {
                // Recurse into the subcomponents of a projection that
                // captures escaping bound regions.
                self.components_must_outlive(origin, &subcomponents, region);
            }
            Component::UnresolvedInferenceVariable(v) => {
                // ignore this, we presume it will yield an error
                // later, since if a type variable is not resolved by
                // this point it never will be
                self.tcx.sess.delay_span_bug(
                    origin.span(),
                    &format!("unresolved inference variable in outlives: {:?}", v),
                );
            }
        }
    }
}
/// Handles `P: 'region` for a type parameter `P` via a deferred
/// "verify" built from the parameter's declared bounds.
fn param_ty_must_outlive(
    &mut self,
    origin: infer::SubregionOrigin<'tcx>,
    region: ty::Region<'tcx>,
    param_ty: ty::ParamTy,
) {
    debug!(
        "param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})",
        region, param_ty, origin
    );
    let generic = GenericKind::Param(param_ty);
    let verify_bound = self.verify_bound.generic_bound(generic);
    self.delegate
        .push_verify(origin, generic, region, verify_bound);
}
fn projection_must_outlive(
&mut self,
origin: infer::SubregionOrigin<'tcx>,
region: ty::Region<'tcx>,
projection_ty: ty::ProjectionTy<'tcx>,
) {
debug!(
"projection_must_outlive(region={:?}, projection_ty={:?}, origin={:?})",
region, projection_ty, origin
);
// This case is thorny for inference. The fundamental problem is
// that there are many cases where we have choice, and inference
// doesn't like choice (the current region inference in
// particular). :) First off, we have to choose between using the
// OutlivesProjectionEnv, OutlivesProjectionTraitDef, and
// OutlivesProjectionComponent rules, any one of which is
// sufficient. If there are no inference variables involved, it's
// not hard to pick the right rule, but if there are, we're in a
// bit of a catch 22: if we picked which rule we were going to
// use, we could add constraints to the region inference graph
// that make it apply, but if we don't add those constraints, the
// rule might not apply (but another rule might). For now, we err
// on the side of adding too few edges into the graph.
// Compute the bounds we can derive from the trait definition.
// These are guaranteed to apply, no matter the inference
// results.
let trait_bounds: Vec<_> = self.verify_bound
.projection_declared_bounds_from_trait(projection_ty)
.collect();
// Compute the bounds we can derive from the environment. This
// is an "approximate" match -- in some cases, these bounds
// may not apply.
let mut approx_env_bounds = self.verify_bound
.projection_approx_declared_bounds_from_env(projection_ty);
debug!(
"projection_must_outlive: approx_env_bounds={:?}",
approx_env_bounds
);
// Remove outlives bounds that we get from the environment but
// which are also deducable from the trait. This arises (cc
// #55756) in cases where you have e.g., `<T as Foo<'a>>::Item:
// 'a` in the environment but `trait Foo<'b> { type Item: 'b
// }` in the trait definition.
approx_env_bounds.retain(|bound| {
match bound.0.sty {
ty::Projection(projection_ty) => {
self.verify_bound.projection_declared_bounds_from_trait(projection_ty)
.all(|r| r!= bound.1)
}
_ => panic!("expected only projection types from env, not {:?}", bound.0),
}
});
// If declared bounds list is empty, the only applicable rule is
// OutlivesProjectionComponent. If there are inference variables,
// then, we can break down the outlives into more primitive
// components without adding unnecessary edges.
//
// If there are *no* inference variables, however, we COULD do
// this, but we choose not to, because the error messages are less
// good. For example, a requirement like `T::Item: 'r` would be
// translated to a requirement that `T: 'r`; when this is reported
// to the user, it will thus say "T: 'r must hold so that T::Item:
// 'r holds". But that makes it sound like the only way to fix
// the problem is to add `T: 'r`, which isn't true. So, if there are no
// inference variables, we use a verify constraint instead of adding
// edges, which winds up enforcing the same condition.
let needs_infer = projection_ty.needs_infer();
if approx_env_bounds.is_empty() && trait_bounds.is_empty() && needs_infer {
debug!("projection_must_outlive: no declared bounds");
for component_ty in projection_ty.substs.types() {
self.type_must_outlive(origin.clone(), component_ty, region);
}
for r in projection_ty.substs.regions() {
self.delegate
.push_sub_region_constraint(origin.clone(), region, r);
}
return;
}
// If we found a unique bound `'b` from the trait, and we
// found nothing else from the environment, then the best
// action is to require that `'b: 'r`, so do that.
//
// This is best no matter what rule we use:
//
// - OutlivesProjectionEnv: these would translate to the requirement that `'b:'r`
// - OutlivesProjectionTraitDef: these would translate to the requirement that `'b:'r`
// - OutlivesProjectionComponent: this would require `'b:'r`
// in addition to other conditions
if!trait_bounds.is_empty()
&& trait_bounds[1..]
.iter()
.chain(approx_env_bounds.iter().map(|b| &b.1))
.all(|b| *b == trait_bounds[0])
{
let unique_bound = trait_bounds[0];
debug!(
"projection_must_outlive: unique trait bound = {:?}",
unique_bound
);
debug!("projection_must_outlive: unique declared bound appears in trait ref");
self.delegate
.push_sub_region_constraint(origin, region, unique_bound);
return;
}
// Fallback to verifying after the fact that there exists a
// declared bound, or that all the components appearing in the
// projection outlive; in some cases, this may add insufficient
// edges into the inference graph, leading to inference failures
// even though a satisfactory solution exists.
let generic = GenericKind::Projection(projection_ty);
let verify_bound = self.verify_bound.generic_bound(generic);
self.delegate
.push_verify(origin, generic.clone(), region, verify_bound);
}
}
impl<'cx, 'gcx, 'tcx> TypeOutlivesDelegate<'tcx> for &'cx InferCtxt<'cx, 'gcx, 'tcx> {
fn push_sub_region_constraint(
&mut self,
origin: SubregionOrigin<'tcx>,
a: ty::Region<'tcx>,
b: ty::Region<'tcx>,
) {
self.sub_regions(origin, a, b)
}
fn push_verify(
&mut self,
origin: SubregionOrigin<'tcx>,
kind: GenericKind<'tcx>,
a: ty::Region<'tcx>,
bound: VerifyBound<'tcx>,
) {
self.verify_generic_bound(origin, kind, a, bound)
}
}
| register_region_obligation | identifier_name |
registers.rs | = 0,
Usb11FullSpeed = 1
],
UlpiDdrSelect OFFSET(7) NUMBITS(1) [
SingleDataRate8bit = 0,
DoubleDataRate4bit = 1
],
SrpCapable OFFSET(8) NUMBITS(1) [],
HnpCapable OFFSET(9) NUMBITS(1) [],
UsbTurnaroundTime OFFSET(10) NUMBITS(4) []
// Bit 14 reserved
// Bits 15+ not used by SW; not included because they won't be tested
],
pub Reset [ // OTG Databook, Table 5-11
AhbMasterIdle OFFSET(31) NUMBITS(1) [],
DmaRequestSignal OFFSET(30) NUMBITS(1) [],
TxFifoNumber OFFSET(6) NUMBITS(5) [
Fifo0 = 0,
Fifo1 = 1,
Fifo2 = 2,
Fifo3 = 3,
Fifo4 = 4,
Fifo5 = 5,
Fifo6 = 6,
Fifo7 = 7,
Fifo8 = 8,
Fifo9 = 9,
Fifo10 = 10,
Fifo11 = 11,
Fifo12 = 12,
Fifo13 = 13,
Fifo14 = 14,
Fifo15 = 15,
AllFifos = 16 // It's 5 bits, 0x10 means all FIFOs
],
TxFifoFlush OFFSET(5) NUMBITS(1) [],
RxFifoFlush OFFSET(4) NUMBITS(1) [],
InTokenSequenceLearningQueueFlush OFFSET(3) NUMBITS(1) [],
HostFrameCounterReset OFFSET(2) NUMBITS(1) [],
PiuFsDedicatedControllerSoftReset OFFSET(1) NUMBITS(1) []
],
pub Interrupt [ // OTG Databook, Table 5-13
// Note this field is not valid on the Mask register
CurrentMode OFFSET(0) NUMBITS(1) [
Host = 0b0,
Device = 0b1
],
ModeMismatch OFFSET(1) NUMBITS(1) [],
OTG OFFSET(2) NUMBITS(1) [],
StartOfFrame OFFSET(3) NUMBITS(1) [],
RxFifoNotEmpty OFFSET(4) NUMBITS(1) [],
NonPeriodicTxFifoEmpty OFFSET(5) NUMBITS(1) [],
GlobalInNak OFFSET(6) NUMBITS(1) [],
GlobalOutNak OFFSET(7) NUMBITS(1) [],
EarlySuspend OFFSET(10) NUMBITS(1) [],
Suspend OFFSET(11) NUMBITS(1) [],
Reset OFFSET(12) NUMBITS(1) [],
EnumerationDone OFFSET(13) NUMBITS(1) [],
OutIsochronousPacketDropped OFFSET(14) NUMBITS(1) [],
EndOfPeriodicFrame OFFSET(15) NUMBITS(1) [],
RestoreDone OFFSET(16) NUMBITS(1) [],
EndpointMismatch OFFSET(17) NUMBITS(1) [],
InEndpoints OFFSET(18) NUMBITS(1) [],
OutEndpoints OFFSET(19) NUMBITS(1) [],
IncompleteIsochronousInTransfer OFFSET(20) NUMBITS(1) [],
IncompletePeriodicTransfer OFFSET(21) NUMBITS(1) [],
DataFetchSuspended OFFSET(22) NUMBITS(1) [],
ResetDetected OFFSET(23) NUMBITS(1) [],
ConnectIDChange OFFSET(28) NUMBITS(1) [],
DisconnectDetected OFFSET(29) NUMBITS(1) [],
SessionRequest OFFSET(30) NUMBITS(1) [],
ResumeWakeup OFFSET(31) NUMBITS(1) []
],
pub Gpio [ // OTG Databook, Table 5-22
Gpi OFFSET(0) NUMBITS(16) [],
GpoRegister OFFSET(16) NUMBITS(4) [],
GpoValue OFFSET(20) NUMBITS(8) [],
GpoOperation OFFSET(31) NUMBITS(1) [
Read = 0,
Write = 1
]
],
pub DeviceConfig [ // OTG Databook, Table 5-53
DeviceSpeed OFFSET(0) NUMBITS(2) [
High = 0b00,
Full2 = 0b01,
Low = 0b10,
Full1 = 0b11
],
DeviceAddress OFFSET(4) NUMBITS(7) [],
PeriodicFrameInterval OFFSET(11) NUMBITS(2) [
Interval80 = 0b00,
Interval85 = 0b01,
Interval90 = 0b10,
Interval95 = 0b11
],
EnableDeviceOutNak OFFSET(13) NUMBITS(1) [],
XcvrDelay OFFSET(14) NUMBITS(1) [],
ErraticErrorInterruptMask OFFSET(15) NUMBITS(1) [],
InEndpointMismatchCount OFFSET(18) NUMBITS(5) [],
EnableScatterGatherDMAInDeviceMode OFFSET(23) NUMBITS(1) [],
PeriodicScheduling OFFSET(24) NUMBITS(2) [
Interval25 = 0b00,
Interval50 = 0b01,
Interval75 = 0b10
],
ResumeValidationPeriod OFFSET(26) NUMBITS(6) []
],
pub DeviceControl [ // OTG Databook, Table 5-54
RemoteWakeupSignaling OFFSET(0) NUMBITS(1) [],
SoftDisconnect OFFSET(1) NUMBITS(1) [],
GlobalNonPeriodicInNakStatus OFFSET(2) NUMBITS(1) [],
GlobalOutNakStatus OFFSET(3) NUMBITS(1) [],
TestControl OFFSET(4) NUMBITS(3) [
Disabled = 0b000,
ModeJ = 0b001,
ModeK = 0b010,
ModeSE0Nak = 0b011,
ModePacket = 0b100,
ModeForceEnable = 0b101
],
SetGlobalNonPeriodicInNak OFFSET(7) NUMBITS(1) [],
ClearGlobalNonPeriodicInNak OFFSET(8) NUMBITS(1) [],
SetGlobalOutNak OFFSET(9) NUMBITS(1) [],
ClearGlobalOutNak OFFSET(10) NUMBITS(1) [],
PowerOnProgrammingDone OFFSET(11) NUMBITS(1) [],
GlobalMultiCount OFFSET(13) NUMBITS(2) [
CountInvalid = 0b00,
Count1Packet = 0b01,
Count2Packets = 0b10,
Count3Packets = 0b11
],
IgnoreFrameNumber OFFSET(15) NUMBITS(1) [],
NakOnBabbleError OFFSET(16) NUMBITS(1) [],
EnableContinueOnBna OFFSET(17) NUMBITS(1) [],
DeepSleepBESLReject OFFSET(18) NUMBITS(1) []
],
pub InEndpointInterruptMask [ // OTG Databook, Table 5-57
TransferCompleted 0,
EndpointDisabled 1,
AhbError 2,
Timeout 3,
InTokenReceivedWhenTxFifoEmpty 4,
InTokenEndpointMismatched 5,
InEndpointNakEffective 6,
// Bit 7 reserved
TxFifoUnderrun 8,
BufferNotAvailable 9,
// Bits 10-12 reserved
NAK 13
// Bits 14-31 reserved
],
pub OutEndpointInterruptMask [ // OTG Databook, Table 5-58
TransferCompleted 0,
EndpointDisabled 1,
AhbError 2,
SetupPhaseDone 3,
OutTokenReceivedWhenEndpointDisabled 4,
StatusPhaseReceived 5,
BackToBackSetupPacketsReceived 6,
// Bit 7 reserved
OutPacketError 8,
BnaInterrupt 9,
// Bits 10-11 reserved
BabbleError 12,
Nak 13,
Nyet 14
// Bits 15-31 reserved
],
pub AllEndpointInterrupt [ // OTG Databook Table 5-59
IN0 0,
IN1 1,
IN2 2,
IN3 3,
IN4 4,
IN5 5,
IN6 6,
IN7 7,
IN8 8,
IN9 9,
IN10 10,
IN11 11,
IN12 12,
IN13 13,
IN14 14,
IN15 15,
OUT0 16,
OUT1 17,
OUT2 18,
OUT3 19,
OUT4 20,
OUT5 21,
OUT6 22,
OUT7 23,
OUT8 24,
OUT9 25,
OUT10 26,
OUT11 27,
OUT12 28,
OUT13 29,
OUT14 30,
OUT15 31
],
pub EndpointControl [
MaximumPacketSize OFFSET(0) NUMBITS(11) [],
NextEndpoint OFFSET(11) NUMBITS(4) [],
UsbActiveEndpoint OFFSET(15) NUMBITS(1) [],
NakStatus OFFSET(17) NUMBITS(1) [
TransmittingNonNakHandshakes = 0,
TransmittingNakHandshakes = 1
],
EndpointType OFFSET(18) NUMBITS(2) [
Control = 0b00,
Isochronous = 0b01,
Bulk = 0b10,
Interrupt = 0b11
],
SnoopMode OFFSET(20) NUMBITS(1) [],
Stall OFFSET(21) NUMBITS(1) [],
TxFifoNumber OFFSET(22) NUMBITS(4) [],
ClearNak OFFSET(26) NUMBITS(1) [],
SetNak OFFSET(27) NUMBITS(1) [],
Disable OFFSET(30) NUMBITS(1) [],
Enable OFFSET(31) NUMBITS(1) []
]
];
#[repr(C)]
pub struct Registers {
pub _otg_control: VolatileCell<u32>,
pub _otg_interrupt: VolatileCell<u32>,
pub ahb_config: ReadWrite<u32, AhbConfig::Register>,
pub configuration: ReadWrite<u32, UsbConfiguration::Register>,
pub reset: ReadWrite<u32, Reset::Register>,
pub interrupt_status: ReadWrite<u32, Interrupt::Register>,
pub interrupt_mask: ReadWrite<u32, Interrupt::Register>,
pub _grxstsr: VolatileCell<u32>,
pub _grxstsp: VolatileCell<u32>,
pub receive_fifo_size: VolatileCell<u32>,
pub transmit_fifo_size: VolatileCell<u32>,
_reserved: [u32; 3],
// 0x38
/// The `gpio` register is a portal to a set of custom 8-bit registers.
///
/// Logically it is split into a GP_OUT part and a GP_IN part. Writing to a
/// custom register can be done in a single operation, with all data
/// transferred in GP_OUT. Reading requires a GP_OUT write to select the
/// register to read, then a read or GP_IN to see what the register holds.
/// GP_OUT:
/// bit 15 direction: 1=write, 0=read
/// bits 11:4 value to write to register when bit 15 is set
/// bits 3:0 custom register to access
/// GP_IN:
/// bits 7:0 value read back from register when GP_OUT[15] is clear
pub gpio: ReadWrite<u32, Gpio::Register>,
pub _guid: VolatileCell<u32>,
pub _gsnpsid: VolatileCell<u32>,
pub _user_hw_config: [VolatileCell<u32>; 4],
_reserved0: [u32; 2],
pub _gdfifocfg: VolatileCell<u32>,
_reserved1: [u32; 41],
pub device_in_ep_tx_fifo_size: [VolatileCell<u32>; 15],
_reserved2: [u32; 432],
pub device_config: ReadWrite<u32, DeviceConfig::Register>,
pub device_control: ReadWrite<u32, DeviceControl::Register>,
pub _device_status: VolatileCell<u32>,
_reserved_3: u32,
// 0x810
pub device_in_ep_interrupt_mask: ReadWrite<u32, InEndpointInterruptMask::Register>, // DIEPMASK
pub device_out_ep_interrupt_mask: ReadWrite<u32, OutEndpointInterruptMask::Register>, // DOEPMASK
pub device_all_ep_interrupt: ReadWrite<u32, AllEndpointInterrupt::Register>, // DAINT
pub device_all_ep_interrupt_mask: ReadWrite<u32, AllEndpointInterrupt::Register>, // DAINTMASK
_reserved_4: [u32; 2],
// 0x828
pub _device_vbus_discharge_time: VolatileCell<u32>,
pub _device_vbus_pulsing_time: VolatileCell<u32>,
pub _device_threshold_control: VolatileCell<u32>,
pub _device_in_ep_fifo_empty_interrupt_mask: VolatileCell<u32>,
_reserved_5: [u32; 50],
// 0x900
pub in_endpoints: [InEndpoint; 16],
// 0xb00
pub out_endpoints: [OutEndpoint; 16],
// 0xd00
_reserved6: [u32; 64],
// 0xe00
pub _power_clock_gating_control: VolatileCell<u32>,
}
#[repr(C)]
pub struct InEndpoint {
pub control: ReadWrite<u32, EndpointControl::Register>,
_reserved0: u32,
pub interrupt: ReadWrite<u32, InEndpointInterruptMask::Register>,
_reserved1: u32,
// We use scatter-gather mode so transfer-size isn't used
_transfer_size: VolatileCell<u32>,
pub dma_address: VolatileCell<&'static DMADescriptor>,
pub tx_fifo_status: VolatileCell<u32>,
pub buffer_address: VolatileCell<u32>,
}
#[repr(C)]
pub struct OutEndpoint {
pub control: ReadWrite<u32, EndpointControl::Register>,
_reserved0: u32,
pub interrupt: ReadWrite<u32, OutEndpointInterruptMask::Register>,
_reserved1: u32,
_transfer_size: VolatileCell<u32>,
pub dma_address: VolatileCell<&'static DMADescriptor>,
_reserved2: u32,
pub buffer_address: VolatileCell<u32>,
}
/// In/Out Endpoint Control flags
#[repr(C)]
#[derive(Clone, Copy)]
pub struct EpCtl(pub u32);
impl EpCtl {
/// Enable the endpoint
pub const ENABLE: EpCtl = EpCtl(1 << 31);
/// Clear endpoint NAK
pub const CNAK: EpCtl = EpCtl(1 << 26);
/// Stall endpoint
pub const STALL: EpCtl = EpCtl(1 << 21);
/// Snoop on bad frames
pub const SNOOP: EpCtl = EpCtl(1 << 20);
/// Make an endpoint of type Interrupt
pub const INTERRUPT: EpCtl = EpCtl(3 << 18);
/// Denotes whether endpoint is active
pub const USBACTEP: EpCtl = EpCtl(1 << 15);
pub const TXFNUM_0: EpCtl = EpCtl(0 << 22);
pub const TXFNUM_1: EpCtl = EpCtl(1 << 22);
pub const TXFNUM_2: EpCtl = EpCtl(2 << 22);
pub const TXFNUM_3: EpCtl = EpCtl(3 << 22);
pub const TXFNUM_4: EpCtl = EpCtl(4 << 22);
pub const TXFNUM_5: EpCtl = EpCtl(5 << 22);
pub const TXFNUM_6: EpCtl = EpCtl(6 << 22);
pub const TXFNUM_7: EpCtl = EpCtl(7 << 22);
pub const TXFNUM_8: EpCtl = EpCtl(8 << 22);
pub const TXFNUM_9: EpCtl = EpCtl(9 << 22);
pub const TXFNUM_10: EpCtl = EpCtl(10 << 22);
pub const TXFNUM_11: EpCtl = EpCtl(11 << 22);
pub const TXFNUM_12: EpCtl = EpCtl(12 << 22);
pub const TXFNUM_13: EpCtl = EpCtl(13 << 22);
pub const TXFNUM_14: EpCtl = EpCtl(14 << 22);
pub const TXFNUM_15: EpCtl = EpCtl(15 << 22);
// EP0 has a different control register layout than the other
// endpoints (EPN). In EP0, the MPS field is 2 bits; in EPN, it is
// 10 bits (sections 5.3.5.21 and 5.3.5.22 in the OTG databook. A
// better implementation would type check this. -pal
pub const MPS_EP0_64: EpCtl = EpCtl(0 << 0);
pub const MPS_EP0_32: EpCtl = EpCtl(1 << 0);
pub const MPS_EP0_16: EpCtl = EpCtl(2 << 0);
pub const MPS_EP0_8: EpCtl = EpCtl(3 << 0);
pub fn epn_mps(self, cnt: u32) -> EpCtl {
self | EpCtl(cnt & 0x3ff)
}
pub const fn to_u32(self) -> u32 {
self.0
}
}
impl BitOr for EpCtl {
type Output = Self;
fn | bitor | identifier_name |
|
registers.rs | OFFSET(10) NUMBITS(4) []
// Bit 14 reserved
// Bits 15+ not used by SW; not included because they won't be tested
],
pub Reset [ // OTG Databook, Table 5-11
AhbMasterIdle OFFSET(31) NUMBITS(1) [],
DmaRequestSignal OFFSET(30) NUMBITS(1) [],
TxFifoNumber OFFSET(6) NUMBITS(5) [
Fifo0 = 0,
Fifo1 = 1,
Fifo2 = 2,
Fifo3 = 3,
Fifo4 = 4,
Fifo5 = 5,
Fifo6 = 6,
Fifo7 = 7,
Fifo8 = 8,
Fifo9 = 9,
Fifo10 = 10,
Fifo11 = 11,
Fifo12 = 12,
Fifo13 = 13,
Fifo14 = 14,
Fifo15 = 15,
AllFifos = 16 // It's 5 bits, 0x10 means all FIFOs
],
TxFifoFlush OFFSET(5) NUMBITS(1) [],
RxFifoFlush OFFSET(4) NUMBITS(1) [],
InTokenSequenceLearningQueueFlush OFFSET(3) NUMBITS(1) [],
HostFrameCounterReset OFFSET(2) NUMBITS(1) [],
PiuFsDedicatedControllerSoftReset OFFSET(1) NUMBITS(1) []
],
pub Interrupt [ // OTG Databook, Table 5-13
// Note this field is not valid on the Mask register
CurrentMode OFFSET(0) NUMBITS(1) [
Host = 0b0,
Device = 0b1
],
ModeMismatch OFFSET(1) NUMBITS(1) [],
OTG OFFSET(2) NUMBITS(1) [],
StartOfFrame OFFSET(3) NUMBITS(1) [],
RxFifoNotEmpty OFFSET(4) NUMBITS(1) [],
NonPeriodicTxFifoEmpty OFFSET(5) NUMBITS(1) [],
GlobalInNak OFFSET(6) NUMBITS(1) [],
GlobalOutNak OFFSET(7) NUMBITS(1) [],
EarlySuspend OFFSET(10) NUMBITS(1) [],
Suspend OFFSET(11) NUMBITS(1) [],
Reset OFFSET(12) NUMBITS(1) [],
EnumerationDone OFFSET(13) NUMBITS(1) [],
OutIsochronousPacketDropped OFFSET(14) NUMBITS(1) [],
EndOfPeriodicFrame OFFSET(15) NUMBITS(1) [],
RestoreDone OFFSET(16) NUMBITS(1) [],
EndpointMismatch OFFSET(17) NUMBITS(1) [],
InEndpoints OFFSET(18) NUMBITS(1) [],
OutEndpoints OFFSET(19) NUMBITS(1) [],
IncompleteIsochronousInTransfer OFFSET(20) NUMBITS(1) [],
IncompletePeriodicTransfer OFFSET(21) NUMBITS(1) [],
DataFetchSuspended OFFSET(22) NUMBITS(1) [],
ResetDetected OFFSET(23) NUMBITS(1) [],
ConnectIDChange OFFSET(28) NUMBITS(1) [],
DisconnectDetected OFFSET(29) NUMBITS(1) [],
SessionRequest OFFSET(30) NUMBITS(1) [],
ResumeWakeup OFFSET(31) NUMBITS(1) []
],
pub Gpio [ // OTG Databook, Table 5-22
Gpi OFFSET(0) NUMBITS(16) [],
GpoRegister OFFSET(16) NUMBITS(4) [],
GpoValue OFFSET(20) NUMBITS(8) [],
GpoOperation OFFSET(31) NUMBITS(1) [
Read = 0,
Write = 1
]
],
pub DeviceConfig [ // OTG Databook, Table 5-53
DeviceSpeed OFFSET(0) NUMBITS(2) [
High = 0b00,
Full2 = 0b01,
Low = 0b10,
Full1 = 0b11
],
DeviceAddress OFFSET(4) NUMBITS(7) [],
PeriodicFrameInterval OFFSET(11) NUMBITS(2) [
Interval80 = 0b00,
Interval85 = 0b01,
Interval90 = 0b10,
Interval95 = 0b11
],
EnableDeviceOutNak OFFSET(13) NUMBITS(1) [],
XcvrDelay OFFSET(14) NUMBITS(1) [],
ErraticErrorInterruptMask OFFSET(15) NUMBITS(1) [],
InEndpointMismatchCount OFFSET(18) NUMBITS(5) [],
EnableScatterGatherDMAInDeviceMode OFFSET(23) NUMBITS(1) [],
PeriodicScheduling OFFSET(24) NUMBITS(2) [
Interval25 = 0b00,
Interval50 = 0b01,
Interval75 = 0b10
],
ResumeValidationPeriod OFFSET(26) NUMBITS(6) []
],
pub DeviceControl [ // OTG Databook, Table 5-54
RemoteWakeupSignaling OFFSET(0) NUMBITS(1) [],
SoftDisconnect OFFSET(1) NUMBITS(1) [],
GlobalNonPeriodicInNakStatus OFFSET(2) NUMBITS(1) [],
GlobalOutNakStatus OFFSET(3) NUMBITS(1) [],
TestControl OFFSET(4) NUMBITS(3) [
Disabled = 0b000,
ModeJ = 0b001,
ModeK = 0b010,
ModeSE0Nak = 0b011,
ModePacket = 0b100,
ModeForceEnable = 0b101
],
SetGlobalNonPeriodicInNak OFFSET(7) NUMBITS(1) [],
ClearGlobalNonPeriodicInNak OFFSET(8) NUMBITS(1) [],
SetGlobalOutNak OFFSET(9) NUMBITS(1) [],
ClearGlobalOutNak OFFSET(10) NUMBITS(1) [],
PowerOnProgrammingDone OFFSET(11) NUMBITS(1) [],
GlobalMultiCount OFFSET(13) NUMBITS(2) [
CountInvalid = 0b00,
Count1Packet = 0b01,
Count2Packets = 0b10,
Count3Packets = 0b11
],
IgnoreFrameNumber OFFSET(15) NUMBITS(1) [],
NakOnBabbleError OFFSET(16) NUMBITS(1) [],
EnableContinueOnBna OFFSET(17) NUMBITS(1) [],
DeepSleepBESLReject OFFSET(18) NUMBITS(1) []
],
pub InEndpointInterruptMask [ // OTG Databook, Table 5-57
TransferCompleted 0,
EndpointDisabled 1,
AhbError 2,
Timeout 3,
InTokenReceivedWhenTxFifoEmpty 4,
InTokenEndpointMismatched 5,
InEndpointNakEffective 6,
// Bit 7 reserved
TxFifoUnderrun 8,
BufferNotAvailable 9,
// Bits 10-12 reserved
NAK 13
// Bits 14-31 reserved
],
pub OutEndpointInterruptMask [ // OTG Databook, Table 5-58
TransferCompleted 0,
EndpointDisabled 1,
AhbError 2,
SetupPhaseDone 3,
OutTokenReceivedWhenEndpointDisabled 4,
StatusPhaseReceived 5,
BackToBackSetupPacketsReceived 6,
// Bit 7 reserved
OutPacketError 8,
BnaInterrupt 9,
// Bits 10-11 reserved
BabbleError 12,
Nak 13,
Nyet 14
// Bits 15-31 reserved
],
pub AllEndpointInterrupt [ // OTG Databook Table 5-59
IN0 0,
IN1 1,
IN2 2,
IN3 3,
IN4 4,
IN5 5,
IN6 6,
IN7 7,
IN8 8,
IN9 9,
IN10 10,
IN11 11,
IN12 12,
IN13 13,
IN14 14,
IN15 15,
OUT0 16,
OUT1 17,
OUT2 18,
OUT3 19,
OUT4 20,
OUT5 21,
OUT6 22,
OUT7 23,
OUT8 24,
OUT9 25,
OUT10 26,
OUT11 27,
OUT12 28,
OUT13 29,
OUT14 30,
OUT15 31
],
pub EndpointControl [
MaximumPacketSize OFFSET(0) NUMBITS(11) [],
NextEndpoint OFFSET(11) NUMBITS(4) [],
UsbActiveEndpoint OFFSET(15) NUMBITS(1) [],
NakStatus OFFSET(17) NUMBITS(1) [
TransmittingNonNakHandshakes = 0,
TransmittingNakHandshakes = 1
],
EndpointType OFFSET(18) NUMBITS(2) [
Control = 0b00,
Isochronous = 0b01,
Bulk = 0b10,
Interrupt = 0b11
],
SnoopMode OFFSET(20) NUMBITS(1) [],
Stall OFFSET(21) NUMBITS(1) [],
TxFifoNumber OFFSET(22) NUMBITS(4) [],
ClearNak OFFSET(26) NUMBITS(1) [],
SetNak OFFSET(27) NUMBITS(1) [],
Disable OFFSET(30) NUMBITS(1) [],
Enable OFFSET(31) NUMBITS(1) []
]
];
#[repr(C)]
pub struct Registers {
pub _otg_control: VolatileCell<u32>,
pub _otg_interrupt: VolatileCell<u32>,
pub ahb_config: ReadWrite<u32, AhbConfig::Register>,
pub configuration: ReadWrite<u32, UsbConfiguration::Register>,
pub reset: ReadWrite<u32, Reset::Register>,
pub interrupt_status: ReadWrite<u32, Interrupt::Register>,
pub interrupt_mask: ReadWrite<u32, Interrupt::Register>,
pub _grxstsr: VolatileCell<u32>,
pub _grxstsp: VolatileCell<u32>,
pub receive_fifo_size: VolatileCell<u32>,
pub transmit_fifo_size: VolatileCell<u32>,
_reserved: [u32; 3],
// 0x38
/// The `gpio` register is a portal to a set of custom 8-bit registers.
///
/// Logically it is split into a GP_OUT part and a GP_IN part. Writing to a
/// custom register can be done in a single operation, with all data
/// transferred in GP_OUT. Reading requires a GP_OUT write to select the
/// register to read, then a read or GP_IN to see what the register holds.
/// GP_OUT:
/// bit 15 direction: 1=write, 0=read
/// bits 11:4 value to write to register when bit 15 is set
/// bits 3:0 custom register to access
/// GP_IN:
/// bits 7:0 value read back from register when GP_OUT[15] is clear
pub gpio: ReadWrite<u32, Gpio::Register>,
pub _guid: VolatileCell<u32>,
pub _gsnpsid: VolatileCell<u32>,
pub _user_hw_config: [VolatileCell<u32>; 4],
_reserved0: [u32; 2],
pub _gdfifocfg: VolatileCell<u32>,
_reserved1: [u32; 41],
pub device_in_ep_tx_fifo_size: [VolatileCell<u32>; 15],
_reserved2: [u32; 432],
pub device_config: ReadWrite<u32, DeviceConfig::Register>,
pub device_control: ReadWrite<u32, DeviceControl::Register>,
pub _device_status: VolatileCell<u32>,
_reserved_3: u32,
// 0x810
pub device_in_ep_interrupt_mask: ReadWrite<u32, InEndpointInterruptMask::Register>, // DIEPMASK
pub device_out_ep_interrupt_mask: ReadWrite<u32, OutEndpointInterruptMask::Register>, // DOEPMASK
pub device_all_ep_interrupt: ReadWrite<u32, AllEndpointInterrupt::Register>, // DAINT
pub device_all_ep_interrupt_mask: ReadWrite<u32, AllEndpointInterrupt::Register>, // DAINTMASK
_reserved_4: [u32; 2],
// 0x828
pub _device_vbus_discharge_time: VolatileCell<u32>,
pub _device_vbus_pulsing_time: VolatileCell<u32>,
pub _device_threshold_control: VolatileCell<u32>,
pub _device_in_ep_fifo_empty_interrupt_mask: VolatileCell<u32>,
_reserved_5: [u32; 50],
// 0x900
pub in_endpoints: [InEndpoint; 16],
// 0xb00
pub out_endpoints: [OutEndpoint; 16],
// 0xd00
_reserved6: [u32; 64],
// 0xe00
pub _power_clock_gating_control: VolatileCell<u32>,
}
#[repr(C)]
pub struct InEndpoint {
pub control: ReadWrite<u32, EndpointControl::Register>,
_reserved0: u32,
pub interrupt: ReadWrite<u32, InEndpointInterruptMask::Register>,
_reserved1: u32,
// We use scatter-gather mode so transfer-size isn't used
_transfer_size: VolatileCell<u32>,
pub dma_address: VolatileCell<&'static DMADescriptor>,
pub tx_fifo_status: VolatileCell<u32>,
pub buffer_address: VolatileCell<u32>,
}
#[repr(C)]
pub struct OutEndpoint {
pub control: ReadWrite<u32, EndpointControl::Register>,
_reserved0: u32,
pub interrupt: ReadWrite<u32, OutEndpointInterruptMask::Register>,
_reserved1: u32,
_transfer_size: VolatileCell<u32>,
pub dma_address: VolatileCell<&'static DMADescriptor>,
_reserved2: u32,
pub buffer_address: VolatileCell<u32>,
}
/// In/Out Endpoint Control flags
#[repr(C)]
#[derive(Clone, Copy)]
pub struct EpCtl(pub u32);
impl EpCtl {
/// Enable the endpoint
pub const ENABLE: EpCtl = EpCtl(1 << 31);
/// Clear endpoint NAK
pub const CNAK: EpCtl = EpCtl(1 << 26);
/// Stall endpoint
pub const STALL: EpCtl = EpCtl(1 << 21);
/// Snoop on bad frames
pub const SNOOP: EpCtl = EpCtl(1 << 20);
/// Make an endpoint of type Interrupt
pub const INTERRUPT: EpCtl = EpCtl(3 << 18);
/// Denotes whether endpoint is active
pub const USBACTEP: EpCtl = EpCtl(1 << 15);
pub const TXFNUM_0: EpCtl = EpCtl(0 << 22);
pub const TXFNUM_1: EpCtl = EpCtl(1 << 22);
pub const TXFNUM_2: EpCtl = EpCtl(2 << 22);
pub const TXFNUM_3: EpCtl = EpCtl(3 << 22);
pub const TXFNUM_4: EpCtl = EpCtl(4 << 22);
pub const TXFNUM_5: EpCtl = EpCtl(5 << 22);
pub const TXFNUM_6: EpCtl = EpCtl(6 << 22);
pub const TXFNUM_7: EpCtl = EpCtl(7 << 22);
pub const TXFNUM_8: EpCtl = EpCtl(8 << 22);
pub const TXFNUM_9: EpCtl = EpCtl(9 << 22);
pub const TXFNUM_10: EpCtl = EpCtl(10 << 22);
pub const TXFNUM_11: EpCtl = EpCtl(11 << 22);
pub const TXFNUM_12: EpCtl = EpCtl(12 << 22);
pub const TXFNUM_13: EpCtl = EpCtl(13 << 22);
pub const TXFNUM_14: EpCtl = EpCtl(14 << 22);
pub const TXFNUM_15: EpCtl = EpCtl(15 << 22);
// EP0 has a different control register layout than the other
// endpoints (EPN). In EP0, the MPS field is 2 bits; in EPN, it is
// 10 bits (sections 5.3.5.21 and 5.3.5.22 in the OTG databook. A
// better implementation would type check this. -pal
pub const MPS_EP0_64: EpCtl = EpCtl(0 << 0);
pub const MPS_EP0_32: EpCtl = EpCtl(1 << 0);
pub const MPS_EP0_16: EpCtl = EpCtl(2 << 0);
pub const MPS_EP0_8: EpCtl = EpCtl(3 << 0);
pub fn epn_mps(self, cnt: u32) -> EpCtl {
self | EpCtl(cnt & 0x3ff)
}
pub const fn to_u32(self) -> u32 {
self.0
}
}
impl BitOr for EpCtl {
type Output = Self;
fn bitor(self, rhs: EpCtl) -> EpCtl {
EpCtl(self.0 | rhs.0)
}
}
impl BitAnd for EpCtl {
type Output = Self;
fn bitand(self, rhs: EpCtl) -> EpCtl {
EpCtl(self.0 & rhs.0)
}
}
#[repr(C)]
#[repr(align(4))]
#[derive(Clone, Copy, Debug)]
pub struct DMADescriptor {
pub flags: DescFlag,
pub addr: usize,
}
/// Status quadlet for a DMA descriptor
///
/// The status quadlet is a 32-bit flag register in the DMA descriptor that
/// reflects the status of the descriptor. It can mark whether the Host/DMA is
/// ready to transmit/receive this descriptor and describes how large the buffer
/// is.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct DescFlag(pub u32);
| random_line_split |
||
lib.rs | #![feature(core_intrinsics, lang_items)]
#![no_std]
mod gpio;
mod watchdog;
use core::intrinsics::{volatile_store, volatile_load};
const GPFSEL2: u32 = 0x3F20_0008;
const GPSET0: u32 = 0x3F20_001C;
const GPCLR0: u32 = 0x3F20_0028;
const GPIO20: u32 = 1 << 20;
const GPIO21: u32 = 1 << 21;
const GPIO22: u32 = 1 << 22;
const IRQ_BASIC_ENABLE: u32 = 0x3F00_B218;
const ARM_TIMER_LOD: u32 = 0x3F00_B400;
const ARM_TIMER_CTL: u32 = 0x3F00_B408;
const ARM_TIMER_CLI: u32 = 0x3F00_B40C;
const ARM_TIMER_RLD: u32 = 0x3F00_B418;
const ARM_TIMER_DIV: u32 = 0x3F00_B41C;
const SHORT_TIMEOUT: u32 = 500_000;
static mut STATE_COUNTER: u32 = 0;
extern {
fn enable_irq();
}
#[no_mangle]
pub extern fn rust_main() {
gpio::write_register(ARM_TIMER_LOD, SHORT_TIMEOUT - 1);
gpio::write_register(ARM_TIMER_RLD, SHORT_TIMEOUT - 1);
// // Set the timer pre-divider to 0xF9. System clock freq (~250MHz) / 0xF9 = ~1 million ticks/sec
gpio::write_register(ARM_TIMER_DIV, 0x0000_00F9);
gpio::write_register(ARM_TIMER_CLI, 0);
gpio::write_register(ARM_TIMER_CTL, 0x003E_00A2);
gpio::write_register(IRQ_BASIC_ENABLE, 0x1);
initialize_leds();
unsafe { enable_irq(); }
watchdog::start(0x000F_FFFF);
loop {
if watchdog::remaining_time() < 0x67697 {
gpio::write_register(GPSET0, GPIO22);
break;
}
}
loop {}
}
#[no_mangle]
pub extern fn | () {
let state_counter = unsafe { volatile_load::<u32>(&STATE_COUNTER as *const u32 as *mut u32) };
if state_counter & 0x1 == 0 {
gpio::write_register(GPSET0, GPIO20);
gpio::write_register(GPCLR0, GPIO21);
} else {
gpio::write_register(GPCLR0, GPIO20);
gpio::write_register(GPSET0, GPIO21);
}
unsafe {
volatile_store(&STATE_COUNTER as *const u32 as *mut u32, state_counter.wrapping_add(1));
}
gpio::write_register(ARM_TIMER_CLI, 0);
}
fn initialize_leds() {
let mut gpfsel2_val = gpio::read_register(GPFSEL2);
gpfsel2_val &=!0x1FF; // mask out other GPIO pins
gpfsel2_val |= 0x49; // 0b01001001
gpio::write_register(GPFSEL2, gpfsel2_val);
}
#[no_mangle]
pub unsafe fn __aeabi_unwind_cpp_pr0() {
loop {}
}
#[lang = "eh_personality"]
extern fn eh_personality() {}
#[lang = "panic_fmt"]
extern fn panic_fmt() ->! {
loop {}
}
| rust_irq_handler | identifier_name |
lib.rs | #![feature(core_intrinsics, lang_items)]
#![no_std]
mod gpio;
mod watchdog;
use core::intrinsics::{volatile_store, volatile_load};
const GPFSEL2: u32 = 0x3F20_0008;
const GPSET0: u32 = 0x3F20_001C;
const GPCLR0: u32 = 0x3F20_0028;
const GPIO20: u32 = 1 << 20;
const GPIO21: u32 = 1 << 21;
const GPIO22: u32 = 1 << 22;
const IRQ_BASIC_ENABLE: u32 = 0x3F00_B218;
const ARM_TIMER_LOD: u32 = 0x3F00_B400;
const ARM_TIMER_CTL: u32 = 0x3F00_B408;
const ARM_TIMER_CLI: u32 = 0x3F00_B40C;
const ARM_TIMER_RLD: u32 = 0x3F00_B418;
const ARM_TIMER_DIV: u32 = 0x3F00_B41C;
const SHORT_TIMEOUT: u32 = 500_000;
static mut STATE_COUNTER: u32 = 0;
extern {
fn enable_irq(); |
#[no_mangle]
pub extern fn rust_main() {
gpio::write_register(ARM_TIMER_LOD, SHORT_TIMEOUT - 1);
gpio::write_register(ARM_TIMER_RLD, SHORT_TIMEOUT - 1);
// // Set the timer pre-divider to 0xF9. System clock freq (~250MHz) / 0xF9 = ~1 million ticks/sec
gpio::write_register(ARM_TIMER_DIV, 0x0000_00F9);
gpio::write_register(ARM_TIMER_CLI, 0);
gpio::write_register(ARM_TIMER_CTL, 0x003E_00A2);
gpio::write_register(IRQ_BASIC_ENABLE, 0x1);
initialize_leds();
unsafe { enable_irq(); }
watchdog::start(0x000F_FFFF);
loop {
if watchdog::remaining_time() < 0x67697 {
gpio::write_register(GPSET0, GPIO22);
break;
}
}
loop {}
}
#[no_mangle]
pub extern fn rust_irq_handler() {
let state_counter = unsafe { volatile_load::<u32>(&STATE_COUNTER as *const u32 as *mut u32) };
if state_counter & 0x1 == 0 {
gpio::write_register(GPSET0, GPIO20);
gpio::write_register(GPCLR0, GPIO21);
} else {
gpio::write_register(GPCLR0, GPIO20);
gpio::write_register(GPSET0, GPIO21);
}
unsafe {
volatile_store(&STATE_COUNTER as *const u32 as *mut u32, state_counter.wrapping_add(1));
}
gpio::write_register(ARM_TIMER_CLI, 0);
}
fn initialize_leds() {
let mut gpfsel2_val = gpio::read_register(GPFSEL2);
gpfsel2_val &=!0x1FF; // mask out other GPIO pins
gpfsel2_val |= 0x49; // 0b01001001
gpio::write_register(GPFSEL2, gpfsel2_val);
}
#[no_mangle]
pub unsafe fn __aeabi_unwind_cpp_pr0() {
loop {}
}
#[lang = "eh_personality"]
extern fn eh_personality() {}
#[lang = "panic_fmt"]
extern fn panic_fmt() ->! {
loop {}
} | } | random_line_split |
lib.rs | #![feature(core_intrinsics, lang_items)]
#![no_std]
mod gpio;
mod watchdog;
use core::intrinsics::{volatile_store, volatile_load};
const GPFSEL2: u32 = 0x3F20_0008;
const GPSET0: u32 = 0x3F20_001C;
const GPCLR0: u32 = 0x3F20_0028;
const GPIO20: u32 = 1 << 20;
const GPIO21: u32 = 1 << 21;
const GPIO22: u32 = 1 << 22;
const IRQ_BASIC_ENABLE: u32 = 0x3F00_B218;
const ARM_TIMER_LOD: u32 = 0x3F00_B400;
const ARM_TIMER_CTL: u32 = 0x3F00_B408;
const ARM_TIMER_CLI: u32 = 0x3F00_B40C;
const ARM_TIMER_RLD: u32 = 0x3F00_B418;
const ARM_TIMER_DIV: u32 = 0x3F00_B41C;
const SHORT_TIMEOUT: u32 = 500_000;
static mut STATE_COUNTER: u32 = 0;
extern {
fn enable_irq();
}
#[no_mangle]
pub extern fn rust_main() {
gpio::write_register(ARM_TIMER_LOD, SHORT_TIMEOUT - 1);
gpio::write_register(ARM_TIMER_RLD, SHORT_TIMEOUT - 1);
// // Set the timer pre-divider to 0xF9. System clock freq (~250MHz) / 0xF9 = ~1 million ticks/sec
gpio::write_register(ARM_TIMER_DIV, 0x0000_00F9);
gpio::write_register(ARM_TIMER_CLI, 0);
gpio::write_register(ARM_TIMER_CTL, 0x003E_00A2);
gpio::write_register(IRQ_BASIC_ENABLE, 0x1);
initialize_leds();
unsafe { enable_irq(); }
watchdog::start(0x000F_FFFF);
loop {
if watchdog::remaining_time() < 0x67697 |
}
loop {}
}
#[no_mangle]
pub extern fn rust_irq_handler() {
let state_counter = unsafe { volatile_load::<u32>(&STATE_COUNTER as *const u32 as *mut u32) };
if state_counter & 0x1 == 0 {
gpio::write_register(GPSET0, GPIO20);
gpio::write_register(GPCLR0, GPIO21);
} else {
gpio::write_register(GPCLR0, GPIO20);
gpio::write_register(GPSET0, GPIO21);
}
unsafe {
volatile_store(&STATE_COUNTER as *const u32 as *mut u32, state_counter.wrapping_add(1));
}
gpio::write_register(ARM_TIMER_CLI, 0);
}
fn initialize_leds() {
let mut gpfsel2_val = gpio::read_register(GPFSEL2);
gpfsel2_val &=!0x1FF; // mask out other GPIO pins
gpfsel2_val |= 0x49; // 0b01001001
gpio::write_register(GPFSEL2, gpfsel2_val);
}
#[no_mangle]
pub unsafe fn __aeabi_unwind_cpp_pr0() {
loop {}
}
#[lang = "eh_personality"]
extern fn eh_personality() {}
#[lang = "panic_fmt"]
extern fn panic_fmt() ->! {
loop {}
}
| {
gpio::write_register(GPSET0, GPIO22);
break;
} | conditional_block |
lib.rs | #![feature(core_intrinsics, lang_items)]
#![no_std]
mod gpio;
mod watchdog;
use core::intrinsics::{volatile_store, volatile_load};
const GPFSEL2: u32 = 0x3F20_0008;
const GPSET0: u32 = 0x3F20_001C;
const GPCLR0: u32 = 0x3F20_0028;
const GPIO20: u32 = 1 << 20;
const GPIO21: u32 = 1 << 21;
const GPIO22: u32 = 1 << 22;
const IRQ_BASIC_ENABLE: u32 = 0x3F00_B218;
const ARM_TIMER_LOD: u32 = 0x3F00_B400;
const ARM_TIMER_CTL: u32 = 0x3F00_B408;
const ARM_TIMER_CLI: u32 = 0x3F00_B40C;
const ARM_TIMER_RLD: u32 = 0x3F00_B418;
const ARM_TIMER_DIV: u32 = 0x3F00_B41C;
const SHORT_TIMEOUT: u32 = 500_000;
static mut STATE_COUNTER: u32 = 0;
extern {
fn enable_irq();
}
#[no_mangle]
pub extern fn rust_main() | }
}
loop {}
}
#[no_mangle]
pub extern fn rust_irq_handler() {
let state_counter = unsafe { volatile_load::<u32>(&STATE_COUNTER as *const u32 as *mut u32) };
if state_counter & 0x1 == 0 {
gpio::write_register(GPSET0, GPIO20);
gpio::write_register(GPCLR0, GPIO21);
} else {
gpio::write_register(GPCLR0, GPIO20);
gpio::write_register(GPSET0, GPIO21);
}
unsafe {
volatile_store(&STATE_COUNTER as *const u32 as *mut u32, state_counter.wrapping_add(1));
}
gpio::write_register(ARM_TIMER_CLI, 0);
}
fn initialize_leds() {
let mut gpfsel2_val = gpio::read_register(GPFSEL2);
gpfsel2_val &=!0x1FF; // mask out other GPIO pins
gpfsel2_val |= 0x49; // 0b01001001
gpio::write_register(GPFSEL2, gpfsel2_val);
}
#[no_mangle]
pub unsafe fn __aeabi_unwind_cpp_pr0() {
loop {}
}
#[lang = "eh_personality"]
extern fn eh_personality() {}
#[lang = "panic_fmt"]
extern fn panic_fmt() ->! {
loop {}
}
| {
gpio::write_register(ARM_TIMER_LOD, SHORT_TIMEOUT - 1);
gpio::write_register(ARM_TIMER_RLD, SHORT_TIMEOUT - 1);
// // Set the timer pre-divider to 0xF9. System clock freq (~250MHz) / 0xF9 = ~1 million ticks/sec
gpio::write_register(ARM_TIMER_DIV, 0x0000_00F9);
gpio::write_register(ARM_TIMER_CLI, 0);
gpio::write_register(ARM_TIMER_CTL, 0x003E_00A2);
gpio::write_register(IRQ_BASIC_ENABLE, 0x1);
initialize_leds();
unsafe { enable_irq(); }
watchdog::start(0x000F_FFFF);
loop {
if watchdog::remaining_time() < 0x67697 {
gpio::write_register(GPSET0, GPIO22);
break; | identifier_body |
env.rs | // Copyright (C) 2018 Pietro Albini
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use std::cell::RefCell;
use std::fs::{self, OpenOptions};
use std::io::Write;
use std::os::unix::fs::OpenOptionsExt;
use std::path::{Path, PathBuf};
use tempdir::TempDir;
use common::prelude::*;
pub struct TestingEnv {
tempdirs: RefCell<Vec<PathBuf>>,
scripts_dir: PathBuf,
}
impl TestingEnv {
fn new() -> Result<Self> {
let scripts_dir = TempDir::new("fisher-integration")?.into_path();
Ok(TestingEnv {
tempdirs: RefCell::new(vec![scripts_dir.clone()]),
scripts_dir,
})
}
pub fn scripts_path(&self) -> &Path {
&self.scripts_dir
}
pub fn tempdir(&self) -> Result<PathBuf> {
let dir = TempDir::new("fisher-integration")?.into_path();
self.tempdirs.borrow_mut().push(dir.clone());
Ok(dir)
}
pub fn create_script(&self, name: &str, content: &[&str]) -> Result<()> {
let path = self.scripts_dir.join(name);
let mut file = OpenOptions::new()
.write(true)
.mode(0o755)
.create(true)
.open(&path)?;
writeln!(file, "{}", content.join("\n"))?;
Ok(())
}
pub fn config(&self) -> Config {
Config::new(self)
}
fn cleanup(self) -> Result<()> {
for dir in self.tempdirs.borrow().iter() {
fs::remove_dir_all(dir)?;
}
Ok(()) | let mut env = TestingEnv::new().unwrap();
let result = f(&mut env);
env.cleanup().unwrap();
result.unwrap();
} | }
}
pub fn testing_env<F: Fn(&mut TestingEnv) -> Result<()>>(f: F) { | random_line_split |
env.rs | // Copyright (C) 2018 Pietro Albini
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use std::cell::RefCell;
use std::fs::{self, OpenOptions};
use std::io::Write;
use std::os::unix::fs::OpenOptionsExt;
use std::path::{Path, PathBuf};
use tempdir::TempDir;
use common::prelude::*;
pub struct TestingEnv {
tempdirs: RefCell<Vec<PathBuf>>,
scripts_dir: PathBuf,
}
impl TestingEnv {
fn new() -> Result<Self> {
let scripts_dir = TempDir::new("fisher-integration")?.into_path();
Ok(TestingEnv {
tempdirs: RefCell::new(vec![scripts_dir.clone()]),
scripts_dir,
})
}
pub fn scripts_path(&self) -> &Path {
&self.scripts_dir
}
pub fn tempdir(&self) -> Result<PathBuf> {
let dir = TempDir::new("fisher-integration")?.into_path();
self.tempdirs.borrow_mut().push(dir.clone());
Ok(dir)
}
pub fn create_script(&self, name: &str, content: &[&str]) -> Result<()> {
let path = self.scripts_dir.join(name);
let mut file = OpenOptions::new()
.write(true)
.mode(0o755)
.create(true)
.open(&path)?;
writeln!(file, "{}", content.join("\n"))?;
Ok(())
}
pub fn config(&self) -> Config {
Config::new(self)
}
fn cleanup(self) -> Result<()> {
for dir in self.tempdirs.borrow().iter() {
fs::remove_dir_all(dir)?;
}
Ok(())
}
}
pub fn testing_env<F: Fn(&mut TestingEnv) -> Result<()>>(f: F) | {
let mut env = TestingEnv::new().unwrap();
let result = f(&mut env);
env.cleanup().unwrap();
result.unwrap();
} | identifier_body |
|
env.rs | // Copyright (C) 2018 Pietro Albini
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use std::cell::RefCell;
use std::fs::{self, OpenOptions};
use std::io::Write;
use std::os::unix::fs::OpenOptionsExt;
use std::path::{Path, PathBuf};
use tempdir::TempDir;
use common::prelude::*;
pub struct TestingEnv {
tempdirs: RefCell<Vec<PathBuf>>,
scripts_dir: PathBuf,
}
impl TestingEnv {
fn new() -> Result<Self> {
let scripts_dir = TempDir::new("fisher-integration")?.into_path();
Ok(TestingEnv {
tempdirs: RefCell::new(vec![scripts_dir.clone()]),
scripts_dir,
})
}
pub fn scripts_path(&self) -> &Path {
&self.scripts_dir
}
pub fn | (&self) -> Result<PathBuf> {
let dir = TempDir::new("fisher-integration")?.into_path();
self.tempdirs.borrow_mut().push(dir.clone());
Ok(dir)
}
pub fn create_script(&self, name: &str, content: &[&str]) -> Result<()> {
let path = self.scripts_dir.join(name);
let mut file = OpenOptions::new()
.write(true)
.mode(0o755)
.create(true)
.open(&path)?;
writeln!(file, "{}", content.join("\n"))?;
Ok(())
}
pub fn config(&self) -> Config {
Config::new(self)
}
fn cleanup(self) -> Result<()> {
for dir in self.tempdirs.borrow().iter() {
fs::remove_dir_all(dir)?;
}
Ok(())
}
}
pub fn testing_env<F: Fn(&mut TestingEnv) -> Result<()>>(f: F) {
let mut env = TestingEnv::new().unwrap();
let result = f(&mut env);
env.cleanup().unwrap();
result.unwrap();
}
| tempdir | identifier_name |
main.rs | #[macro_use]
extern crate serde_derive;
extern crate clap;
extern crate hyper;
extern crate reqwest;
extern crate serde_json;
mod json_response;
use crate::json_response::{BringResponse, ErrorConsignmentSet, Eventset, Packageset};
use clap::{App, AppSettings, Arg};
use reqwest::Error;
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
let input = get_content(parse_input()).await;
deserialize(input.unwrap()).await;
Ok(())
}
async fn get_content(url: String) -> Result<String, Error> {
let body = reqwest::get(&url).await?.text().await?;
Ok(body)
}
fn parse_input() -> String {
let matches = App::new("Rosten")
.version("0.1.1")
.author("Stian Eklund. <[email protected]>")
.about("Get shipment status of your Bring & Posten packages")
.setting(AppSettings::ArgRequiredElseHelp)
.arg(
Arg::with_name("track")
.short("t")
.long("track")
.help("Get package status")
.takes_value(true),
)
.get_matches();
let input = matches.value_of("track").unwrap();
String::from(format!(
"https://tracking.bring.com/api/v2/tracking.json?q={}",
input
))
}
async fn deserialize(buf: String) {
let deserialized: Result<BringResponse, serde_json::Error> = serde_json::from_str(buf.trim());
match deserialized {
Ok(deserialized) => {
let sets = deserialized.consignment_set;
for i in 0..sets.len() {
let consignment_set = &sets[i];
for x in 0..consignment_set.package_set.len() {
let package_set = &consignment_set.package_set[x];
match consignment_set.package_set[x] {
Packageset {
product_name: Some(ref product_name),
package_number: Some(ref package_number),
..
} => println!(
"Product Name: {}\nPackage number: {}",
product_name, package_number
),
_ => println!("Not covered"),
}
for n in 0..package_set.event_set.len() {
match package_set.event_set[n] {
Eventset {
description: Some(ref description),
status: Some(ref status),
..
} => println!("Description: {}\nStatus: {}", description, status),
_ => println!("Not covered"),
}
}
}
}
}
Err(_) => deserialize_err(&buf).await,
}
}
async fn deserialize_err(buf: &String) { | match deserialized {
Ok(deserialized) => {
eprintln!(
"Error: {}, Code:{}",
deserialized.error.message, deserialized.error.code
);
}
Err(e) => eprintln!(
"Error while deserializing, please check if your tracking number is valid. {}",
e
),
}
} | let deserialized: Result<ErrorConsignmentSet, serde_json::Error> = serde_json::from_str(&buf); | random_line_split |
main.rs | #[macro_use]
extern crate serde_derive;
extern crate clap;
extern crate hyper;
extern crate reqwest;
extern crate serde_json;
mod json_response;
use crate::json_response::{BringResponse, ErrorConsignmentSet, Eventset, Packageset};
use clap::{App, AppSettings, Arg};
use reqwest::Error;
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
let input = get_content(parse_input()).await;
deserialize(input.unwrap()).await;
Ok(())
}
async fn get_content(url: String) -> Result<String, Error> {
let body = reqwest::get(&url).await?.text().await?;
Ok(body)
}
fn parse_input() -> String {
let matches = App::new("Rosten")
.version("0.1.1")
.author("Stian Eklund. <[email protected]>")
.about("Get shipment status of your Bring & Posten packages")
.setting(AppSettings::ArgRequiredElseHelp)
.arg(
Arg::with_name("track")
.short("t")
.long("track")
.help("Get package status")
.takes_value(true),
)
.get_matches();
let input = matches.value_of("track").unwrap();
String::from(format!(
"https://tracking.bring.com/api/v2/tracking.json?q={}",
input
))
}
async fn deserialize(buf: String) {
let deserialized: Result<BringResponse, serde_json::Error> = serde_json::from_str(buf.trim());
match deserialized {
Ok(deserialized) => {
let sets = deserialized.consignment_set;
for i in 0..sets.len() {
let consignment_set = &sets[i];
for x in 0..consignment_set.package_set.len() {
let package_set = &consignment_set.package_set[x];
match consignment_set.package_set[x] {
Packageset {
product_name: Some(ref product_name),
package_number: Some(ref package_number),
..
} => println!(
"Product Name: {}\nPackage number: {}",
product_name, package_number
),
_ => println!("Not covered"),
}
for n in 0..package_set.event_set.len() {
match package_set.event_set[n] {
Eventset {
description: Some(ref description),
status: Some(ref status),
..
} => println!("Description: {}\nStatus: {}", description, status),
_ => println!("Not covered"),
}
}
}
}
}
Err(_) => deserialize_err(&buf).await,
}
}
async fn | (buf: &String) {
let deserialized: Result<ErrorConsignmentSet, serde_json::Error> = serde_json::from_str(&buf);
match deserialized {
Ok(deserialized) => {
eprintln!(
"Error: {}, Code:{}",
deserialized.error.message, deserialized.error.code
);
}
Err(e) => eprintln!(
"Error while deserializing, please check if your tracking number is valid. {}",
e
),
}
}
| deserialize_err | identifier_name |
main.rs | #[macro_use]
extern crate serde_derive;
extern crate clap;
extern crate hyper;
extern crate reqwest;
extern crate serde_json;
mod json_response;
use crate::json_response::{BringResponse, ErrorConsignmentSet, Eventset, Packageset};
use clap::{App, AppSettings, Arg};
use reqwest::Error;
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
let input = get_content(parse_input()).await;
deserialize(input.unwrap()).await;
Ok(())
}
async fn get_content(url: String) -> Result<String, Error> {
let body = reqwest::get(&url).await?.text().await?;
Ok(body)
}
fn parse_input() -> String | }
async fn deserialize(buf: String) {
let deserialized: Result<BringResponse, serde_json::Error> = serde_json::from_str(buf.trim());
match deserialized {
Ok(deserialized) => {
let sets = deserialized.consignment_set;
for i in 0..sets.len() {
let consignment_set = &sets[i];
for x in 0..consignment_set.package_set.len() {
let package_set = &consignment_set.package_set[x];
match consignment_set.package_set[x] {
Packageset {
product_name: Some(ref product_name),
package_number: Some(ref package_number),
..
} => println!(
"Product Name: {}\nPackage number: {}",
product_name, package_number
),
_ => println!("Not covered"),
}
for n in 0..package_set.event_set.len() {
match package_set.event_set[n] {
Eventset {
description: Some(ref description),
status: Some(ref status),
..
} => println!("Description: {}\nStatus: {}", description, status),
_ => println!("Not covered"),
}
}
}
}
}
Err(_) => deserialize_err(&buf).await,
}
}
async fn deserialize_err(buf: &String) {
let deserialized: Result<ErrorConsignmentSet, serde_json::Error> = serde_json::from_str(&buf);
match deserialized {
Ok(deserialized) => {
eprintln!(
"Error: {}, Code:{}",
deserialized.error.message, deserialized.error.code
);
}
Err(e) => eprintln!(
"Error while deserializing, please check if your tracking number is valid. {}",
e
),
}
}
| {
let matches = App::new("Rosten")
.version("0.1.1")
.author("Stian Eklund. <[email protected]>")
.about("Get shipment status of your Bring & Posten packages")
.setting(AppSettings::ArgRequiredElseHelp)
.arg(
Arg::with_name("track")
.short("t")
.long("track")
.help("Get package status")
.takes_value(true),
)
.get_matches();
let input = matches.value_of("track").unwrap();
String::from(format!(
"https://tracking.bring.com/api/v2/tracking.json?q={}",
input
)) | identifier_body |
mercurial.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::io::Write;
use anyhow::Result;
use manifest::{Entry, Manifest};
use mercurial_types::blobs::HgBlobManifest;
use unicode_truncate::{Alignment, UnicodeTruncateStr};
use unicode_width::UnicodeWidthStr;
/// Displays a Mercurial manifest, one entry per line.
pub fn display_hg_manifest(mut w: impl Write, manifest: &HgBlobManifest) -> Result<()> | ty,
)?;
}
Ok(())
}
| {
let entries = manifest
.list()
.map(|(name, entry)| (String::from_utf8_lossy(name.as_ref()).into_owned(), entry))
.collect::<Vec<_>>();
let max_width = entries
.iter()
.map(|(name, _)| name.width())
.max()
.unwrap_or(0);
for (name, entry) in entries {
let (ty, id) = match entry {
Entry::Leaf((ty, id)) => (ty.to_string(), id.to_string()),
Entry::Tree(id) => ("tree".to_string(), id.to_string()),
};
writeln!(
w,
"{} {} {}",
name.unicode_pad(max_width, Alignment::Left, false),
id, | identifier_body |
mercurial.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::io::Write;
use anyhow::Result;
use manifest::{Entry, Manifest};
use mercurial_types::blobs::HgBlobManifest;
use unicode_truncate::{Alignment, UnicodeTruncateStr};
use unicode_width::UnicodeWidthStr;
/// Displays a Mercurial manifest, one entry per line.
pub fn | (mut w: impl Write, manifest: &HgBlobManifest) -> Result<()> {
let entries = manifest
.list()
.map(|(name, entry)| (String::from_utf8_lossy(name.as_ref()).into_owned(), entry))
.collect::<Vec<_>>();
let max_width = entries
.iter()
.map(|(name, _)| name.width())
.max()
.unwrap_or(0);
for (name, entry) in entries {
let (ty, id) = match entry {
Entry::Leaf((ty, id)) => (ty.to_string(), id.to_string()),
Entry::Tree(id) => ("tree".to_string(), id.to_string()),
};
writeln!(
w,
"{} {} {}",
name.unicode_pad(max_width, Alignment::Left, false),
id,
ty,
)?;
}
Ok(())
}
| display_hg_manifest | identifier_name |
mercurial.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::io::Write;
use anyhow::Result;
use manifest::{Entry, Manifest};
use mercurial_types::blobs::HgBlobManifest;
use unicode_truncate::{Alignment, UnicodeTruncateStr}; | .list()
.map(|(name, entry)| (String::from_utf8_lossy(name.as_ref()).into_owned(), entry))
.collect::<Vec<_>>();
let max_width = entries
.iter()
.map(|(name, _)| name.width())
.max()
.unwrap_or(0);
for (name, entry) in entries {
let (ty, id) = match entry {
Entry::Leaf((ty, id)) => (ty.to_string(), id.to_string()),
Entry::Tree(id) => ("tree".to_string(), id.to_string()),
};
writeln!(
w,
"{} {} {}",
name.unicode_pad(max_width, Alignment::Left, false),
id,
ty,
)?;
}
Ok(())
} | use unicode_width::UnicodeWidthStr;
/// Displays a Mercurial manifest, one entry per line.
pub fn display_hg_manifest(mut w: impl Write, manifest: &HgBlobManifest) -> Result<()> {
let entries = manifest | random_line_split |
fixed-point-bind-unique.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
fn fix_help<A:'static,B:Send>(f: extern fn(@fn(A) -> B, A) -> B, x: A) -> B {
return f(|a| fix_help(f, a), x);
}
fn fix<A:'static,B:Send>(f: extern fn(@fn(A) -> B, A) -> B) -> @fn(A) -> B {
return |a| fix_help(f, a);
}
fn fact_(f: @fn(v: int) -> int, n: int) -> int {
// fun fact 0 = 1
return if n == 0 | else { n * f(n - 1) };
}
pub fn main() {
let fact = fix(fact_);
assert_eq!(fact(5), 120);
assert_eq!(fact(2), 2);
}
| { 1 } | conditional_block |
fixed-point-bind-unique.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
fn fix_help<A:'static,B:Send>(f: extern fn(@fn(A) -> B, A) -> B, x: A) -> B {
return f(|a| fix_help(f, a), x);
}
fn fix<A:'static,B:Send>(f: extern fn(@fn(A) -> B, A) -> B) -> @fn(A) -> B |
fn fact_(f: @fn(v: int) -> int, n: int) -> int {
// fun fact 0 = 1
return if n == 0 { 1 } else { n * f(n - 1) };
}
pub fn main() {
let fact = fix(fact_);
assert_eq!(fact(5), 120);
assert_eq!(fact(2), 2);
}
| {
return |a| fix_help(f, a);
} | identifier_body |
fixed-point-bind-unique.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
fn fix_help<A:'static,B:Send>(f: extern fn(@fn(A) -> B, A) -> B, x: A) -> B {
return f(|a| fix_help(f, a), x);
}
fn | <A:'static,B:Send>(f: extern fn(@fn(A) -> B, A) -> B) -> @fn(A) -> B {
return |a| fix_help(f, a);
}
fn fact_(f: @fn(v: int) -> int, n: int) -> int {
// fun fact 0 = 1
return if n == 0 { 1 } else { n * f(n - 1) };
}
pub fn main() {
let fact = fix(fact_);
assert_eq!(fact(5), 120);
assert_eq!(fact(2), 2);
}
| fix | identifier_name |
fixed-point-bind-unique.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
fn fix_help<A:'static,B:Send>(f: extern fn(@fn(A) -> B, A) -> B, x: A) -> B {
return f(|a| fix_help(f, a), x);
}
fn fix<A:'static,B:Send>(f: extern fn(@fn(A) -> B, A) -> B) -> @fn(A) -> B {
return |a| fix_help(f, a);
}
fn fact_(f: @fn(v: int) -> int, n: int) -> int {
// fun fact 0 = 1
return if n == 0 { 1 } else { n * f(n - 1) };
}
pub fn main() { | let fact = fix(fact_);
assert_eq!(fact(5), 120);
assert_eq!(fact(2), 2);
} | random_line_split |
|
issue-15167.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// macro f should not be able to inject a reference to 'n'.
//
// Ignored because `for` loops are not hygienic yet; they will require special
// handling since they introduce a new pattern binding position.
// ignore-test
macro_rules! f { () => (n) }
fn | () -> (){
for n in 0..1 {
println!("{}", f!()); //~ ERROR unresolved name `n`
}
}
| main | identifier_name |
issue-15167.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// macro f should not be able to inject a reference to 'n'.
//
// Ignored because `for` loops are not hygienic yet; they will require special
// handling since they introduce a new pattern binding position.
// ignore-test
macro_rules! f { () => (n) }
fn main() -> () | {
for n in 0..1 {
println!("{}", f!()); //~ ERROR unresolved name `n`
}
} | identifier_body |
|
issue-15167.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed | //
// Ignored because `for` loops are not hygienic yet; they will require special
// handling since they introduce a new pattern binding position.
// ignore-test
macro_rules! f { () => (n) }
fn main() -> (){
for n in 0..1 {
println!("{}", f!()); //~ ERROR unresolved name `n`
}
} | // except according to those terms.
// macro f should not be able to inject a reference to 'n'. | random_line_split |
reminder.rs | use typemap_rev::TypeMapKey;
use crate::database::reminders::{Reminder, RemindersRepository};
use crate::util::now;
use serenity::model::id::{ChannelId, GuildId};
use serenity::model::user::User;
use std::time::Duration;
use tracing::error;
impl TypeMapKey for ReminderService {
type Value = ReminderService;
}
pub struct ReminderService {
pub repository: RemindersRepository,
}
pub enum CreateReminderFailure {
Unknown,
}
impl ReminderService {
pub async fn fetch_expired_reminders(&self) -> Vec<Reminder> {
self.repository
.fetch_expired_reminders()
.await
.map_err(|err| {
error!("failed to fetch expired reminders {:?}", err);
err
})
.ok()
.unwrap_or_else(Vec::new)
}
pub async fn create_reminder(
&self,
guild_id: GuildId,
channel_id: ChannelId,
user: &User,
duration: Option<Duration>,
message: String,
) -> Result<(), CreateReminderFailure> {
let now = now();
let remind_time = duration
.map(|duration| now + duration.as_secs())
.unwrap_or_else(|| now + 1000 * 60 * 60 * 24);
let reminder = Reminder {
id: 0,
user_id: user.id.0 as i64,
channel_id: channel_id.0 as i64,
guild_id: guild_id.0 as i64,
create_time: now as i64,
remind_time: remind_time as i64,
reminded: false,
message,
};
return match self.repository.insert_reminder(reminder).await {
Ok(_) => Ok(()),
Err(err) => {
error!("failed to insert reminder {:?}", err);
Err(CreateReminderFailure::Unknown)
}
};
}
pub async fn invalidate_reminder(&self, id: i32) |
}
| {
let _ = self
.repository
.invalidate_reminder(id)
.await
.map_err(|err| {
error!("failed to invalidate reminder {:?}", err);
err
});
} | identifier_body |
reminder.rs | use typemap_rev::TypeMapKey;
use crate::database::reminders::{Reminder, RemindersRepository};
use crate::util::now;
use serenity::model::id::{ChannelId, GuildId};
use serenity::model::user::User;
use std::time::Duration;
use tracing::error;
impl TypeMapKey for ReminderService {
type Value = ReminderService;
}
pub struct ReminderService {
pub repository: RemindersRepository,
}
pub enum CreateReminderFailure {
Unknown,
}
impl ReminderService {
pub async fn fetch_expired_reminders(&self) -> Vec<Reminder> {
self.repository
.fetch_expired_reminders()
.await
.map_err(|err| {
error!("failed to fetch expired reminders {:?}", err);
err
})
.ok()
.unwrap_or_else(Vec::new)
} | pub async fn create_reminder(
&self,
guild_id: GuildId,
channel_id: ChannelId,
user: &User,
duration: Option<Duration>,
message: String,
) -> Result<(), CreateReminderFailure> {
let now = now();
let remind_time = duration
.map(|duration| now + duration.as_secs())
.unwrap_or_else(|| now + 1000 * 60 * 60 * 24);
let reminder = Reminder {
id: 0,
user_id: user.id.0 as i64,
channel_id: channel_id.0 as i64,
guild_id: guild_id.0 as i64,
create_time: now as i64,
remind_time: remind_time as i64,
reminded: false,
message,
};
return match self.repository.insert_reminder(reminder).await {
Ok(_) => Ok(()),
Err(err) => {
error!("failed to insert reminder {:?}", err);
Err(CreateReminderFailure::Unknown)
}
};
}
pub async fn invalidate_reminder(&self, id: i32) {
let _ = self
.repository
.invalidate_reminder(id)
.await
.map_err(|err| {
error!("failed to invalidate reminder {:?}", err);
err
});
}
} | random_line_split |
|
reminder.rs | use typemap_rev::TypeMapKey;
use crate::database::reminders::{Reminder, RemindersRepository};
use crate::util::now;
use serenity::model::id::{ChannelId, GuildId};
use serenity::model::user::User;
use std::time::Duration;
use tracing::error;
impl TypeMapKey for ReminderService {
type Value = ReminderService;
}
pub struct | {
pub repository: RemindersRepository,
}
pub enum CreateReminderFailure {
Unknown,
}
impl ReminderService {
pub async fn fetch_expired_reminders(&self) -> Vec<Reminder> {
self.repository
.fetch_expired_reminders()
.await
.map_err(|err| {
error!("failed to fetch expired reminders {:?}", err);
err
})
.ok()
.unwrap_or_else(Vec::new)
}
pub async fn create_reminder(
&self,
guild_id: GuildId,
channel_id: ChannelId,
user: &User,
duration: Option<Duration>,
message: String,
) -> Result<(), CreateReminderFailure> {
let now = now();
let remind_time = duration
.map(|duration| now + duration.as_secs())
.unwrap_or_else(|| now + 1000 * 60 * 60 * 24);
let reminder = Reminder {
id: 0,
user_id: user.id.0 as i64,
channel_id: channel_id.0 as i64,
guild_id: guild_id.0 as i64,
create_time: now as i64,
remind_time: remind_time as i64,
reminded: false,
message,
};
return match self.repository.insert_reminder(reminder).await {
Ok(_) => Ok(()),
Err(err) => {
error!("failed to insert reminder {:?}", err);
Err(CreateReminderFailure::Unknown)
}
};
}
pub async fn invalidate_reminder(&self, id: i32) {
let _ = self
.repository
.invalidate_reminder(id)
.await
.map_err(|err| {
error!("failed to invalidate reminder {:?}", err);
err
});
}
}
| ReminderService | identifier_name |
util.rs | use std::collections::HashSet;
use std::fmt;
use std::net::IpAddr;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use futures::{Async, Future, Poll};
use ifaces;
use url::Url;
use pb::HostPortPb;
use timestamp::DateTime;
use DataType;
use Error;
use Row;
pub fn time_to_us(time: SystemTime) -> i64 {
// TODO: do overflow checking
match time.duration_since(UNIX_EPOCH) {
Ok(duration) => {
(duration.as_secs() * 1_000_000 + u64::from(duration.subsec_nanos()) / 1000) as i64
}
Err(error) => {
let duration = error.duration();
(-((duration.as_secs() * 1_000_000 + u64::from(duration.subsec_nanos()) / 1000) as i64))
}
}
}
pub fn us_to_time(us: i64) -> SystemTime {
let abs = us.abs() as u64;
let s = abs / 1_000_000;
let ns = (abs % 1_000_000) as u32 * 1000;
if us.is_negative() {
UNIX_EPOCH - Duration::new(s, ns)
} else |
}
pub fn fmt_hex<T>(f: &mut fmt::Formatter, bytes: &[T]) -> fmt::Result
where
T: fmt::LowerHex,
{
write!(f, "0x")?;
for b in bytes {
write!(f, "{:02x}", b)?;
}
Ok(())
}
fn fmt_timestamp(timestamp: SystemTime) -> impl fmt::Display {
DateTime::from(timestamp)
}
pub fn fmt_cell(f: &mut fmt::Formatter, row: &Row, idx: usize) -> fmt::Result {
debug_assert!(row.is_set(idx).unwrap());
if row.is_null(idx).unwrap() {
return write!(f, "NULL");
}
match row.schema().columns()[idx].data_type() {
DataType::Bool => write!(f, "{}", row.get::<_, bool>(idx).unwrap()),
DataType::Int8 => write!(f, "{}", row.get::<_, i8>(idx).unwrap()),
DataType::Int16 => write!(f, "{}", row.get::<_, i16>(idx).unwrap()),
DataType::Int32 => write!(f, "{}", row.get::<_, i32>(idx).unwrap()),
DataType::Int64 => write!(f, "{}", row.get::<_, i64>(idx).unwrap()),
DataType::Timestamp => write!(
f,
"{}",
fmt_timestamp(row.get::<_, SystemTime>(idx).unwrap())
),
DataType::Float => write!(f, "{}", row.get::<_, f32>(idx).unwrap()),
DataType::Double => write!(f, "{}", row.get::<_, f64>(idx).unwrap()),
DataType::Binary => fmt_hex(f, row.get::<_, &[u8]>(idx).unwrap()),
DataType::String => write!(f, "{:?}", row.get::<_, &str>(idx).unwrap()),
}
}
lazy_static! {
static ref LOCAL_ADDRS: HashSet<IpAddr> = {
let mut addrs = HashSet::new();
match ifaces::Interface::get_all() {
Ok(ifaces) => {
for iface in ifaces {
if let Some(addr) = iface.addr {
addrs.insert(addr.ip());
}
}
}
Err(error) => warn!("failed to resolve local interface addresses: {}", error),
}
addrs
};
}
/// Returns `true` if socket addr is for a local interface.
#[allow(dead_code)]
pub fn is_local_addr(addr: &IpAddr) -> bool {
LOCAL_ADDRS.contains(addr) || addr.is_loopback()
}
pub(crate) fn urls_from_pb(
hostports: &[HostPortPb],
https_enabled: bool,
) -> Result<Vec<Url>, Error> {
hostports
.iter()
.map(|hostport| {
Url::parse(&format!(
"{}://{}:{}",
if https_enabled { "https" } else { "http" },
hostport.host,
hostport.port
)).map_err(From::from)
}).collect()
}
pub struct ContextFuture<F, C> {
future: F,
context: Option<C>,
}
impl<F, C> ContextFuture<F, C> {
pub fn new(future: F, context: C) -> ContextFuture<F, C> {
ContextFuture {
future,
context: Some(context),
}
}
}
impl<F, C> Future for ContextFuture<F, C>
where
F: Future,
{
type Item = (F::Item, C);
type Error = (F::Error, C);
fn poll(&mut self) -> Poll<(F::Item, C), (F::Error, C)> {
match self.future.poll() {
Ok(Async::Ready(item)) => Ok(Async::Ready((
item,
self.context.take().expect("future already complete"),
))),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(error) => Err((error, self.context.take().expect("future already complete"))),
}
}
}
#[cfg(test)]
mod tests {
use std::net::ToSocketAddrs;
use env_logger;
use proptest::prelude::*;
use super::*;
#[test]
fn test_is_local_addr() {
let _ = env_logger::try_init();
let addr = "127.0.1.1:0"
.to_socket_addrs()
.unwrap()
.next()
.unwrap()
.ip();
assert!(is_local_addr(&addr));
let addr = "127.0.0.1:0"
.to_socket_addrs()
.unwrap()
.next()
.unwrap()
.ip();
assert!(is_local_addr(&addr));
}
proptest! {
#[test]
fn check_timestamp_micros_roundtrip(us in any::<i64>()) {
prop_assert_eq!(us, time_to_us(us_to_time(us)))
}
#[test]
fn check_systemtime_roundtrip(timestamp in any::<SystemTime>()) {
prop_assert_eq!(timestamp, us_to_time(time_to_us(timestamp)));
}
#[test]
fn check_format_timestamp(system_time in any::<SystemTime>()) {
let _ = env_logger::try_init();
format!("{}", fmt_timestamp(system_time));
}
}
}
| {
UNIX_EPOCH + Duration::new(s, ns)
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.