file_name: large_string, lengths 4 to 69
prefix: large_string, lengths 0 to 26.7k
suffix: large_string, lengths 0 to 24.8k
middle: large_string, lengths 0 to 2.12k
fim_type: large_string, 4 values
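
The rows below are fill-in-the-middle (FIM) samples: for each source file, concatenating `prefix` + `middle` + `suffix` reconstructs the original text, and `fim_type` records how the split point was chosen (random_line_split, conditional_block, identifier_body, identifier_name). A minimal sketch of how one row might be reassembled into a single training string, assuming PSM-style ordering; the `FimRow` struct, the `to_psm_string` helper, and the `<fim_*>` sentinel tokens are illustrative placeholders, not part of this dataset:

```rust
// Hypothetical representation of one dataset row (field names taken from the
// schema above; everything else is an assumption for illustration).
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

// Assemble a PSM-ordered (prefix, suffix, middle) training string.
// The sentinel token names are placeholders; real ones depend on the tokenizer.
fn to_psm_string(row: &FimRow) -> String {
    format!(
        "<fim_prefix>{}<fim_suffix>{}<fim_middle>{}",
        row.prefix, row.suffix, row.middle
    )
}

fn main() {
    // Tiny synthetic example, not taken from the rows below.
    let row = FimRow {
        file_name: "example.rs".to_string(),
        prefix: "fn add(a: i32, b: i32) -> i32 {\n    ".to_string(),
        suffix: "\n}\n".to_string(),
        middle: "a + b".to_string(),
        fim_type: "random_line_split".to_string(),
    };
    println!("{}", to_psm_string(&row));
    println!("({} / {})", row.file_name, row.fim_type);
}
```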
p_2_3_1_01.rs
// P_2_3_1_01 // // Generative Gestaltung – Creative Coding im Web // ISBN: 978-3-87439-902-9, First Edition, Hermann Schmidt, Mainz, 2018 // Benedikt Groß, Hartmut Bohnacker, Julia Laub, Claudius Lazzeroni // with contributions by Joey Lee and Niels Poldervaart // Copyright 2018 // // http://www.generative-gestaltung.de // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * draw tool. draw with a rotating line. * * MOUSE * drag : draw * * KEYS * 1-4 : switch default colors * delete/backspace : clear screen * d : reverse direction and mirror angle * space : new random color * arrow left : rotation speed - * arrow right : rotation speed + * arrow up : line length + * arrow down : line length - * s : save png */ use nannou::prelude::*; fn main() { nannou::app(model).update(update).run(); } struct Model { c: Rgba, line_length: f32, angle: f32, angle_speed: f32, } fn model(app: &App) -> Model { app.new_window() .size(1280, 720) .view(view) .mouse_pressed(mouse_pressed) .key_pressed(key_pressed) .key_released(key_released) .build() .unwrap(); Model { c: rgba(0.7, 0.6, 0.0, 1.0), line_length: 0.0, angle: 0.0, angle_speed: 1.0, } } fn update(app: &App, model: &mut Model, _update: Update) { if app.mouse.buttons.left().is_down() { model.angle += model.angle_speed; } } fn view(app: &App, model: &Model, frame: Frame) { let mut draw = app.draw(); if frame.nth() == 0 || app.keys.down.contains(&Key::Delete) { frame.clear(WHITE); } if app.mouse.buttons.left().is_down() { draw = draw .x_y(app.mouse.x, app.mouse.y) .rotate(model.angle.to_radians()); draw.line() .start(pt2(0.0, 0.0)) .end(pt2(model.line_length, 0.0))
.color(model.c); } // Write to the window frame. draw.to_frame(app, &frame).unwrap(); } fn mouse_pressed(_app: &App, model: &mut Model, _button: MouseButton) { model.line_length = random_range(70.0, 200.0); } fn key_pressed(_app: &App, model: &mut Model, key: Key) { match key { Key::Up => { model.line_length += 5.0; } Key::Down => { model.line_length -= 5.0; } Key::Left => { model.angle_speed -= 0.5; } Key::Right => { model.angle_speed += 0.5; } _otherkey => (), } } fn key_released(app: &App, model: &mut Model, key: Key) { match key { Key::S => { app.main_window() .capture_frame(app.exe_name().unwrap() + ".png"); } // reverse direction and mirror angle Key::D => { model.angle += 180.0; model.angle_speed *= -1.0; } // change color Key::Space => { model.c = rgba( random_f32(), random_f32(), random_f32(), random_range(0.3, 0.4), ); } // default colors from 1 to 4 Key::Key1 => { model.c = rgba(0.7, 0.61, 0.0, 1.0); } Key::Key2 => { model.c = rgba(0.0, 0.5, 0.64, 1.0); } Key::Key3 => { model.c = rgba(0.34, 0.13, 0.5, 1.0); } Key::Key4 => { model.c = rgba(0.77, 0.0, 0.48, 1.0); } _otherkey => (), } }
random_line_split
p_2_3_1_01.rs
// P_2_3_1_01 // // Generative Gestaltung – Creative Coding im Web // ISBN: 978-3-87439-902-9, First Edition, Hermann Schmidt, Mainz, 2018 // Benedikt Groß, Hartmut Bohnacker, Julia Laub, Claudius Lazzeroni // with contributions by Joey Lee and Niels Poldervaart // Copyright 2018 // // http://www.generative-gestaltung.de // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * draw tool. draw with a rotating line. * * MOUSE * drag : draw * * KEYS * 1-4 : switch default colors * delete/backspace : clear screen * d : reverse direction and mirror angle * space : new random color * arrow left : rotation speed - * arrow right : rotation speed + * arrow up : line length + * arrow down : line length - * s : save png */ use nannou::prelude::*; fn main() { nannou::app(model).update(update).run(); } struct Model { c: Rgba, line_length: f32, angle: f32, angle_speed: f32, } fn model(app: &App) -> Model { app.new_window() .size(1280, 720) .view(view) .mouse_pressed(mouse_pressed) .key_pressed(key_pressed) .key_released(key_released) .build() .unwrap(); Model { c: rgba(0.7, 0.6, 0.0, 1.0), line_length: 0.0, angle: 0.0, angle_speed: 1.0, } } fn update(app: &App, model: &mut Model, _update: Update) { if app.mouse.buttons.left().is_down() { model.angle += model.angle_speed; } } fn view(app: &App, model: &Model, frame: Frame) { let mut draw = app.draw(); if frame.nth() == 0 || app.keys.down.contains(&Key::Delete) { frame.clear(WHITE); } if app.mouse.buttons.left().is_down() { draw = draw .x_y(app.mouse.x, app.mouse.y) .rotate(model.angle.to_radians()); draw.line() .start(pt2(0.0, 0.0)) .end(pt2(model.line_length, 0.0)) .color(model.c); } // Write to the window frame. draw.to_frame(app, &frame).unwrap(); } fn mouse_pressed(_app: &App, model: &mut Model, _button: MouseButton) { model.line_length = random_range(70.0, 200.0); } fn key_pressed(_app: &App, model: &mut Model, key: Key) { match key { Key::Up => { model.line_length += 5.0; } Key::Down => { model.line_length -= 5.0; } Key::Left => { model.angle_speed -= 0.5; } Key::Right => { model.angle_speed += 0.5; } _otherkey => (), } } fn key_released(app: &App, model: &mut Model, key: Key) { match key { Key::S => { app.main_window() .capture_frame(app.exe_name().unwrap() + ".png"); } // reverse direction and mirror angle Key::D => { model.angle += 180.0; model.angle_speed *= -1.0; } // change color Key::Space => { model.c = rgba( random_f32(), random_f32(), random_f32(), random_range(0.3, 0.4), ); } // default colors from 1 to 4 Key::Key1 => {
Key::Key2 => { model.c = rgba(0.0, 0.5, 0.64, 1.0); } Key::Key3 => { model.c = rgba(0.34, 0.13, 0.5, 1.0); } Key::Key4 => { model.c = rgba(0.77, 0.0, 0.48, 1.0); } _otherkey => (), } }
model.c = rgba(0.7, 0.61, 0.0, 1.0); }
conditional_block
render.rs
use ammonia::Ammonia; use comrak; use util::CargoResult; /// Context for markdown to HTML rendering. #[allow(missing_debug_implementations)] pub struct MarkdownRenderer<'a> { html_sanitizer: Ammonia<'a>, } impl<'a> MarkdownRenderer<'a> { /// Creates a new renderer instance. pub fn new() -> MarkdownRenderer<'a> { let tags = [ "a", "b", "blockquote", "br", "code", "dd", "del", "dl", "dt", "em", "h1", "h2", "h3", "hr", "i", "img", "input", "kbd", "li", "ol", "p", "pre", "s", "strike", "strong", "sub", "sup", "table", "tbody", "td", "th", "thead", "tr", "ul", "hr", "span", ].iter() .cloned() .collect(); let tag_attributes = [ ("a", ["href", "target"].iter().cloned().collect()), ( "img", ["width", "height", "src", "alt", "align", "width"] .iter() .cloned() .collect(), ), ( "input", ["checked", "disabled", "type"].iter().cloned().collect(), ), ].iter() .cloned() .collect(); let html_sanitizer = Ammonia { link_rel: Some("nofollow noopener noreferrer"), keep_cleaned_elements: true, tags: tags, tag_attributes: tag_attributes, ..Ammonia::default() }; MarkdownRenderer { html_sanitizer: html_sanitizer } } /// Renders the given markdown to HTML using the current settings. pub fn to_html(&self, text: &str) -> CargoResult<String> { let options = comrak::ComrakOptions { ext_autolink: true, ext_strikethrough: true, ext_table: true, ext_tagfilter: true, ext_tasklist: true, ..comrak::ComrakOptions::default() }; let rendered = comrak::markdown_to_html(text, &options); Ok(self.html_sanitizer.clean(&rendered)) } } impl<'a> Default for MarkdownRenderer<'a> { fn default() -> Self { Self::new() } } /// Renders a markdown text to sanitized HTML. /// /// The returned text should not contain any harmful HTML tag or attribute (such as iframe, /// onclick, onmouseover, etc.). /// /// # Examples /// /// ``` /// use render::markdown_to_html; /// /// let text = "[Rust](https://rust-lang.org/) is an awesome *systems programming* language!"; /// let rendered = markdown_to_html(text)?; /// ``` pub fn markdown_to_html(text: &str) -> CargoResult<String> { let renderer = MarkdownRenderer::new(); renderer.to_html(text) } #[cfg(test)] mod tests { use super::*; #[test] fn empty_text() { let text = ""; let result = markdown_to_html(text).unwrap(); assert_eq!(result, ""); } #[test] fn text_with_script_tag() { let text = "foo_readme\n\n<script>alert('Hello World')</script>"; let result = markdown_to_html(text).unwrap(); assert_eq!( result, "<p>foo_readme</p>\n&lt;script&gt;alert(\'Hello World\')&lt;/script&gt;\n" ); } #[test] fn text_with_iframe_tag()
#[test] fn text_with_unknown_tag() { let text = "foo_readme\n\n<unknown>alert('Hello World')</unknown>"; let result = markdown_to_html(text).unwrap(); assert_eq!(result, "<p>foo_readme</p>\n<p>alert(\'Hello World\')</p>\n"); } #[test] fn text_with_inline_javascript() { let text = r#"foo_readme\n\n<a href="https://crates.io/crates/cargo-registry" onclick="window.alert('Got you')">Crate page</a>"#; let result = markdown_to_html(text).unwrap(); assert_eq!( result, "<p>foo_readme\\n\\n<a href=\"https://crates.io/crates/cargo-registry\" rel=\"nofollow noopener noreferrer\">Crate page</a></p>\n" ); } }
{ let text = "foo_readme\n\n<iframe>alert('Hello World')</iframe>"; let result = markdown_to_html(text).unwrap(); assert_eq!( result, "<p>foo_readme</p>\n&lt;iframe&gt;alert(\'Hello World\')&lt;/iframe&gt;\n" ); }
identifier_body
render.rs
use ammonia::Ammonia; use comrak; use util::CargoResult; /// Context for markdown to HTML rendering. #[allow(missing_debug_implementations)] pub struct MarkdownRenderer<'a> { html_sanitizer: Ammonia<'a>, } impl<'a> MarkdownRenderer<'a> { /// Creates a new renderer instance. pub fn new() -> MarkdownRenderer<'a> { let tags = [ "a", "b", "blockquote", "br", "code", "dd", "del", "dl", "dt", "em", "h1", "h2", "h3", "hr", "i", "img", "input", "kbd", "li", "ol", "p", "pre", "s", "strike", "strong", "sub", "sup", "table", "tbody", "td", "th", "thead", "tr", "ul", "hr", "span", ].iter() .cloned() .collect(); let tag_attributes = [ ("a", ["href", "target"].iter().cloned().collect()), ( "img", ["width", "height", "src", "alt", "align", "width"] .iter() .cloned() .collect(), ), ( "input", ["checked", "disabled", "type"].iter().cloned().collect(), ), ].iter() .cloned() .collect(); let html_sanitizer = Ammonia { link_rel: Some("nofollow noopener noreferrer"), keep_cleaned_elements: true, tags: tags, tag_attributes: tag_attributes, ..Ammonia::default() }; MarkdownRenderer { html_sanitizer: html_sanitizer } } /// Renders the given markdown to HTML using the current settings. pub fn to_html(&self, text: &str) -> CargoResult<String> { let options = comrak::ComrakOptions { ext_autolink: true, ext_strikethrough: true, ext_table: true, ext_tagfilter: true, ext_tasklist: true, ..comrak::ComrakOptions::default() }; let rendered = comrak::markdown_to_html(text, &options); Ok(self.html_sanitizer.clean(&rendered)) } } impl<'a> Default for MarkdownRenderer<'a> { fn default() -> Self { Self::new() } } /// Renders a markdown text to sanitized HTML. /// /// The returned text should not contain any harmful HTML tag or attribute (such as iframe, /// onclick, onmouseover, etc.). /// /// # Examples /// /// ``` /// use render::markdown_to_html; /// /// let text = "[Rust](https://rust-lang.org/) is an awesome *systems programming* language!"; /// let rendered = markdown_to_html(text)?; /// ``` pub fn markdown_to_html(text: &str) -> CargoResult<String> { let renderer = MarkdownRenderer::new(); renderer.to_html(text) } #[cfg(test)] mod tests { use super::*; #[test] fn empty_text() { let text = ""; let result = markdown_to_html(text).unwrap(); assert_eq!(result, ""); } #[test] fn text_with_script_tag() { let text = "foo_readme\n\n<script>alert('Hello World')</script>"; let result = markdown_to_html(text).unwrap(); assert_eq!( result, "<p>foo_readme</p>\n&lt;script&gt;alert(\'Hello World\')&lt;/script&gt;\n" ); } #[test] fn text_with_iframe_tag() { let text = "foo_readme\n\n<iframe>alert('Hello World')</iframe>"; let result = markdown_to_html(text).unwrap(); assert_eq!( result, "<p>foo_readme</p>\n&lt;iframe&gt;alert(\'Hello World\')&lt;/iframe&gt;\n" ); } #[test] fn
() { let text = "foo_readme\n\n<unknown>alert('Hello World')</unknown>"; let result = markdown_to_html(text).unwrap(); assert_eq!(result, "<p>foo_readme</p>\n<p>alert(\'Hello World\')</p>\n"); } #[test] fn text_with_inline_javascript() { let text = r#"foo_readme\n\n<a href="https://crates.io/crates/cargo-registry" onclick="window.alert('Got you')">Crate page</a>"#; let result = markdown_to_html(text).unwrap(); assert_eq!( result, "<p>foo_readme\\n\\n<a href=\"https://crates.io/crates/cargo-registry\" rel=\"nofollow noopener noreferrer\">Crate page</a></p>\n" ); } }
text_with_unknown_tag
identifier_name
render.rs
use ammonia::Ammonia; use comrak; use util::CargoResult; /// Context for markdown to HTML rendering. #[allow(missing_debug_implementations)] pub struct MarkdownRenderer<'a> { html_sanitizer: Ammonia<'a>, } impl<'a> MarkdownRenderer<'a> { /// Creates a new renderer instance. pub fn new() -> MarkdownRenderer<'a> { let tags = [ "a",
"blockquote", "br", "code", "dd", "del", "dl", "dt", "em", "h1", "h2", "h3", "hr", "i", "img", "input", "kbd", "li", "ol", "p", "pre", "s", "strike", "strong", "sub", "sup", "table", "tbody", "td", "th", "thead", "tr", "ul", "hr", "span", ].iter() .cloned() .collect(); let tag_attributes = [ ("a", ["href", "target"].iter().cloned().collect()), ( "img", ["width", "height", "src", "alt", "align", "width"] .iter() .cloned() .collect(), ), ( "input", ["checked", "disabled", "type"].iter().cloned().collect(), ), ].iter() .cloned() .collect(); let html_sanitizer = Ammonia { link_rel: Some("nofollow noopener noreferrer"), keep_cleaned_elements: true, tags: tags, tag_attributes: tag_attributes, ..Ammonia::default() }; MarkdownRenderer { html_sanitizer: html_sanitizer } } /// Renders the given markdown to HTML using the current settings. pub fn to_html(&self, text: &str) -> CargoResult<String> { let options = comrak::ComrakOptions { ext_autolink: true, ext_strikethrough: true, ext_table: true, ext_tagfilter: true, ext_tasklist: true, ..comrak::ComrakOptions::default() }; let rendered = comrak::markdown_to_html(text, &options); Ok(self.html_sanitizer.clean(&rendered)) } } impl<'a> Default for MarkdownRenderer<'a> { fn default() -> Self { Self::new() } } /// Renders a markdown text to sanitized HTML. /// /// The returned text should not contain any harmful HTML tag or attribute (such as iframe, /// onclick, onmouseover, etc.). /// /// # Examples /// /// ``` /// use render::markdown_to_html; /// /// let text = "[Rust](https://rust-lang.org/) is an awesome *systems programming* language!"; /// let rendered = markdown_to_html(text)?; /// ``` pub fn markdown_to_html(text: &str) -> CargoResult<String> { let renderer = MarkdownRenderer::new(); renderer.to_html(text) } #[cfg(test)] mod tests { use super::*; #[test] fn empty_text() { let text = ""; let result = markdown_to_html(text).unwrap(); assert_eq!(result, ""); } #[test] fn text_with_script_tag() { let text = "foo_readme\n\n<script>alert('Hello World')</script>"; let result = markdown_to_html(text).unwrap(); assert_eq!( result, "<p>foo_readme</p>\n&lt;script&gt;alert(\'Hello World\')&lt;/script&gt;\n" ); } #[test] fn text_with_iframe_tag() { let text = "foo_readme\n\n<iframe>alert('Hello World')</iframe>"; let result = markdown_to_html(text).unwrap(); assert_eq!( result, "<p>foo_readme</p>\n&lt;iframe&gt;alert(\'Hello World\')&lt;/iframe&gt;\n" ); } #[test] fn text_with_unknown_tag() { let text = "foo_readme\n\n<unknown>alert('Hello World')</unknown>"; let result = markdown_to_html(text).unwrap(); assert_eq!(result, "<p>foo_readme</p>\n<p>alert(\'Hello World\')</p>\n"); } #[test] fn text_with_inline_javascript() { let text = r#"foo_readme\n\n<a href="https://crates.io/crates/cargo-registry" onclick="window.alert('Got you')">Crate page</a>"#; let result = markdown_to_html(text).unwrap(); assert_eq!( result, "<p>foo_readme\\n\\n<a href=\"https://crates.io/crates/cargo-registry\" rel=\"nofollow noopener noreferrer\">Crate page</a></p>\n" ); } }
"b",
random_line_split
f13-mkstemp.rs
/// Code for Figure 5.13 (Demonstrate mkstemp function) /// /// implementation in rust was quite straight forward /// only caveat was that the pointer to the variable on the stack /// is not possible (or I just didn't find out how to do that and /// didn't dare to ask on stackoverflow) /// /// $ f13-mkstemp 2>/dev/null /// trying to create first temp file... /// file exists extern crate libc; #[macro_use(print_err)] extern crate apue; use std::ffi::{CString, CStr}; use std::io; use std::mem; fn make_temp(template: *mut libc::c_char) -> Result<String, String> { unsafe { let fd = libc::mkstemp(template); if fd < 0 { return Err("can't create tmp dir".to_owned()); } print_err!("temp name = {:?}", CStr::from_ptr(template)); libc::close(fd); let mut stat: libc::stat = mem::uninitialized(); if libc::stat(template, &mut stat) < 0 { if io::Error::last_os_error().raw_os_error().unwrap() == libc::ENOENT { println!("file doesn’t exist"); } else { return Err("stat failed".to_owned()); } } else { println!("file exists"); libc::unlink(template); } Ok(CString::from_raw(template).into_string().unwrap()) } } fn main() {
let good_template = CString::new("/tmp/dirXXXXXX").unwrap(); println!("trying to create first temp file..."); let res = make_temp(good_template.into_raw()); print_err!("{:?}", res); // the second part with the bad template I was just // unable to do in rust, seems that the type safety // was good enough that even after 30 minutes I couldn't // succeed }
identifier_body
f13-mkstemp.rs
/// Code for Figure 5.13 (Demonstrate mkstemp function) /// /// implementation in rust was quite straight forward /// only caveat was that the pointer to the variable on the stack /// is not possible (or I just didn't find out how to do that and /// didn't dare to ask on stackoverflow) /// /// $ f13-mkstemp 2>/dev/null /// trying to create first temp file... /// file exists extern crate libc; #[macro_use(print_err)] extern crate apue; use std::ffi::{CString, CStr}; use std::io; use std::mem; fn make_temp(template: *mut libc::c_char) -> Result<String, String> { unsafe { let fd = libc::mkstemp(template); if fd < 0 { return Err("can't create tmp dir".to_owned()); } print_err!("temp name = {:?}", CStr::from_ptr(template)); libc::close(fd); let mut stat: libc::stat = mem::uninitialized(); if libc::stat(template, &mut stat) < 0 { if io::Error::last_os_error().raw_os_error().unwrap() == libc::ENOENT { println!("file doesn’t exist"); } else {
} else { println!("file exists"); libc::unlink(template); } Ok(CString::from_raw(template).into_string().unwrap()) } } fn main() { let good_template = CString::new("/tmp/dirXXXXXX").unwrap(); println!("trying to create first temp file..."); let res = make_temp(good_template.into_raw()); print_err!("{:?}", res); // the second part with the bad template I was just // unable to do in rust, seems that the type safety // was good enough that even after 30 minutes I couldn't // succeed }
return Err("stat failed".to_owned()); }
conditional_block
f13-mkstemp.rs
/// Code for Figure 5.13 (Demonstrate mkstemp function) /// /// implementation in rust was quite straight forward /// only caveat was that the pointer to the variable on the stack /// is not possible (or I just didn't find out how to do that and /// didn't dare to ask on stackoverflow)
/// /// $ f13-mkstemp 2>/dev/null /// trying to create first temp file... /// file exists extern crate libc; #[macro_use(print_err)] extern crate apue; use std::ffi::{CString, CStr}; use std::io; use std::mem; fn make_temp(template: *mut libc::c_char) -> Result<String, String> { unsafe { let fd = libc::mkstemp(template); if fd < 0 { return Err("can't create tmp dir".to_owned()); } print_err!("temp name = {:?}", CStr::from_ptr(template)); libc::close(fd); let mut stat: libc::stat = mem::uninitialized(); if libc::stat(template, &mut stat) < 0 { if io::Error::last_os_error().raw_os_error().unwrap() == libc::ENOENT { println!("file doesn’t exist"); } else { return Err("stat failed".to_owned()); } } else { println!("file exists"); libc::unlink(template); } Ok(CString::from_raw(template).into_string().unwrap()) } } fn main() { let good_template = CString::new("/tmp/dirXXXXXX").unwrap(); println!("trying to create first temp file..."); let res = make_temp(good_template.into_raw()); print_err!("{:?}", res); // the second part with the bad template I was just // unable to do in rust, seems that the type safety // was good enough that even after 30 minutes I couldn't // succeed }
random_line_split
f13-mkstemp.rs
/// Code for Figure 5.13 (Demonstrate mkstemp function) /// /// implementation in rust was quite straight forward /// only caveat was that the pointer to the variable on the stack /// is not possible (or I just didn't find out how to do that and /// didn't dare to ask on stackoverflow) /// /// $ f13-mkstemp 2>/dev/null /// trying to create first temp file... /// file exists extern crate libc; #[macro_use(print_err)] extern crate apue; use std::ffi::{CString, CStr}; use std::io; use std::mem; fn make_temp(template: *mut libc::c_char) -> Result<String, String> { unsafe { let fd = libc::mkstemp(template); if fd < 0 { return Err("can't create tmp dir".to_owned()); } print_err!("temp name = {:?}", CStr::from_ptr(template)); libc::close(fd); let mut stat: libc::stat = mem::uninitialized(); if libc::stat(template, &mut stat) < 0 { if io::Error::last_os_error().raw_os_error().unwrap() == libc::ENOENT { println!("file doesn’t exist"); } else { return Err("stat failed".to_owned()); } } else { println!("file exists"); libc::unlink(template); } Ok(CString::from_raw(template).into_string().unwrap()) } } fn ma
{ let good_template = CString::new("/tmp/dirXXXXXX").unwrap(); println!("trying to create first temp file..."); let res = make_temp(good_template.into_raw()); print_err!("{:?}", res); // the second part with the bad template I was just // unable to do in rust, seems that the type safety // was good enough that even after 30 minutes I couldn't // succeed }
in()
identifier_name
seal.rs
// Copyright 2015-2017 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. //! Spec seal deserialization. use hash::*; use uint::Uint; use bytes::Bytes; /// Ethereum seal. #[derive(Debug, PartialEq, Deserialize)] pub struct Ethereum { /// Seal nonce. pub nonce: H64, /// Seal mix hash. #[serde(rename="mixHash")] pub mix_hash: H256, } /// AuthorityRound seal. #[derive(Debug, PartialEq, Deserialize)] pub struct AuthorityRoundSeal { /// Seal step.
pub step: Uint, /// Seal signature. pub signature: H520, } /// Tendermint seal. #[derive(Debug, PartialEq, Deserialize)] pub struct TendermintSeal { /// Seal round. pub round: Uint, /// Proposal seal signature. pub proposal: H520, /// Proposal seal signature. pub precommits: Vec<H520>, } /// Seal variants. #[derive(Debug, PartialEq, Deserialize)] pub enum Seal { /// Ethereum seal. #[serde(rename="ethereum")] Ethereum(Ethereum), /// AuthorityRound seal. #[serde(rename="authorityRound")] AuthorityRound(AuthorityRoundSeal), /// Tendermint seal. #[serde(rename="tendermint")] Tendermint(TendermintSeal), /// Generic seal. #[serde(rename="generic")] Generic(Bytes), } #[cfg(test)] mod tests { use serde_json; use spec::Seal; #[test] fn seal_deserialization() { let s = r#"[{ "ethereum": { "nonce": "0x0000000000000042", "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" } },{ "generic": "0xe011bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa" },{ "authorityRound": { "step": "0x0", "signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" } },{ "tendermint": { "round": "0x0", "proposal": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "precommits": [ "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" ] } }]"#; let _deserialized: Vec<Seal> = serde_json::from_str(s).unwrap(); // TODO: validate all fields } }
random_line_split
seal.rs
// Copyright 2015-2017 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. //! Spec seal deserialization. use hash::*; use uint::Uint; use bytes::Bytes; /// Ethereum seal. #[derive(Debug, PartialEq, Deserialize)] pub struct Ethereum { /// Seal nonce. pub nonce: H64, /// Seal mix hash. #[serde(rename="mixHash")] pub mix_hash: H256, } /// AuthorityRound seal. #[derive(Debug, PartialEq, Deserialize)] pub struct
{ /// Seal step. pub step: Uint, /// Seal signature. pub signature: H520, } /// Tendermint seal. #[derive(Debug, PartialEq, Deserialize)] pub struct TendermintSeal { /// Seal round. pub round: Uint, /// Proposal seal signature. pub proposal: H520, /// Proposal seal signature. pub precommits: Vec<H520>, } /// Seal variants. #[derive(Debug, PartialEq, Deserialize)] pub enum Seal { /// Ethereum seal. #[serde(rename="ethereum")] Ethereum(Ethereum), /// AuthorityRound seal. #[serde(rename="authorityRound")] AuthorityRound(AuthorityRoundSeal), /// Tendermint seal. #[serde(rename="tendermint")] Tendermint(TendermintSeal), /// Generic seal. #[serde(rename="generic")] Generic(Bytes), } #[cfg(test)] mod tests { use serde_json; use spec::Seal; #[test] fn seal_deserialization() { let s = r#"[{ "ethereum": { "nonce": "0x0000000000000042", "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" } },{ "generic": "0xe011bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa" },{ "authorityRound": { "step": "0x0", "signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" } },{ "tendermint": { "round": "0x0", "proposal": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "precommits": [ "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" ] } }]"#; let _deserialized: Vec<Seal> = serde_json::from_str(s).unwrap(); // TODO: validate all fields } }
AuthorityRoundSeal
identifier_name
issue-22645.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::ops::Add; trait Scalar {} impl Scalar for f64 {} struct Bob; impl<RHS: Scalar> Add <RHS> for Bob { type Output = Bob; fn add(self, rhs : RHS) -> Bob { Bob } } fn main() {
let b = Bob + 3.5; b + 3 //~ ERROR E0277 //~^ ERROR: mismatched types }
random_line_split
issue-22645.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::ops::Add; trait Scalar {} impl Scalar for f64 {} struct Bob; impl<RHS: Scalar> Add <RHS> for Bob { type Output = Bob; fn add(self, rhs : RHS) -> Bob
} fn main() { let b = Bob + 3.5; b + 3 //~ ERROR E0277 //~^ ERROR: mismatched types }
{ Bob }
identifier_body
issue-22645.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::ops::Add; trait Scalar {} impl Scalar for f64 {} struct Bob; impl<RHS: Scalar> Add <RHS> for Bob { type Output = Bob; fn
(self, rhs : RHS) -> Bob { Bob } } fn main() { let b = Bob + 3.5; b + 3 //~ ERROR E0277 //~^ ERROR: mismatched types }
add
identifier_name
objdump.rs
extern crate capstone; extern crate macho; use capstone::prelude::*; use std::env; use std::fs; use std::io::Read; use std::process; fn main()
if segment.segname == "__TEXT" { for section in segment.sections { if section.sectname == "__text" { let text = &buf[section.offset as usize ..(u64::from(section.offset) + section.size) as usize]; match cs.disasm_all(text, section.addr) { Ok(insns) => { println!("Got {} instructions", insns.len()); for i in insns.iter() { println!("{}", i); } } Err(err) => { println!("Error: {}", err); process::exit(1); } } return; } } } } panic!("No __TEXT segment"); }
{ let cs = Capstone::new() .x86() .mode(arch::x86::ArchMode::Mode64) .build() .expect("Failed to create capstone handle"); let args: Vec<_> = env::args().collect(); if args.len() != 2 { println!("Usage: {} <file>", args[0]); return; } let mut fh = fs::File::open(&args[1]).unwrap(); let mut buf: Vec<u8> = Vec::new(); let _ = fh.read_to_end(&mut buf); let header = macho::MachObject::parse(&buf[..]).unwrap(); // Find the text segment for segment in header.segments {
identifier_body
objdump.rs
extern crate capstone; extern crate macho; use capstone::prelude::*; use std::env; use std::fs; use std::io::Read; use std::process; fn
() { let cs = Capstone::new() .x86() .mode(arch::x86::ArchMode::Mode64) .build() .expect("Failed to create capstone handle"); let args: Vec<_> = env::args().collect(); if args.len()!= 2 { println!("Usage: {} <file>", args[0]); return; } let mut fh = fs::File::open(&args[1]).unwrap(); let mut buf: Vec<u8> = Vec::new(); let _ = fh.read_to_end(&mut buf); let header = macho::MachObject::parse(&buf[..]).unwrap(); // Find the text segment for segment in header.segments { if segment.segname == "__TEXT" { for section in segment.sections { if section.sectname == "__text" { let text = &buf[section.offset as usize ..(u64::from(section.offset) + section.size) as usize]; match cs.disasm_all(text, section.addr) { Ok(insns) => { println!("Got {} instructions", insns.len()); for i in insns.iter() { println!("{}", i); } } Err(err) => { println!("Error: {}", err); process::exit(1); } } return; } } } } panic!("No __TEXT segment"); }
main
identifier_name
objdump.rs
extern crate capstone; extern crate macho; use capstone::prelude::*; use std::env; use std::fs; use std::io::Read; use std::process; fn main() { let cs = Capstone::new() .x86() .mode(arch::x86::ArchMode::Mode64) .build() .expect("Failed to create capstone handle"); let args: Vec<_> = env::args().collect(); if args.len()!= 2
let mut fh = fs::File::open(&args[1]).unwrap(); let mut buf: Vec<u8> = Vec::new(); let _ = fh.read_to_end(&mut buf); let header = macho::MachObject::parse(&buf[..]).unwrap(); // Find the text segment for segment in header.segments { if segment.segname == "__TEXT" { for section in segment.sections { if section.sectname == "__text" { let text = &buf[section.offset as usize ..(u64::from(section.offset) + section.size) as usize]; match cs.disasm_all(text, section.addr) { Ok(insns) => { println!("Got {} instructions", insns.len()); for i in insns.iter() { println!("{}", i); } } Err(err) => { println!("Error: {}", err); process::exit(1); } } return; } } } } panic!("No __TEXT segment"); }
{ println!("Usage: {} <file>", args[0]); return; }
conditional_block
simd-intrinsic-float-minmax.rs
// run-pass // ignore-emscripten // Test that the simd_f{min,max} intrinsics produce the correct results. #![feature(repr_simd, platform_intrinsics)] #![allow(non_camel_case_types)] #[repr(simd)] #[derive(Copy, Clone, PartialEq, Debug)] struct
(pub f32, pub f32, pub f32, pub f32); extern "platform-intrinsic" { fn simd_fmin<T>(x: T, y: T) -> T; fn simd_fmax<T>(x: T, y: T) -> T; } fn main() { let x = f32x4(1.0, 2.0, 3.0, 4.0); let y = f32x4(2.0, 1.0, 4.0, 3.0); #[cfg(not(any(target_arch = "mips", target_arch = "mips64")))] let nan = f32::NAN; // MIPS hardware treats f32::NAN as SNAN. Clear the signaling bit. // See https://github.com/rust-lang/rust/issues/52746. #[cfg(any(target_arch = "mips", target_arch = "mips64"))] let nan = f32::from_bits(f32::NAN.to_bits() - 1); let n = f32x4(nan, nan, nan, nan); unsafe { let min0 = simd_fmin(x, y); let min1 = simd_fmin(y, x); assert_eq!(min0, min1); let e = f32x4(1.0, 1.0, 3.0, 3.0); assert_eq!(min0, e); let minn = simd_fmin(x, n); assert_eq!(minn, x); let minn = simd_fmin(y, n); assert_eq!(minn, y); let max0 = simd_fmax(x, y); let max1 = simd_fmax(y, x); assert_eq!(max0, max1); let e = f32x4(2.0, 2.0, 4.0, 4.0); assert_eq!(max0, e); let maxn = simd_fmax(x, n); assert_eq!(maxn, x); let maxn = simd_fmax(y, n); assert_eq!(maxn, y); } }
f32x4
identifier_name
simd-intrinsic-float-minmax.rs
// run-pass // ignore-emscripten // Test that the simd_f{min,max} intrinsics produce the correct results. #![feature(repr_simd, platform_intrinsics)] #![allow(non_camel_case_types)] #[repr(simd)] #[derive(Copy, Clone, PartialEq, Debug)] struct f32x4(pub f32, pub f32, pub f32, pub f32); extern "platform-intrinsic" { fn simd_fmin<T>(x: T, y: T) -> T; fn simd_fmax<T>(x: T, y: T) -> T; } fn main() { let x = f32x4(1.0, 2.0, 3.0, 4.0); let y = f32x4(2.0, 1.0, 4.0, 3.0); #[cfg(not(any(target_arch = "mips", target_arch = "mips64")))] let nan = f32::NAN; // MIPS hardware treats f32::NAN as SNAN. Clear the signaling bit. // See https://github.com/rust-lang/rust/issues/52746. #[cfg(any(target_arch = "mips", target_arch = "mips64"))] let nan = f32::from_bits(f32::NAN.to_bits() - 1); let n = f32x4(nan, nan, nan, nan); unsafe { let min0 = simd_fmin(x, y); let min1 = simd_fmin(y, x); assert_eq!(min0, min1); let e = f32x4(1.0, 1.0, 3.0, 3.0); assert_eq!(min0, e); let minn = simd_fmin(x, n); assert_eq!(minn, x);
assert_eq!(minn, y); let max0 = simd_fmax(x, y); let max1 = simd_fmax(y, x); assert_eq!(max0, max1); let e = f32x4(2.0, 2.0, 4.0, 4.0); assert_eq!(max0, e); let maxn = simd_fmax(x, n); assert_eq!(maxn, x); let maxn = simd_fmax(y, n); assert_eq!(maxn, y); } }
let minn = simd_fmin(y, n);
random_line_split
hashmap-memory.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern crate collections; extern crate debug; /** A somewhat reduced test case to expose some Valgrind issues. This originally came from the word-count benchmark. */ pub fn map(filename: String, emit: map_reduce::putter) { emit(filename, "1".to_string()); } mod map_reduce { use std::collections::HashMap; use std::str; use std::task; pub type putter<'a> = |String, String|: 'a; pub type mapper = extern fn(String, putter); enum ctrl_proto { find_reducer(Vec<u8>, Sender<int>), mapper_done, } fn start_mappers(ctrl: Sender<ctrl_proto>, inputs: Vec<String>) { for i in inputs.iter() { let ctrl = ctrl.clone(); let i = i.clone(); task::spawn(proc() map_task(ctrl.clone(), i.clone()) ); } } fn map_task(ctrl: Sender<ctrl_proto>, input: String)
ctrl_clone.send(mapper_done); } pub fn map_reduce(inputs: Vec<String>) { let (tx, rx) = channel(); // This task becomes the master control task. It spawns others // to do the rest. let mut reducers: HashMap<String, int>; reducers = HashMap::new(); start_mappers(tx, inputs.clone()); let mut num_mappers = inputs.len() as int; while num_mappers > 0 { match rx.recv() { mapper_done => { num_mappers -= 1; } find_reducer(k, cc) => { let mut c; match reducers.find(&str::from_utf8( k.as_slice()).unwrap().to_string()) { Some(&_c) => { c = _c; } None => { c = 0; } } cc.send(c); } } } } } pub fn main() { map_reduce::map_reduce( vec!("../src/test/run-pass/hashmap-memory.rs".to_string())); }
{ let mut intermediates = HashMap::new(); fn emit(im: &mut HashMap<String, int>, ctrl: Sender<ctrl_proto>, key: String, _val: String) { if im.contains_key(&key) { return; } let (tx, rx) = channel(); println!("sending find_reducer"); ctrl.send(find_reducer(Vec::from_slice(key.as_bytes()), tx)); println!("receiving"); let c = rx.recv(); println!("{:?}", c); im.insert(key, c); } let ctrl_clone = ctrl.clone(); ::map(input, |a,b| emit(&mut intermediates, ctrl.clone(), a, b) );
identifier_body
hashmap-memory.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern crate collections; extern crate debug; /** A somewhat reduced test case to expose some Valgrind issues. This originally came from the word-count benchmark. */ pub fn map(filename: String, emit: map_reduce::putter) { emit(filename, "1".to_string()); } mod map_reduce { use std::collections::HashMap; use std::str; use std::task; pub type putter<'a> = |String, String|: 'a; pub type mapper = extern fn(String, putter); enum ctrl_proto { find_reducer(Vec<u8>, Sender<int>), mapper_done, } fn start_mappers(ctrl: Sender<ctrl_proto>, inputs: Vec<String>) { for i in inputs.iter() { let ctrl = ctrl.clone(); let i = i.clone(); task::spawn(proc() map_task(ctrl.clone(), i.clone()) ); } } fn map_task(ctrl: Sender<ctrl_proto>, input: String) { let mut intermediates = HashMap::new(); fn emit(im: &mut HashMap<String, int>, ctrl: Sender<ctrl_proto>, key: String, _val: String) { if im.contains_key(&key) { return; } let (tx, rx) = channel(); println!("sending find_reducer"); ctrl.send(find_reducer(Vec::from_slice(key.as_bytes()), tx)); println!("receiving"); let c = rx.recv(); println!("{:?}", c); im.insert(key, c); } let ctrl_clone = ctrl.clone(); ::map(input, |a,b| emit(&mut intermediates, ctrl.clone(), a, b) ); ctrl_clone.send(mapper_done); } pub fn map_reduce(inputs: Vec<String>) { let (tx, rx) = channel(); // This task becomes the master control task. It spawns others // to do the rest. let mut reducers: HashMap<String, int>; reducers = HashMap::new(); start_mappers(tx, inputs.clone()); let mut num_mappers = inputs.len() as int; while num_mappers > 0 { match rx.recv() { mapper_done => { num_mappers -= 1; } find_reducer(k, cc) => { let mut c; match reducers.find(&str::from_utf8( k.as_slice()).unwrap().to_string()) { Some(&_c) => { c = _c; } None => { c = 0; } } cc.send(c); } } } } } pub fn
() { map_reduce::map_reduce( vec!("../src/test/run-pass/hashmap-memory.rs".to_string())); }
main
identifier_name
hashmap-memory.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern crate collections; extern crate debug; /** A somewhat reduced test case to expose some Valgrind issues. This originally came from the word-count benchmark. */ pub fn map(filename: String, emit: map_reduce::putter) { emit(filename, "1".to_string()); } mod map_reduce { use std::collections::HashMap; use std::str; use std::task; pub type putter<'a> = |String, String|: 'a; pub type mapper = extern fn(String, putter); enum ctrl_proto { find_reducer(Vec<u8>, Sender<int>), mapper_done, } fn start_mappers(ctrl: Sender<ctrl_proto>, inputs: Vec<String>) { for i in inputs.iter() { let ctrl = ctrl.clone(); let i = i.clone(); task::spawn(proc() map_task(ctrl.clone(), i.clone()) ); } } fn map_task(ctrl: Sender<ctrl_proto>, input: String) { let mut intermediates = HashMap::new(); fn emit(im: &mut HashMap<String, int>, ctrl: Sender<ctrl_proto>, key: String, _val: String) { if im.contains_key(&key) { return; } let (tx, rx) = channel(); println!("sending find_reducer"); ctrl.send(find_reducer(Vec::from_slice(key.as_bytes()), tx)); println!("receiving"); let c = rx.recv(); println!("{:?}", c); im.insert(key, c); } let ctrl_clone = ctrl.clone(); ::map(input, |a,b| emit(&mut intermediates, ctrl.clone(), a, b) ); ctrl_clone.send(mapper_done); } pub fn map_reduce(inputs: Vec<String>) { let (tx, rx) = channel(); // This task becomes the master control task. It spawns others // to do the rest. let mut reducers: HashMap<String, int>; reducers = HashMap::new(); start_mappers(tx, inputs.clone()); let mut num_mappers = inputs.len() as int; while num_mappers > 0 { match rx.recv() { mapper_done => { num_mappers -= 1; } find_reducer(k, cc) => { let mut c; match reducers.find(&str::from_utf8( k.as_slice()).unwrap().to_string()) { Some(&_c) =>
None => { c = 0; } } cc.send(c); } } } } } pub fn main() { map_reduce::map_reduce( vec!("../src/test/run-pass/hashmap-memory.rs".to_string())); }
{ c = _c; }
conditional_block
hashmap-memory.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern crate collections; extern crate debug; /** A somewhat reduced test case to expose some Valgrind issues. This originally came from the word-count benchmark. */ pub fn map(filename: String, emit: map_reduce::putter) { emit(filename, "1".to_string()); } mod map_reduce { use std::collections::HashMap; use std::str; use std::task; pub type putter<'a> = |String, String|: 'a; pub type mapper = extern fn(String, putter); enum ctrl_proto { find_reducer(Vec<u8>, Sender<int>), mapper_done, } fn start_mappers(ctrl: Sender<ctrl_proto>, inputs: Vec<String>) { for i in inputs.iter() { let ctrl = ctrl.clone(); let i = i.clone(); task::spawn(proc() map_task(ctrl.clone(), i.clone()) ); } } fn map_task(ctrl: Sender<ctrl_proto>, input: String) { let mut intermediates = HashMap::new(); fn emit(im: &mut HashMap<String, int>, ctrl: Sender<ctrl_proto>, key: String, _val: String) { if im.contains_key(&key) { return; }
println!("sending find_reducer"); ctrl.send(find_reducer(Vec::from_slice(key.as_bytes()), tx)); println!("receiving"); let c = rx.recv(); println!("{:?}", c); im.insert(key, c); } let ctrl_clone = ctrl.clone(); ::map(input, |a,b| emit(&mut intermediates, ctrl.clone(), a, b) ); ctrl_clone.send(mapper_done); } pub fn map_reduce(inputs: Vec<String>) { let (tx, rx) = channel(); // This task becomes the master control task. It spawns others // to do the rest. let mut reducers: HashMap<String, int>; reducers = HashMap::new(); start_mappers(tx, inputs.clone()); let mut num_mappers = inputs.len() as int; while num_mappers > 0 { match rx.recv() { mapper_done => { num_mappers -= 1; } find_reducer(k, cc) => { let mut c; match reducers.find(&str::from_utf8( k.as_slice()).unwrap().to_string()) { Some(&_c) => { c = _c; } None => { c = 0; } } cc.send(c); } } } } } pub fn main() { map_reduce::map_reduce( vec!("../src/test/run-pass/hashmap-memory.rs".to_string())); }
let (tx, rx) = channel();
random_line_split
unsafe-cell-hides-niche.rs
// For rust-lang/rust#68303: the contents of `UnsafeCell<T>` cannot // participate in the niche-optimization for enum discriminants. This // test checks that an `Option<UnsafeCell<NonZeroU32>>` has the same // size in memory as an `Option<UnsafeCell<u32>>` (namely, 8 bytes). // run-pass #![feature(no_niche)] use std::cell::UnsafeCell; use std::mem::size_of; use std::num::NonZeroU32 as N32; struct Wrapper<T>(T); #[repr(transparent)] struct Transparent<T>(T); #[repr(no_niche)] struct NoNiche<T>(T); fn main() { assert_eq!(size_of::<Option<Wrapper<u32>>>(), 8); assert_eq!(size_of::<Option<Wrapper<N32>>>(), 4);
assert_eq!(size_of::<Option<Transparent<u32>>>(), 8); assert_eq!(size_of::<Option<Transparent<N32>>>(), 4); assert_eq!(size_of::<Option<NoNiche<u32>>>(), 8); assert_eq!(size_of::<Option<NoNiche<N32>>>(), 8); assert_eq!(size_of::<Option<UnsafeCell<u32>>>(), 8); assert_eq!(size_of::<Option<UnsafeCell<N32>>>(), 8); }
random_line_split
unsafe-cell-hides-niche.rs
// For rust-lang/rust#68303: the contents of `UnsafeCell<T>` cannot // participate in the niche-optimization for enum discriminants. This // test checks that an `Option<UnsafeCell<NonZeroU32>>` has the same // size in memory as an `Option<UnsafeCell<u32>>` (namely, 8 bytes). // run-pass #![feature(no_niche)] use std::cell::UnsafeCell; use std::mem::size_of; use std::num::NonZeroU32 as N32; struct Wrapper<T>(T); #[repr(transparent)] struct Transparent<T>(T); #[repr(no_niche)] struct
<T>(T); fn main() { assert_eq!(size_of::<Option<Wrapper<u32>>>(), 8); assert_eq!(size_of::<Option<Wrapper<N32>>>(), 4); assert_eq!(size_of::<Option<Transparent<u32>>>(), 8); assert_eq!(size_of::<Option<Transparent<N32>>>(), 4); assert_eq!(size_of::<Option<NoNiche<u32>>>(), 8); assert_eq!(size_of::<Option<NoNiche<N32>>>(), 8); assert_eq!(size_of::<Option<UnsafeCell<u32>>>(), 8); assert_eq!(size_of::<Option<UnsafeCell<N32>>>(), 8); }
NoNiche
identifier_name
haskell_import.rs
extern crate curryrs; use std::env; use std::ffi::CStr; use std::path::Path; use curryrs::hsrt; use curryrs::types::*; #[link(name = "HShtest-0.1.0.0-inplace", kind = "static")] extern "C" { pub fn triple(x: I32) -> I32; pub fn getProgNameStr() -> Str; } fn
(x: I32) -> I32 { hsrt::start(); unsafe { triple(x) } } #[test] fn ffi_test() { // TODO Use the threaded Haskell runtime to let tests run safely in // parallel, allowing separate test functions. assert_eq!(900, triple_num(300)); hsrt::start(); let prog_name = unsafe { getProgNameStr() }; assert!(!prog_name.is_null()); let prog_name_str = unsafe { CStr::from_ptr(prog_name) }.to_str().unwrap(); let argv0 = env::args().nth(0).unwrap(); let argv0_file_name = Path::new(&argv0).file_name().unwrap(); assert_eq!(prog_name_str, argv0_file_name.to_str().unwrap()); }
triple_num
identifier_name
haskell_import.rs
extern crate curryrs; use std::env; use std::ffi::CStr; use std::path::Path; use curryrs::hsrt; use curryrs::types::*; #[link(name = "HShtest-0.1.0.0-inplace", kind = "static")] extern "C" { pub fn triple(x: I32) -> I32; pub fn getProgNameStr() -> Str; } fn triple_num(x: I32) -> I32
#[test] fn ffi_test() { // TODO Use the threaded Haskell runtime to let tests run safely in // parallel, allowing separate test functions. assert_eq!(900, triple_num(300)); hsrt::start(); let prog_name = unsafe { getProgNameStr() }; assert!(!prog_name.is_null()); let prog_name_str = unsafe { CStr::from_ptr(prog_name) }.to_str().unwrap(); let argv0 = env::args().nth(0).unwrap(); let argv0_file_name = Path::new(&argv0).file_name().unwrap(); assert_eq!(prog_name_str, argv0_file_name.to_str().unwrap()); }
{ hsrt::start(); unsafe { triple(x) } }
identifier_body
haskell_import.rs
extern crate curryrs; use std::env; use std::ffi::CStr; use std::path::Path; use curryrs::hsrt; use curryrs::types::*; #[link(name = "HShtest-0.1.0.0-inplace", kind = "static")] extern "C" { pub fn triple(x: I32) -> I32; pub fn getProgNameStr() -> Str; } fn triple_num(x: I32) -> I32 { hsrt::start(); unsafe { triple(x) } } #[test] fn ffi_test() { // TODO Use the threaded Haskell runtime to let tests run safely in // parallel, allowing separate test functions. assert_eq!(900, triple_num(300));
assert!(!prog_name.is_null()); let prog_name_str = unsafe { CStr::from_ptr(prog_name) }.to_str().unwrap(); let argv0 = env::args().nth(0).unwrap(); let argv0_file_name = Path::new(&argv0).file_name().unwrap(); assert_eq!(prog_name_str, argv0_file_name.to_str().unwrap()); }
hsrt::start(); let prog_name = unsafe { getProgNameStr() };
random_line_split
test_slice_transform.rs
extern crate rocksdb; use rocksdb::{DB, Options, SliceTransform}; #[test] pub fn test_slice_transform() { let path = "_rust_rocksdb_slicetransform_test"; let a1: Box<[u8]> = key(b"aaa1"); let a2: Box<[u8]> = key(b"aaa2"); let b1: Box<[u8]> = key(b"bbb1"); let b2: Box<[u8]> = key(b"bbb2"); fn first_three(k: &[u8]) -> Vec<u8> { k.iter().take(3).cloned().collect() } let prefix_extractor = SliceTransform::create("first_three", first_three, None); let mut opts = Options::default(); opts.create_if_missing(true); opts.set_prefix_extractor(prefix_extractor); let db = DB::open(&opts, path).unwrap(); assert!(db.put(&*a1, &*a1).is_ok()); assert!(db.put(&*a2, &*a2).is_ok()); assert!(db.put(&*b1, &*b1).is_ok()); assert!(db.put(&*b2, &*b2).is_ok()); fn cba(input: &Box<[u8]>) -> Box<[u8]>
fn key(k: &[u8]) -> Box<[u8]> { k.to_vec().into_boxed_slice() } { let expected = vec![(cba(&a1), cba(&a1)), (cba(&a2), cba(&a2))]; let a_iterator = db.prefix_iterator(b"aaa"); assert_eq!(a_iterator.collect::<Vec<_>>(), expected) } { let expected = vec![(cba(&b1), cba(&b1)), (cba(&b2), cba(&b2))]; let b_iterator = db.prefix_iterator(b"bbb"); assert_eq!(b_iterator.collect::<Vec<_>>(), expected) } }
{ input.iter().cloned().collect::<Vec<_>>().into_boxed_slice() }
identifier_body
test_slice_transform.rs
extern crate rocksdb; use rocksdb::{DB, Options, SliceTransform}; #[test] pub fn test_slice_transform() { let path = "_rust_rocksdb_slicetransform_test"; let a1: Box<[u8]> = key(b"aaa1"); let a2: Box<[u8]> = key(b"aaa2"); let b1: Box<[u8]> = key(b"bbb1"); let b2: Box<[u8]> = key(b"bbb2"); fn first_three(k: &[u8]) -> Vec<u8> { k.iter().take(3).cloned().collect() } let prefix_extractor = SliceTransform::create("first_three", first_three, None); let mut opts = Options::default(); opts.create_if_missing(true); opts.set_prefix_extractor(prefix_extractor); let db = DB::open(&opts, path).unwrap(); assert!(db.put(&*a1, &*a1).is_ok()); assert!(db.put(&*a2, &*a2).is_ok()); assert!(db.put(&*b1, &*b1).is_ok()); assert!(db.put(&*b2, &*b2).is_ok()); fn cba(input: &Box<[u8]>) -> Box<[u8]> { input.iter().cloned().collect::<Vec<_>>().into_boxed_slice() } fn
(k: &[u8]) -> Box<[u8]> { k.to_vec().into_boxed_slice() } { let expected = vec![(cba(&a1), cba(&a1)), (cba(&a2), cba(&a2))]; let a_iterator = db.prefix_iterator(b"aaa"); assert_eq!(a_iterator.collect::<Vec<_>>(), expected) } { let expected = vec![(cba(&b1), cba(&b1)), (cba(&b2), cba(&b2))]; let b_iterator = db.prefix_iterator(b"bbb"); assert_eq!(b_iterator.collect::<Vec<_>>(), expected) } }
key
identifier_name
test_slice_transform.rs
extern crate rocksdb; use rocksdb::{DB, Options, SliceTransform}; #[test] pub fn test_slice_transform() { let path = "_rust_rocksdb_slicetransform_test"; let a1: Box<[u8]> = key(b"aaa1"); let a2: Box<[u8]> = key(b"aaa2"); let b1: Box<[u8]> = key(b"bbb1"); let b2: Box<[u8]> = key(b"bbb2"); fn first_three(k: &[u8]) -> Vec<u8> { k.iter().take(3).cloned().collect() } let prefix_extractor = SliceTransform::create("first_three", first_three, None); let mut opts = Options::default(); opts.create_if_missing(true); opts.set_prefix_extractor(prefix_extractor); let db = DB::open(&opts, path).unwrap(); assert!(db.put(&*a1, &*a1).is_ok()); assert!(db.put(&*a2, &*a2).is_ok()); assert!(db.put(&*b1, &*b1).is_ok()); assert!(db.put(&*b2, &*b2).is_ok()); fn cba(input: &Box<[u8]>) -> Box<[u8]> { input.iter().cloned().collect::<Vec<_>>().into_boxed_slice() }
fn key(k: &[u8]) -> Box<[u8]> { k.to_vec().into_boxed_slice() } { let expected = vec![(cba(&a1), cba(&a1)), (cba(&a2), cba(&a2))]; let a_iterator = db.prefix_iterator(b"aaa"); assert_eq!(a_iterator.collect::<Vec<_>>(), expected) } { let expected = vec![(cba(&b1), cba(&b1)), (cba(&b2), cba(&b2))]; let b_iterator = db.prefix_iterator(b"bbb"); assert_eq!(b_iterator.collect::<Vec<_>>(), expected) } }
random_line_split
named_lifetime_parameters.rs
#![allow(warnings)] // **Exercise 1.** Modify the signature of `get` to use an explicit // lifetime name in the return type. // // **Exercise 2.** Change the signature of `get` to: // // fn get<'a>(&'a mut self, key: &'a K) -> Option<&'a V> // // - Which test fails to compile? // - Can you explain why? // // **Exercise 3.** Rewrite `insert` to not just push. pub struct Map<K: Eq, V> { elements: Vec<(K, V)>, } impl<K: Eq, V> Map<K, V> { pub fn new() -> Self { Map { elements: vec![] } } pub fn insert(&mut self, key: K, value: V) { self.elements.push((key, value)); } pub fn get(&self, key: &K) -> Option<&V> { self.elements.iter().rev().find(|pair| pair.0 == *key).map(|pair| &pair.1) } } #[test] fn
() { let mut map = Map::new(); map.insert('a', format!("alpha")); map.insert('b', format!("beta")); map.insert('g', format!("gamma")); assert_eq!(map.get(&'a'), Some(&format!("alpha"))); assert_eq!(map.get(&'b'), Some(&format!("beta"))); assert_eq!(map.get(&'g'), Some(&format!("gamma"))); map.insert('a', format!("xxx")); assert_eq!(map.get(&'a'), Some(&format!("xxx"))); assert_eq!(map.get(&'b'), Some(&format!("beta"))); assert_eq!(map.get(&'g'), Some(&format!("gamma"))); } #[test] fn lock_receiver() { let mut map = Map::new(); let mut string = format!("alpha"); map.insert('a', string.clone()); let r = map.get(&'a'); assert_eq!(r, Some(&string)); }
basic
identifier_name
named_lifetime_parameters.rs
#![allow(warnings)] // **Exercise 1.** Modify the signature of `get` to use an explicit // lifetime name in the return type. // // **Exercise 2.** Change the signature of `get` to: // // fn get<'a>(&'a mut self, key: &'a K) -> Option<&'a V> // // - Which test fails to compile? // - Can you explain why? // // **Exercise 3.** Rewrite `insert` to not just push. pub struct Map<K: Eq, V> { elements: Vec<(K, V)>, } impl<K: Eq, V> Map<K, V> { pub fn new() -> Self { Map { elements: vec![] } } pub fn insert(&mut self, key: K, value: V) { self.elements.push((key, value)); } pub fn get(&self, key: &K) -> Option<&V> { self.elements.iter().rev().find(|pair| pair.0 == *key).map(|pair| &pair.1) } } #[test] fn basic() { let mut map = Map::new(); map.insert('a', format!("alpha")); map.insert('b', format!("beta")); map.insert('g', format!("gamma")); assert_eq!(map.get(&'a'), Some(&format!("alpha"))); assert_eq!(map.get(&'b'), Some(&format!("beta"))); assert_eq!(map.get(&'g'), Some(&format!("gamma"))); map.insert('a', format!("xxx")); assert_eq!(map.get(&'a'), Some(&format!("xxx"))); assert_eq!(map.get(&'b'), Some(&format!("beta"))); assert_eq!(map.get(&'g'), Some(&format!("gamma"))); } #[test] fn lock_receiver()
{ let mut map = Map::new(); let mut string = format!("alpha"); map.insert('a', string.clone()); let r = map.get(&'a'); assert_eq!(r, Some(&string)); }
identifier_body
named_lifetime_parameters.rs
#![allow(warnings)] // **Exercise 1.** Modify the signature of `get` to use an explicit // lifetime name in the return type. // // **Exercise 2.** Change the signature of `get` to: // // fn get<'a>(&'a mut self, key: &'a K) -> Option<&'a V> // // - Which test fails to compile? // - Can you explain why? // // **Exercise 3.** Rewrite `insert` to not just push. pub struct Map<K: Eq, V> { elements: Vec<(K, V)>, } impl<K: Eq, V> Map<K, V> { pub fn new() -> Self { Map { elements: vec![] } } pub fn insert(&mut self, key: K, value: V) { self.elements.push((key, value)); } pub fn get(&self, key: &K) -> Option<&V> { self.elements.iter().rev().find(|pair| pair.0 == *key).map(|pair| &pair.1) } } #[test] fn basic() { let mut map = Map::new(); map.insert('a', format!("alpha")); map.insert('b', format!("beta")); map.insert('g', format!("gamma")); assert_eq!(map.get(&'a'), Some(&format!("alpha"))); assert_eq!(map.get(&'b'), Some(&format!("beta"))); assert_eq!(map.get(&'g'), Some(&format!("gamma"))); map.insert('a', format!("xxx")); assert_eq!(map.get(&'a'), Some(&format!("xxx"))); assert_eq!(map.get(&'b'), Some(&format!("beta"))); assert_eq!(map.get(&'g'), Some(&format!("gamma"))); } #[test]
let r = map.get(&'a'); assert_eq!(r, Some(&string)); }
fn lock_receiver() { let mut map = Map::new(); let mut string = format!("alpha"); map.insert('a', string.clone());
random_line_split
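Exercise 3 in the record above asks for an insert that does more than push. A possible sketch (an assumption about the intended answer, not taken from the file): overwrite the value when the key is already present, so the map never stores duplicate keys.

impl<K: Eq, V> Map<K, V> {
    // Replaces the push-only `insert` above.
    pub fn insert(&mut self, key: K, value: V) {
        match self.elements.iter_mut().find(|pair| pair.0 == key) {
            Some(pair) => pair.1 = value,
            None => self.elements.push((key, value)),
        }
    }
}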
mem.rs
sleep_ms(period_ms); if chan.send(ProfilerMsg::Print).is_err() { break; } } }); } // Always spawn the memory profiler. If there is no timer thread it won't receive regular // `Print` events, but it will still receive the other events. spawn_named("Memory profiler".to_owned(), move || { let mut mem_profiler = Profiler::new(port); mem_profiler.start(); }); let mem_profiler_chan = ProfilerChan(chan); // Register the system memory reporter, which will run on the memory profiler's own thread. // It never needs to be unregistered, because as long as the memory profiler is running the // system memory reporter can make measurements. let system_reporter = box SystemReporter; mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(), system_reporter)); mem_profiler_chan } pub fn new(port: Receiver<ProfilerMsg>) -> Profiler { Profiler { port: port, reporters: HashMap::new(), } } pub fn start(&mut self) { loop { match self.port.recv() { Ok(msg) => { if!self.handle_msg(msg) { break } } _ => break } } } fn handle_msg(&mut self, msg: ProfilerMsg) -> bool { match msg { ProfilerMsg::RegisterReporter(name, reporter) => { // Panic if it has already been registered. let name_clone = name.clone(); match self.reporters.insert(name, reporter) { None => true, Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use", name_clone)), } }, ProfilerMsg::UnregisterReporter(name) => { // Panic if it hasn't previously been registered. match self.reporters.remove(&name) { Some(_) => true, None => panic!(format!("UnregisterReporter: '{}' name is unknown", &name)), } }, ProfilerMsg::Print => { self.handle_print_msg(); true }, ProfilerMsg::Exit => false } } fn handle_print_msg(&self) { println!("Begin memory reports"); println!("|"); // Collect reports from memory reporters. // // This serializes the report-gathering. It might be worth creating a new scoped thread for // each reporter once we have enough of them. // // If anything goes wrong with a reporter, we just skip it. let mut forest = ReportsForest::new(); for reporter in self.reporters.values() { let (chan, port) = channel(); if reporter.collect_reports(ReportsChan(chan)) { if let Ok(reports) = port.recv() { for report in reports.iter() { forest.insert(&report.path, report.size); } } } } forest.print(); println!("|"); println!("End memory reports"); println!(""); } } /// A collection of one or more reports with the same initial path segment. A ReportsTree /// containing a single node is described as "degenerate". struct ReportsTree { /// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location. /// For interior nodes, this is the sum of the sizes of all its child nodes. size: usize, /// For leaf nodes, this is the count of all reports that mapped to this location. /// For interor nodes, this is always zero. count: u32, /// The segment from the report path that maps to this node. path_seg: String, /// Child nodes. children: Vec<ReportsTree>, } impl ReportsTree { fn new(path_seg: String) -> ReportsTree { ReportsTree { size: 0, count: 0, path_seg: path_seg, children: vec![] } } // Searches the tree's children for a path_seg match, and returns the index if there is a // match. fn find_child(&self, path_seg: &String) -> Option<usize> { for (i, child) in self.children.iter().enumerate() { if child.path_seg == *path_seg { return Some(i); } } None } // Insert the path and size into the tree, adding any nodes as necessary. 
fn insert(&mut self, path: &[String], size: usize) { let mut t: &mut ReportsTree = self; for path_seg in path.iter() { let i = match t.find_child(&path_seg) { Some(i) => i, None => { let new_t = ReportsTree::new(path_seg.clone()); t.children.push(new_t); t.children.len() - 1 }, }; let tmp = t; // this temporary is needed to satisfy the borrow checker t = &mut tmp.children[i]; } t.size += size; t.count += 1; } // Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once // all the reports have been inserted. fn compute_interior_node_sizes_and_sort(&mut self) -> usize { if!self.children.is_empty() { // Interior node. Derive its size from its children. if self.size!= 0 { // This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"]. panic!("one report's path is a sub-path of another report's path"); } for child in self.children.iter_mut() { self.size += child.compute_interior_node_sizes_and_sort(); } // Now that child sizes have been computed, we can sort the children. self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size)); } self.size } fn print(&self, depth: i32) { if!self.children.is_empty() { assert_eq!(self.count, 0); } let mut indent_str = String::new(); for _ in 0..depth { indent_str.push_str(" "); } let mebi = 1024f64 * 1024f64; let count_str = if self.count > 1 { format!(" {}", self.count) } else { "".to_owned() }; println!("|{}{:8.2} MiB -- {}{}", indent_str, (self.size as f64) / mebi, self.path_seg, count_str); for child in self.children.iter() { child.print(depth + 1); } } } /// A collection of ReportsTrees. It represents the data from multiple memory reports in a form /// that's good to print. struct ReportsForest { trees: HashMap<String, ReportsTree>, } impl ReportsForest { fn new() -> ReportsForest { ReportsForest { trees: HashMap::new(), } } // Insert the path and size into the forest, adding any trees and nodes as necessary. fn insert(&mut self, path: &[String], size: usize) { // Get the right tree, creating it if necessary. if!self.trees.contains_key(&path[0]) { self.trees.insert(path[0].clone(), ReportsTree::new(path[0].clone())); } let t = self.trees.get_mut(&path[0]).unwrap(); // Use tail() because the 0th path segment was used to find the right tree in the forest. t.insert(path.tail(), size); } fn print(&mut self) { // Fill in sizes of interior nodes, and recursively sort the sub-trees. for (_, tree) in self.trees.iter_mut() { tree.compute_interior_node_sizes_and_sort(); } // Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a // single node) come after non-degenerate trees. Secondary sort: alphabetical order of the // root node's path_seg. let mut v = vec![]; for (_, tree) in self.trees.iter() { v.push(tree); } v.sort_by(|a, b| { if a.children.is_empty() &&!b.children.is_empty() { Ordering::Greater } else if!a.children.is_empty() && b.children.is_empty() { Ordering::Less } else { a.path_seg.cmp(&b.path_seg) } }); // Print the forest. for tree in v.iter() { tree.print(0); // Print a blank line after non-degenerate trees. 
if!tree.children.is_empty() { println!("|"); } } } } //--------------------------------------------------------------------------- mod system_reporter { use libc::{c_char, c_int, c_void, size_t}; use profile_traits::mem::{Report, Reporter, ReportsChan}; use std::borrow::ToOwned; use std::ffi::CString; use std::mem::size_of; use std::ptr::null_mut; #[cfg(target_os="macos")] use task_info::task_basic_info::{virtual_size, resident_size}; /// Collects global measurements from the OS and heap allocators. pub struct SystemReporter; impl Reporter for SystemReporter { fn collect_reports(&self, reports_chan: ReportsChan) -> bool { let mut reports = vec![]; { let mut report = |path, size| { if let Some(size) = size { reports.push(Report { path: path, size: size }); } }; // Virtual and physical memory usage, as reported by the OS. report(path!["vsize"], get_vsize()); report(path!["resident"], get_resident()); // Memory segments, as reported by the OS. for seg in get_resident_segments().iter() { report(path!["resident-according-to-smaps", seg.0], Some(seg.1)); } // Total number of bytes allocated by the application on the system // heap. report(path!["system-heap-allocated"], get_system_heap_allocated()); // The descriptions of the following jemalloc measurements are taken // directly from the jemalloc documentation. // "Total number of bytes allocated by the application." report(path!["jemalloc-heap-allocated"], get_jemalloc_stat("stats.allocated")); // "Total number of bytes in active pages allocated by the application. // This is a multiple of the page size, and greater than or equal to // |stats.allocated|." report(path!["jemalloc-heap-active"], get_jemalloc_stat("stats.active")); // "Total number of bytes in chunks mapped on behalf of the application. // This is a multiple of the chunk size, and is at least as large as // |stats.active|. This does not include inactive chunks." report(path!["jemalloc-heap-mapped"], get_jemalloc_stat("stats.mapped")); } reports_chan.send(reports); true } } #[cfg(target_os="linux")] extern { fn mallinfo() -> struct_mallinfo; } #[cfg(target_os="linux")] #[repr(C)] pub struct struct_mallinfo { arena: c_int, ordblks: c_int, smblks: c_int, hblks: c_int, hblkhd: c_int, usmblks: c_int, fsmblks: c_int, uordblks: c_int, fordblks: c_int, keepcost: c_int, } #[cfg(target_os="linux")] fn get_system_heap_allocated() -> Option<usize> { let mut info: struct_mallinfo; unsafe { info = mallinfo(); } // The documentation in the glibc man page makes it sound like |uordblks| // would suffice, but that only gets the small allocations that are put in // the brk heap. We need |hblkhd| as well to get the larger allocations // that are mmapped. Some((info.hblkhd + info.uordblks) as usize) } #[cfg(not(target_os="linux"))] fn get_system_heap_allocated() -> Option<usize> { None } extern { fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t, newp: *mut c_void, newlen: size_t) -> c_int; } fn get_jemalloc_stat(value_name: &str) -> Option<usize> { // Before we request the measurement of interest, we first send an "epoch" // request. Without that jemalloc gives cached statistics(!) which can be // highly inaccurate. 
let epoch_name = "epoch";
        let epoch_c_name = CString::new(epoch_name).unwrap();
        let mut epoch: u64 = 0;
        let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
        let mut epoch_len = size_of::<u64>() as size_t;

        let value_c_name = CString::new(value_name).unwrap();
        let mut value: size_t = 0;
        let value_ptr = &mut value as *mut _ as *mut c_void;
        let mut value_len = size_of::<size_t>() as size_t;

        // Using the same values for the `old` and `new` parameters is enough
        // to get the statistics updated.
        let rv = unsafe {
            je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr, epoch_len)
        };
        if rv != 0 {
            return None;
        }

        let rv = unsafe {
            je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0)
        };
        if rv != 0
Some(value as usize) } // Like std::macros::try!, but for Option<>. macro_rules! option_try( ($e:expr) => (match $e { Some(e) => e, None => return None }) ); #[cfg(target_os="linux")] fn get_proc_self_statm_field(field: usize) -> Option<usize> { use std::fs::File; use std::io::Read; let mut f = option_try!(File::open("/proc/self/statm").ok()); let mut contents = String::new(); option_try!(f.read_to_string(&mut contents).ok()); let s = option_try!(contents.split_whitespace().nth(field)); let npages = option_try!(s.parse::<usize>().ok()); Some(npages * ::std::env::page_size()) } #[cfg(target_os="linux")] fn get_vsize() -> Option<usize> { get_proc_self_statm_field(0) } #[cfg(target_os="linux")] fn get_resident() -> Option<usize> { get_proc_self_statm_field(1) } #[cfg(target_os="macos")] fn get_vsize() -> Option<usize> { virtual_size() } #[cfg(target_os="macos")] fn get_resident() -> Option<usize> { resident_size() } #[cfg(not(any(target_os="linux", target_os = "macos")))] fn get_vsize() -> Option<usize> { None } #[cfg(not(any(target_os="linux", target_os = "macos")))] fn get_resident() -> Option<usize> { None } #[cfg(target_os="linux")] fn get_resident_segments() -> Vec<(String, usize)> { use regex::Regex; use std::collections::HashMap; use std::collections::hash_map::Entry; use std::fs::File; use std::io::{BufReader, BufRead}; // The first line of an entry in /proc/<pid>/smaps looks just like an entry // in /proc/<pid>/maps: // // address perms offset dev inode pathname // 02366000-025d8000 rw-p 00000000 00:00 0 [heap] // // Each of the following lines contains a key and a value, separated // by ": ", where the key does not contain either of those characters. // For example: // // Rss: 132 kB let f = match File::open("/proc/self/smaps") { Ok(f) => BufReader::new(f), Err(_) => return vec![], }; let seg_re = Regex::new( r"^[:xdigit:]+-[:xdigit:]+ (....) [:xdigit:]+ [:xdigit:]+:[:xdigit:]+ \d+ +(.*)").unwrap(); let rss_re = Regex::new(r"^Rss: +(\d+) kB").unwrap(); // We record each segment's resident size. let mut seg_map: HashMap<String, usize> = HashMap::new(); #[derive(PartialEq)] enum LookingFor { Segment, Rss } let mut looking_for = LookingFor::Segment; let mut curr_seg_name = String::new(); // Parse the file. for line in f.lines() { let line = match line { Ok(line) => line, Err(_) => continue, }; if looking_for == LookingFor::Segment { // Look for a segment info line. let cap = match seg_re.captures(&line) { Some(cap) => cap, None => continue, }; let perms = cap.at(1).unwrap(); let pathname = cap.at(2).unwrap(); // Construct the segment name from its pathname and permissions. curr_seg_name.clear(); if pathname == "" || pathname.starts_with("[stack:") { // Anonymous memory. Entries marked with "[stack:nnn]" // look like thread stacks but they may include other // anonymous mappings, so we can't trust them and just // treat them as entirely anonymous. curr_seg_name.push
{ return None; }
conditional_block
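Illustrative only, based on the mem.rs record above (ReportsForest and its methods are private to that module, so this is a sketch of the intended behaviour rather than external API usage): inserting two reports that share their first path segment groups them under one tree, and printing lists children largest-first.

// Hypothetical paths and sizes, chosen only to show the grouping and sorting.
let mut forest = ReportsForest::new();
forest.insert(&["explicit".to_owned(), "a".to_owned()], 1024 * 1024);
forest.insert(&["explicit".to_owned(), "b".to_owned()], 2 * 1024 * 1024);
// Prints one "explicit" tree of 3 MiB whose children appear as "b" (2 MiB)
// then "a" (1 MiB), because children are sorted by descending size.
forest.print();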
mem.rs
sleep_ms(period_ms); if chan.send(ProfilerMsg::Print).is_err() { break; } } }); } // Always spawn the memory profiler. If there is no timer thread it won't receive regular // `Print` events, but it will still receive the other events. spawn_named("Memory profiler".to_owned(), move || { let mut mem_profiler = Profiler::new(port); mem_profiler.start(); }); let mem_profiler_chan = ProfilerChan(chan); // Register the system memory reporter, which will run on the memory profiler's own thread. // It never needs to be unregistered, because as long as the memory profiler is running the // system memory reporter can make measurements. let system_reporter = box SystemReporter; mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(), system_reporter)); mem_profiler_chan } pub fn new(port: Receiver<ProfilerMsg>) -> Profiler { Profiler { port: port, reporters: HashMap::new(), } } pub fn start(&mut self) { loop { match self.port.recv() { Ok(msg) => { if!self.handle_msg(msg) { break } } _ => break } } } fn handle_msg(&mut self, msg: ProfilerMsg) -> bool { match msg { ProfilerMsg::RegisterReporter(name, reporter) => { // Panic if it has already been registered. let name_clone = name.clone(); match self.reporters.insert(name, reporter) { None => true, Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use", name_clone)), } }, ProfilerMsg::UnregisterReporter(name) => { // Panic if it hasn't previously been registered. match self.reporters.remove(&name) { Some(_) => true, None => panic!(format!("UnregisterReporter: '{}' name is unknown", &name)), } }, ProfilerMsg::Print => { self.handle_print_msg(); true }, ProfilerMsg::Exit => false } } fn handle_print_msg(&self) { println!("Begin memory reports"); println!("|"); // Collect reports from memory reporters. // // This serializes the report-gathering. It might be worth creating a new scoped thread for // each reporter once we have enough of them. // // If anything goes wrong with a reporter, we just skip it. let mut forest = ReportsForest::new(); for reporter in self.reporters.values() { let (chan, port) = channel(); if reporter.collect_reports(ReportsChan(chan)) { if let Ok(reports) = port.recv() { for report in reports.iter() { forest.insert(&report.path, report.size); } } } } forest.print(); println!("|"); println!("End memory reports"); println!(""); } } /// A collection of one or more reports with the same initial path segment. A ReportsTree /// containing a single node is described as "degenerate". struct ReportsTree { /// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location. /// For interior nodes, this is the sum of the sizes of all its child nodes. size: usize, /// For leaf nodes, this is the count of all reports that mapped to this location. /// For interor nodes, this is always zero. count: u32, /// The segment from the report path that maps to this node. path_seg: String, /// Child nodes. children: Vec<ReportsTree>, } impl ReportsTree { fn new(path_seg: String) -> ReportsTree { ReportsTree { size: 0, count: 0, path_seg: path_seg, children: vec![] } } // Searches the tree's children for a path_seg match, and returns the index if there is a // match. fn find_child(&self, path_seg: &String) -> Option<usize> { for (i, child) in self.children.iter().enumerate() { if child.path_seg == *path_seg { return Some(i); } } None } // Insert the path and size into the tree, adding any nodes as necessary. 
fn insert(&mut self, path: &[String], size: usize) { let mut t: &mut ReportsTree = self; for path_seg in path.iter() { let i = match t.find_child(&path_seg) { Some(i) => i, None => { let new_t = ReportsTree::new(path_seg.clone()); t.children.push(new_t); t.children.len() - 1 }, }; let tmp = t; // this temporary is needed to satisfy the borrow checker t = &mut tmp.children[i]; } t.size += size; t.count += 1; } // Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once // all the reports have been inserted. fn compute_interior_node_sizes_and_sort(&mut self) -> usize { if!self.children.is_empty() { // Interior node. Derive its size from its children. if self.size!= 0 { // This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"]. panic!("one report's path is a sub-path of another report's path"); } for child in self.children.iter_mut() { self.size += child.compute_interior_node_sizes_and_sort(); } // Now that child sizes have been computed, we can sort the children. self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size)); } self.size } fn print(&self, depth: i32) { if!self.children.is_empty() { assert_eq!(self.count, 0); } let mut indent_str = String::new(); for _ in 0..depth { indent_str.push_str(" "); } let mebi = 1024f64 * 1024f64; let count_str = if self.count > 1 { format!(" {}", self.count) } else { "".to_owned() }; println!("|{}{:8.2} MiB -- {}{}", indent_str, (self.size as f64) / mebi, self.path_seg, count_str); for child in self.children.iter() { child.print(depth + 1); } } } /// A collection of ReportsTrees. It represents the data from multiple memory reports in a form /// that's good to print. struct ReportsForest { trees: HashMap<String, ReportsTree>, } impl ReportsForest { fn new() -> ReportsForest { ReportsForest { trees: HashMap::new(), } } // Insert the path and size into the forest, adding any trees and nodes as necessary. fn insert(&mut self, path: &[String], size: usize)
fn print(&mut self) { // Fill in sizes of interior nodes, and recursively sort the sub-trees. for (_, tree) in self.trees.iter_mut() { tree.compute_interior_node_sizes_and_sort(); } // Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a // single node) come after non-degenerate trees. Secondary sort: alphabetical order of the // root node's path_seg. let mut v = vec![]; for (_, tree) in self.trees.iter() { v.push(tree); } v.sort_by(|a, b| { if a.children.is_empty() &&!b.children.is_empty() { Ordering::Greater } else if!a.children.is_empty() && b.children.is_empty() { Ordering::Less } else { a.path_seg.cmp(&b.path_seg) } }); // Print the forest. for tree in v.iter() { tree.print(0); // Print a blank line after non-degenerate trees. if!tree.children.is_empty() { println!("|"); } } } } //--------------------------------------------------------------------------- mod system_reporter { use libc::{c_char, c_int, c_void, size_t}; use profile_traits::mem::{Report, Reporter, ReportsChan}; use std::borrow::ToOwned; use std::ffi::CString; use std::mem::size_of; use std::ptr::null_mut; #[cfg(target_os="macos")] use task_info::task_basic_info::{virtual_size, resident_size}; /// Collects global measurements from the OS and heap allocators. pub struct SystemReporter; impl Reporter for SystemReporter { fn collect_reports(&self, reports_chan: ReportsChan) -> bool { let mut reports = vec![]; { let mut report = |path, size| { if let Some(size) = size { reports.push(Report { path: path, size: size }); } }; // Virtual and physical memory usage, as reported by the OS. report(path!["vsize"], get_vsize()); report(path!["resident"], get_resident()); // Memory segments, as reported by the OS. for seg in get_resident_segments().iter() { report(path!["resident-according-to-smaps", seg.0], Some(seg.1)); } // Total number of bytes allocated by the application on the system // heap. report(path!["system-heap-allocated"], get_system_heap_allocated()); // The descriptions of the following jemalloc measurements are taken // directly from the jemalloc documentation. // "Total number of bytes allocated by the application." report(path!["jemalloc-heap-allocated"], get_jemalloc_stat("stats.allocated")); // "Total number of bytes in active pages allocated by the application. // This is a multiple of the page size, and greater than or equal to // |stats.allocated|." report(path!["jemalloc-heap-active"], get_jemalloc_stat("stats.active")); // "Total number of bytes in chunks mapped on behalf of the application. // This is a multiple of the chunk size, and is at least as large as // |stats.active|. This does not include inactive chunks." report(path!["jemalloc-heap-mapped"], get_jemalloc_stat("stats.mapped")); } reports_chan.send(reports); true } } #[cfg(target_os="linux")] extern { fn mallinfo() -> struct_mallinfo; } #[cfg(target_os="linux")] #[repr(C)] pub struct struct_mallinfo { arena: c_int, ordblks: c_int, smblks: c_int, hblks: c_int, hblkhd: c_int, usmblks: c_int, fsmblks: c_int, uordblks: c_int, fordblks: c_int, keepcost: c_int, } #[cfg(target_os="linux")] fn get_system_heap_allocated() -> Option<usize> { let mut info: struct_mallinfo; unsafe { info = mallinfo(); } // The documentation in the glibc man page makes it sound like |uordblks| // would suffice, but that only gets the small allocations that are put in // the brk heap. We need |hblkhd| as well to get the larger allocations // that are mmapped. 
Some((info.hblkhd + info.uordblks) as usize) } #[cfg(not(target_os="linux"))] fn get_system_heap_allocated() -> Option<usize> { None } extern { fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t, newp: *mut c_void, newlen: size_t) -> c_int; } fn get_jemalloc_stat(value_name: &str) -> Option<usize> { // Before we request the measurement of interest, we first send an "epoch" // request. Without that jemalloc gives cached statistics(!) which can be // highly inaccurate. let epoch_name = "epoch"; let epoch_c_name = CString::new(epoch_name).unwrap(); let mut epoch: u64 = 0; let epoch_ptr = &mut epoch as *mut _ as *mut c_void; let mut epoch_len = size_of::<u64>() as size_t; let value_c_name = CString::new(value_name).unwrap(); let mut value: size_t = 0; let value_ptr = &mut value as *mut _ as *mut c_void; let mut value_len = size_of::<size_t>() as size_t; // Using the same values for the `old` and `new` parameters is enough // to get the statistics updated. let rv = unsafe { je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr, epoch_len) }; if rv!= 0 { return None; } let rv = unsafe { je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0) }; if rv!= 0 { return None; } Some(value as usize) } // Like std::macros::try!, but for Option<>. macro_rules! option_try( ($e:expr) => (match $e { Some(e) => e, None => return None }) ); #[cfg(target_os="linux")] fn get_proc_self_statm_field(field: usize) -> Option<usize> { use std::fs::File; use std::io::Read; let mut f = option_try!(File::open("/proc/self/statm").ok()); let mut contents = String::new(); option_try!(f.read_to_string(&mut contents).ok()); let s = option_try!(contents.split_whitespace().nth(field)); let npages = option_try!(s.parse::<usize>().ok()); Some(npages * ::std::env::page_size()) } #[cfg(target_os="linux")] fn get_vsize() -> Option<usize> { get_proc_self_statm_field(0) } #[cfg(target_os="linux")] fn get_resident() -> Option<usize> { get_proc_self_statm_field(1) } #[cfg(target_os="macos")] fn get_vsize() -> Option<usize> { virtual_size() } #[cfg(target_os="macos")] fn get_resident() -> Option<usize> { resident_size() } #[cfg(not(any(target_os="linux", target_os = "macos")))] fn get_vsize() -> Option<usize> { None } #[cfg(not(any(target_os="linux", target_os = "macos")))] fn get_resident() -> Option<usize> { None } #[cfg(target_os="linux")] fn get_resident_segments() -> Vec<(String, usize)> { use regex::Regex; use std::collections::HashMap; use std::collections::hash_map::Entry; use std::fs::File; use std::io::{BufReader, BufRead}; // The first line of an entry in /proc/<pid>/smaps looks just like an entry // in /proc/<pid>/maps: // // address perms offset dev inode pathname // 02366000-025d8000 rw-p 00000000 00:00 0 [heap] // // Each of the following lines contains a key and a value, separated // by ": ", where the key does not contain either of those characters. // For example: // // Rss: 132 kB let f = match File::open("/proc/self/smaps") { Ok(f) => BufReader::new(f), Err(_) => return vec![], }; let seg_re = Regex::new( r"^[:xdigit:]+-[:xdigit:]+ (....) [:xdigit:]+ [:xdigit:]+:[:xdigit:]+ \d+ +(.*)").unwrap(); let rss_re = Regex::new(r"^Rss: +(\d+) kB").unwrap(); // We record each segment's resident size. let mut seg_map: HashMap<String, usize> = HashMap::new(); #[derive(PartialEq)] enum LookingFor { Segment, Rss } let mut looking_for = LookingFor::Segment; let mut curr_seg_name = String::new(); // Parse the file. 
for line in f.lines() { let line = match line { Ok(line) => line, Err(_) => continue, }; if looking_for == LookingFor::Segment { // Look for a segment info line. let cap = match seg_re.captures(&line) { Some(cap) => cap, None => continue, }; let perms = cap.at(1).unwrap(); let pathname = cap.at(2).unwrap(); // Construct the segment name from its pathname and permissions. curr_seg_name.clear(); if pathname == "" || pathname.starts_with("[stack:") { // Anonymous memory. Entries marked with "[stack:nnn]" // look like thread stacks but they may include other // anonymous mappings, so we can't trust them and just // treat them as entirely anonymous. curr_seg_name.push
{ // Get the right tree, creating it if necessary. if !self.trees.contains_key(&path[0]) { self.trees.insert(path[0].clone(), ReportsTree::new(path[0].clone())); } let t = self.trees.get_mut(&path[0]).unwrap(); // Use tail() because the 0th path segment was used to find the right tree in the forest. t.insert(path.tail(), size); }
identifier_body
mem.rs
sleep_ms(period_ms); if chan.send(ProfilerMsg::Print).is_err() { break; } } }); } // Always spawn the memory profiler. If there is no timer thread it won't receive regular // `Print` events, but it will still receive the other events. spawn_named("Memory profiler".to_owned(), move || { let mut mem_profiler = Profiler::new(port); mem_profiler.start(); }); let mem_profiler_chan = ProfilerChan(chan); // Register the system memory reporter, which will run on the memory profiler's own thread. // It never needs to be unregistered, because as long as the memory profiler is running the // system memory reporter can make measurements. let system_reporter = box SystemReporter; mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(), system_reporter)); mem_profiler_chan } pub fn new(port: Receiver<ProfilerMsg>) -> Profiler { Profiler { port: port, reporters: HashMap::new(), } } pub fn start(&mut self) { loop { match self.port.recv() { Ok(msg) => { if!self.handle_msg(msg) { break } } _ => break } } } fn handle_msg(&mut self, msg: ProfilerMsg) -> bool { match msg { ProfilerMsg::RegisterReporter(name, reporter) => { // Panic if it has already been registered. let name_clone = name.clone(); match self.reporters.insert(name, reporter) { None => true, Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use", name_clone)), } }, ProfilerMsg::UnregisterReporter(name) => { // Panic if it hasn't previously been registered. match self.reporters.remove(&name) { Some(_) => true, None => panic!(format!("UnregisterReporter: '{}' name is unknown", &name)), } }, ProfilerMsg::Print => { self.handle_print_msg(); true }, ProfilerMsg::Exit => false } } fn handle_print_msg(&self) { println!("Begin memory reports"); println!("|"); // Collect reports from memory reporters. // // This serializes the report-gathering. It might be worth creating a new scoped thread for // each reporter once we have enough of them. // // If anything goes wrong with a reporter, we just skip it. let mut forest = ReportsForest::new(); for reporter in self.reporters.values() { let (chan, port) = channel(); if reporter.collect_reports(ReportsChan(chan)) { if let Ok(reports) = port.recv() { for report in reports.iter() { forest.insert(&report.path, report.size); } } } } forest.print(); println!("|"); println!("End memory reports"); println!(""); } } /// A collection of one or more reports with the same initial path segment. A ReportsTree /// containing a single node is described as "degenerate". struct ReportsTree { /// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location. /// For interior nodes, this is the sum of the sizes of all its child nodes. size: usize, /// For leaf nodes, this is the count of all reports that mapped to this location. /// For interor nodes, this is always zero. count: u32, /// The segment from the report path that maps to this node. path_seg: String, /// Child nodes. children: Vec<ReportsTree>, } impl ReportsTree { fn new(path_seg: String) -> ReportsTree { ReportsTree { size: 0, count: 0, path_seg: path_seg, children: vec![] } } // Searches the tree's children for a path_seg match, and returns the index if there is a // match. fn find_child(&self, path_seg: &String) -> Option<usize> { for (i, child) in self.children.iter().enumerate() { if child.path_seg == *path_seg { return Some(i); } } None } // Insert the path and size into the tree, adding any nodes as necessary. 
fn insert(&mut self, path: &[String], size: usize) {
        let mut t: &mut ReportsTree = self;
        for path_seg in path.iter() {
            let i = match t.find_child(&path_seg) {
                Some(i) => i,
                None => {
                    let new_t = ReportsTree::new(path_seg.clone());
                    t.children.push(new_t);
                    t.children.len() - 1
                },
            };
            let tmp = t;    // this temporary is needed to satisfy the borrow checker
            t = &mut tmp.children[i];
        }
        t.size += size;
        t.count += 1;
    }

    // Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
    // all the reports have been inserted.
    fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
        if !self.children.is_empty() {
            // Interior node. Derive its size from its children.
            if self.size != 0 {
                // This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
                panic!("one report's path is a sub-path of another report's path");
            }
            for child in self.children.iter_mut() {
                self.size += child.compute_interior_node_sizes_and_sort();
            }

            // Now that child sizes have been computed, we can sort the children.
            self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
        }
        self.size
    }

    fn
(&self, depth: i32) { if!self.children.is_empty() { assert_eq!(self.count, 0); } let mut indent_str = String::new(); for _ in 0..depth { indent_str.push_str(" "); } let mebi = 1024f64 * 1024f64; let count_str = if self.count > 1 { format!(" {}", self.count) } else { "".to_owned() }; println!("|{}{:8.2} MiB -- {}{}", indent_str, (self.size as f64) / mebi, self.path_seg, count_str); for child in self.children.iter() { child.print(depth + 1); } } } /// A collection of ReportsTrees. It represents the data from multiple memory reports in a form /// that's good to print. struct ReportsForest { trees: HashMap<String, ReportsTree>, } impl ReportsForest { fn new() -> ReportsForest { ReportsForest { trees: HashMap::new(), } } // Insert the path and size into the forest, adding any trees and nodes as necessary. fn insert(&mut self, path: &[String], size: usize) { // Get the right tree, creating it if necessary. if!self.trees.contains_key(&path[0]) { self.trees.insert(path[0].clone(), ReportsTree::new(path[0].clone())); } let t = self.trees.get_mut(&path[0]).unwrap(); // Use tail() because the 0th path segment was used to find the right tree in the forest. t.insert(path.tail(), size); } fn print(&mut self) { // Fill in sizes of interior nodes, and recursively sort the sub-trees. for (_, tree) in self.trees.iter_mut() { tree.compute_interior_node_sizes_and_sort(); } // Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a // single node) come after non-degenerate trees. Secondary sort: alphabetical order of the // root node's path_seg. let mut v = vec![]; for (_, tree) in self.trees.iter() { v.push(tree); } v.sort_by(|a, b| { if a.children.is_empty() &&!b.children.is_empty() { Ordering::Greater } else if!a.children.is_empty() && b.children.is_empty() { Ordering::Less } else { a.path_seg.cmp(&b.path_seg) } }); // Print the forest. for tree in v.iter() { tree.print(0); // Print a blank line after non-degenerate trees. if!tree.children.is_empty() { println!("|"); } } } } //--------------------------------------------------------------------------- mod system_reporter { use libc::{c_char, c_int, c_void, size_t}; use profile_traits::mem::{Report, Reporter, ReportsChan}; use std::borrow::ToOwned; use std::ffi::CString; use std::mem::size_of; use std::ptr::null_mut; #[cfg(target_os="macos")] use task_info::task_basic_info::{virtual_size, resident_size}; /// Collects global measurements from the OS and heap allocators. pub struct SystemReporter; impl Reporter for SystemReporter { fn collect_reports(&self, reports_chan: ReportsChan) -> bool { let mut reports = vec![]; { let mut report = |path, size| { if let Some(size) = size { reports.push(Report { path: path, size: size }); } }; // Virtual and physical memory usage, as reported by the OS. report(path!["vsize"], get_vsize()); report(path!["resident"], get_resident()); // Memory segments, as reported by the OS. for seg in get_resident_segments().iter() { report(path!["resident-according-to-smaps", seg.0], Some(seg.1)); } // Total number of bytes allocated by the application on the system // heap. report(path!["system-heap-allocated"], get_system_heap_allocated()); // The descriptions of the following jemalloc measurements are taken // directly from the jemalloc documentation. // "Total number of bytes allocated by the application." report(path!["jemalloc-heap-allocated"], get_jemalloc_stat("stats.allocated")); // "Total number of bytes in active pages allocated by the application. 
// This is a multiple of the page size, and greater than or equal to // |stats.allocated|." report(path!["jemalloc-heap-active"], get_jemalloc_stat("stats.active")); // "Total number of bytes in chunks mapped on behalf of the application. // This is a multiple of the chunk size, and is at least as large as // |stats.active|. This does not include inactive chunks." report(path!["jemalloc-heap-mapped"], get_jemalloc_stat("stats.mapped")); } reports_chan.send(reports); true } } #[cfg(target_os="linux")] extern { fn mallinfo() -> struct_mallinfo; } #[cfg(target_os="linux")] #[repr(C)] pub struct struct_mallinfo { arena: c_int, ordblks: c_int, smblks: c_int, hblks: c_int, hblkhd: c_int, usmblks: c_int, fsmblks: c_int, uordblks: c_int, fordblks: c_int, keepcost: c_int, } #[cfg(target_os="linux")] fn get_system_heap_allocated() -> Option<usize> { let mut info: struct_mallinfo; unsafe { info = mallinfo(); } // The documentation in the glibc man page makes it sound like |uordblks| // would suffice, but that only gets the small allocations that are put in // the brk heap. We need |hblkhd| as well to get the larger allocations // that are mmapped. Some((info.hblkhd + info.uordblks) as usize) } #[cfg(not(target_os="linux"))] fn get_system_heap_allocated() -> Option<usize> { None } extern { fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t, newp: *mut c_void, newlen: size_t) -> c_int; } fn get_jemalloc_stat(value_name: &str) -> Option<usize> { // Before we request the measurement of interest, we first send an "epoch" // request. Without that jemalloc gives cached statistics(!) which can be // highly inaccurate. let epoch_name = "epoch"; let epoch_c_name = CString::new(epoch_name).unwrap(); let mut epoch: u64 = 0; let epoch_ptr = &mut epoch as *mut _ as *mut c_void; let mut epoch_len = size_of::<u64>() as size_t; let value_c_name = CString::new(value_name).unwrap(); let mut value: size_t = 0; let value_ptr = &mut value as *mut _ as *mut c_void; let mut value_len = size_of::<size_t>() as size_t; // Using the same values for the `old` and `new` parameters is enough // to get the statistics updated. let rv = unsafe { je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr, epoch_len) }; if rv!= 0 { return None; } let rv = unsafe { je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0) }; if rv!= 0 { return None; } Some(value as usize) } // Like std::macros::try!, but for Option<>. macro_rules! 
option_try( ($e:expr) => (match $e { Some(e) => e, None => return None }) ); #[cfg(target_os="linux")] fn get_proc_self_statm_field(field: usize) -> Option<usize> { use std::fs::File; use std::io::Read; let mut f = option_try!(File::open("/proc/self/statm").ok()); let mut contents = String::new(); option_try!(f.read_to_string(&mut contents).ok()); let s = option_try!(contents.split_whitespace().nth(field)); let npages = option_try!(s.parse::<usize>().ok()); Some(npages * ::std::env::page_size()) } #[cfg(target_os="linux")] fn get_vsize() -> Option<usize> { get_proc_self_statm_field(0) } #[cfg(target_os="linux")] fn get_resident() -> Option<usize> { get_proc_self_statm_field(1) } #[cfg(target_os="macos")] fn get_vsize() -> Option<usize> { virtual_size() } #[cfg(target_os="macos")] fn get_resident() -> Option<usize> { resident_size() } #[cfg(not(any(target_os="linux", target_os = "macos")))] fn get_vsize() -> Option<usize> { None } #[cfg(not(any(target_os="linux", target_os = "macos")))] fn get_resident() -> Option<usize> { None } #[cfg(target_os="linux")] fn get_resident_segments() -> Vec<(String, usize)> { use regex::Regex; use std::collections::HashMap; use std::collections::hash_map::Entry; use std::fs::File; use std::io::{BufReader, BufRead}; // The first line of an entry in /proc/<pid>/smaps looks just like an entry // in /proc/<pid>/maps: // // address perms offset dev inode pathname // 02366000-025d8000 rw-p 00000000 00:00 0 [heap] // // Each of the following lines contains a key and a value, separated // by ": ", where the key does not contain either of those characters. // For example: // // Rss: 132 kB let f = match File::open("/proc/self/smaps") { Ok(f) => BufReader::new(f), Err(_) => return vec![], }; let seg_re = Regex::new( r"^[:xdigit:]+-[:xdigit:]+ (....) [:xdigit:]+ [:xdigit:]+:[:xdigit:]+ \d+ +(.*)").unwrap(); let rss_re = Regex::new(r"^Rss: +(\d+) kB").unwrap(); // We record each segment's resident size. let mut seg_map: HashMap<String, usize> = HashMap::new(); #[derive(PartialEq)] enum LookingFor { Segment, Rss } let mut looking_for = LookingFor::Segment; let mut curr_seg_name = String::new(); // Parse the file. for line in f.lines() { let line = match line { Ok(line) => line, Err(_) => continue, }; if looking_for == LookingFor::Segment { // Look for a segment info line. let cap = match seg_re.captures(&line) { Some(cap) => cap, None => continue, }; let perms = cap.at(1).unwrap(); let pathname = cap.at(2).unwrap(); // Construct the segment name from its pathname and permissions. curr_seg_name.clear(); if pathname == "" || pathname.starts_with("[stack:") { // Anonymous memory. Entries marked with "[stack:nnn]" // look like thread stacks but they may include other // anonymous mappings, so we can't trust them and just // treat them as entirely anonymous. curr_seg_name.push
print
identifier_name
mem.rs
sleep_ms(period_ms); if chan.send(ProfilerMsg::Print).is_err() { break; } } }); } // Always spawn the memory profiler. If there is no timer thread it won't receive regular // `Print` events, but it will still receive the other events. spawn_named("Memory profiler".to_owned(), move || { let mut mem_profiler = Profiler::new(port); mem_profiler.start(); }); let mem_profiler_chan = ProfilerChan(chan); // Register the system memory reporter, which will run on the memory profiler's own thread. // It never needs to be unregistered, because as long as the memory profiler is running the // system memory reporter can make measurements. let system_reporter = box SystemReporter; mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(), system_reporter)); mem_profiler_chan } pub fn new(port: Receiver<ProfilerMsg>) -> Profiler { Profiler { port: port, reporters: HashMap::new(), } } pub fn start(&mut self) { loop { match self.port.recv() { Ok(msg) => { if!self.handle_msg(msg) { break } } _ => break } } } fn handle_msg(&mut self, msg: ProfilerMsg) -> bool { match msg { ProfilerMsg::RegisterReporter(name, reporter) => { // Panic if it has already been registered. let name_clone = name.clone(); match self.reporters.insert(name, reporter) { None => true, Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use", name_clone)), } }, ProfilerMsg::UnregisterReporter(name) => { // Panic if it hasn't previously been registered. match self.reporters.remove(&name) { Some(_) => true, None => panic!(format!("UnregisterReporter: '{}' name is unknown", &name)), } }, ProfilerMsg::Print => { self.handle_print_msg(); true }, ProfilerMsg::Exit => false } } fn handle_print_msg(&self) { println!("Begin memory reports"); println!("|"); // Collect reports from memory reporters. // // This serializes the report-gathering. It might be worth creating a new scoped thread for // each reporter once we have enough of them. // // If anything goes wrong with a reporter, we just skip it. let mut forest = ReportsForest::new(); for reporter in self.reporters.values() { let (chan, port) = channel(); if reporter.collect_reports(ReportsChan(chan)) { if let Ok(reports) = port.recv() { for report in reports.iter() { forest.insert(&report.path, report.size); } } } } forest.print(); println!("|"); println!("End memory reports"); println!(""); } } /// A collection of one or more reports with the same initial path segment. A ReportsTree /// containing a single node is described as "degenerate". struct ReportsTree { /// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location. /// For interior nodes, this is the sum of the sizes of all its child nodes. size: usize, /// For leaf nodes, this is the count of all reports that mapped to this location. /// For interor nodes, this is always zero. count: u32, /// The segment from the report path that maps to this node. path_seg: String, /// Child nodes. children: Vec<ReportsTree>, } impl ReportsTree { fn new(path_seg: String) -> ReportsTree { ReportsTree { size: 0, count: 0, path_seg: path_seg, children: vec![] } } // Searches the tree's children for a path_seg match, and returns the index if there is a // match. fn find_child(&self, path_seg: &String) -> Option<usize> { for (i, child) in self.children.iter().enumerate() { if child.path_seg == *path_seg { return Some(i); } } None } // Insert the path and size into the tree, adding any nodes as necessary. 
fn insert(&mut self, path: &[String], size: usize) { let mut t: &mut ReportsTree = self; for path_seg in path.iter() { let i = match t.find_child(&path_seg) { Some(i) => i, None => { let new_t = ReportsTree::new(path_seg.clone()); t.children.push(new_t); t.children.len() - 1 }, }; let tmp = t; // this temporary is needed to satisfy the borrow checker t = &mut tmp.children[i]; } t.size += size; t.count += 1; } // Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once // all the reports have been inserted. fn compute_interior_node_sizes_and_sort(&mut self) -> usize { if!self.children.is_empty() { // Interior node. Derive its size from its children. if self.size!= 0 { // This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"]. panic!("one report's path is a sub-path of another report's path"); } for child in self.children.iter_mut() { self.size += child.compute_interior_node_sizes_and_sort(); } // Now that child sizes have been computed, we can sort the children. self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size)); } self.size } fn print(&self, depth: i32) { if!self.children.is_empty() { assert_eq!(self.count, 0); } let mut indent_str = String::new(); for _ in 0..depth { indent_str.push_str(" "); } let mebi = 1024f64 * 1024f64; let count_str = if self.count > 1 { format!(" {}", self.count) } else { "".to_owned() }; println!("|{}{:8.2} MiB -- {}{}", indent_str, (self.size as f64) / mebi, self.path_seg, count_str); for child in self.children.iter() { child.print(depth + 1); } } } /// A collection of ReportsTrees. It represents the data from multiple memory reports in a form /// that's good to print. struct ReportsForest { trees: HashMap<String, ReportsTree>, } impl ReportsForest { fn new() -> ReportsForest { ReportsForest { trees: HashMap::new(), } } // Insert the path and size into the forest, adding any trees and nodes as necessary. fn insert(&mut self, path: &[String], size: usize) { // Get the right tree, creating it if necessary. if!self.trees.contains_key(&path[0]) { self.trees.insert(path[0].clone(), ReportsTree::new(path[0].clone())); } let t = self.trees.get_mut(&path[0]).unwrap(); // Use tail() because the 0th path segment was used to find the right tree in the forest. t.insert(path.tail(), size); } fn print(&mut self) { // Fill in sizes of interior nodes, and recursively sort the sub-trees. for (_, tree) in self.trees.iter_mut() { tree.compute_interior_node_sizes_and_sort(); } // Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a // single node) come after non-degenerate trees. Secondary sort: alphabetical order of the // root node's path_seg. let mut v = vec![]; for (_, tree) in self.trees.iter() { v.push(tree); } v.sort_by(|a, b| { if a.children.is_empty() &&!b.children.is_empty() { Ordering::Greater } else if!a.children.is_empty() && b.children.is_empty() { Ordering::Less } else { a.path_seg.cmp(&b.path_seg) } }); // Print the forest. for tree in v.iter() { tree.print(0); // Print a blank line after non-degenerate trees. 
if !tree.children.is_empty() {
                println!("|");
            }
        }
    }
}

//---------------------------------------------------------------------------

mod system_reporter {
    use libc::{c_char, c_int, c_void, size_t};
    use profile_traits::mem::{Report, Reporter, ReportsChan};
    use std::borrow::ToOwned;
    use std::ffi::CString;
    use std::mem::size_of;
    use std::ptr::null_mut;
    #[cfg(target_os="macos")]
    use task_info::task_basic_info::{virtual_size, resident_size};

    /// Collects global measurements from the OS and heap allocators.
    pub struct SystemReporter;

    impl Reporter for SystemReporter {
        fn collect_reports(&self, reports_chan: ReportsChan) -> bool {
            let mut reports = vec![];
            {
let mut report = |path, size| { if let Some(size) = size { reports.push(Report { path: path, size: size }); } }; // Virtual and physical memory usage, as reported by the OS. report(path!["vsize"], get_vsize()); report(path!["resident"], get_resident()); // Memory segments, as reported by the OS. for seg in get_resident_segments().iter() { report(path!["resident-according-to-smaps", seg.0], Some(seg.1)); } // Total number of bytes allocated by the application on the system // heap. report(path!["system-heap-allocated"], get_system_heap_allocated()); // The descriptions of the following jemalloc measurements are taken // directly from the jemalloc documentation. // "Total number of bytes allocated by the application." report(path!["jemalloc-heap-allocated"], get_jemalloc_stat("stats.allocated")); // "Total number of bytes in active pages allocated by the application. // This is a multiple of the page size, and greater than or equal to // |stats.allocated|." report(path!["jemalloc-heap-active"], get_jemalloc_stat("stats.active")); // "Total number of bytes in chunks mapped on behalf of the application. // This is a multiple of the chunk size, and is at least as large as // |stats.active|. This does not include inactive chunks." report(path!["jemalloc-heap-mapped"], get_jemalloc_stat("stats.mapped")); } reports_chan.send(reports); true } } #[cfg(target_os="linux")] extern { fn mallinfo() -> struct_mallinfo; } #[cfg(target_os="linux")] #[repr(C)] pub struct struct_mallinfo { arena: c_int, ordblks: c_int, smblks: c_int, hblks: c_int, hblkhd: c_int, usmblks: c_int, fsmblks: c_int, uordblks: c_int, fordblks: c_int, keepcost: c_int, } #[cfg(target_os="linux")] fn get_system_heap_allocated() -> Option<usize> { let mut info: struct_mallinfo; unsafe { info = mallinfo(); } // The documentation in the glibc man page makes it sound like |uordblks| // would suffice, but that only gets the small allocations that are put in // the brk heap. We need |hblkhd| as well to get the larger allocations // that are mmapped. Some((info.hblkhd + info.uordblks) as usize) } #[cfg(not(target_os="linux"))] fn get_system_heap_allocated() -> Option<usize> { None } extern { fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t, newp: *mut c_void, newlen: size_t) -> c_int; } fn get_jemalloc_stat(value_name: &str) -> Option<usize> { // Before we request the measurement of interest, we first send an "epoch" // request. Without that jemalloc gives cached statistics(!) which can be // highly inaccurate. let epoch_name = "epoch"; let epoch_c_name = CString::new(epoch_name).unwrap(); let mut epoch: u64 = 0; let epoch_ptr = &mut epoch as *mut _ as *mut c_void; let mut epoch_len = size_of::<u64>() as size_t; let value_c_name = CString::new(value_name).unwrap(); let mut value: size_t = 0; let value_ptr = &mut value as *mut _ as *mut c_void; let mut value_len = size_of::<size_t>() as size_t; // Using the same values for the `old` and `new` parameters is enough // to get the statistics updated. let rv = unsafe { je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr, epoch_len) }; if rv!= 0 { return None; } let rv = unsafe { je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0) }; if rv!= 0 { return None; } Some(value as usize) } // Like std::macros::try!, but for Option<>. macro_rules! 
option_try( ($e:expr) => (match $e { Some(e) => e, None => return None }) ); #[cfg(target_os="linux")] fn get_proc_self_statm_field(field: usize) -> Option<usize> { use std::fs::File; use std::io::Read; let mut f = option_try!(File::open("/proc/self/statm").ok()); let mut contents = String::new(); option_try!(f.read_to_string(&mut contents).ok()); let s = option_try!(contents.split_whitespace().nth(field)); let npages = option_try!(s.parse::<usize>().ok()); Some(npages * ::std::env::page_size()) } #[cfg(target_os="linux")] fn get_vsize() -> Option<usize> { get_proc_self_statm_field(0) } #[cfg(target_os="linux")] fn get_resident() -> Option<usize> { get_proc_self_statm_field(1) } #[cfg(target_os="macos")] fn get_vsize() -> Option<usize> { virtual_size() } #[cfg(target_os="macos")] fn get_resident() -> Option<usize> { resident_size() } #[cfg(not(any(target_os="linux", target_os = "macos")))] fn get_vsize() -> Option<usize> { None } #[cfg(not(any(target_os="linux", target_os = "macos")))] fn get_resident() -> Option<usize> { None } #[cfg(target_os="linux")] fn get_resident_segments() -> Vec<(String, usize)> { use regex::Regex; use std::collections::HashMap; use std::collections::hash_map::Entry; use std::fs::File; use std::io::{BufReader, BufRead}; // The first line of an entry in /proc/<pid>/smaps looks just like an entry // in /proc/<pid>/maps: // // address perms offset dev inode pathname // 02366000-025d8000 rw-p 00000000 00:00 0 [heap] // // Each of the following lines contains a key and a value, separated // by ": ", where the key does not contain either of those characters. // For example: // // Rss: 132 kB let f = match File::open("/proc/self/smaps") { Ok(f) => BufReader::new(f), Err(_) => return vec![], }; let seg_re = Regex::new( r"^[:xdigit:]+-[:xdigit:]+ (....) [:xdigit:]+ [:xdigit:]+:[:xdigit:]+ \d+ +(.*)").unwrap(); let rss_re = Regex::new(r"^Rss: +(\d+) kB").unwrap(); // We record each segment's resident size. let mut seg_map: HashMap<String, usize> = HashMap::new(); #[derive(PartialEq)] enum LookingFor { Segment, Rss } let mut looking_for = LookingFor::Segment; let mut curr_seg_name = String::new(); // Parse the file. for line in f.lines() { let line = match line { Ok(line) => line, Err(_) => continue, }; if looking_for == LookingFor::Segment { // Look for a segment info line. let cap = match seg_re.captures(&line) { Some(cap) => cap, None => continue, }; let perms = cap.at(1).unwrap(); let pathname = cap.at(2).unwrap(); // Construct the segment name from its pathname and permissions. curr_seg_name.clear(); if pathname == "" || pathname.starts_with("[stack:") { // Anonymous memory. Entries marked with "[stack:nnn]" // look like thread stacks but they may include other // anonymous mappings, so we can't trust them and just // treat them as entirely anonymous. curr_seg_name.push_
random_line_split
monomorphize.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use back::link::exported_name; use session; use llvm::ValueRef; use llvm; use middle::subst; use middle::subst::Subst; use trans::base::{set_llvm_fn_attrs, set_inline_hint}; use trans::base::{trans_enum_variant, push_ctxt, get_item_val}; use trans::base::{trans_fn, decl_internal_rust_fn}; use trans::base; use trans::common::*; use trans::foreign; use middle::ty::{mod, Ty}; use util::ppaux::Repr; use syntax::abi; use syntax::ast; use syntax::ast_map; use syntax::ast_util::{local_def, PostExpansionMethod}; use syntax::attr; use std::hash::{sip, Hash}; pub fn
<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_id: ast::DefId, psubsts: &subst::Substs<'tcx>, ref_id: Option<ast::NodeId>) -> (ValueRef, bool) { debug!("monomorphic_fn(\ fn_id={}, \ real_substs={}, \ ref_id={})", fn_id.repr(ccx.tcx()), psubsts.repr(ccx.tcx()), ref_id); assert!(psubsts.types.all(|t| { !ty::type_needs_infer(*t) &&!ty::type_has_params(*t) })); let _icx = push_ctxt("monomorphic_fn"); let hash_id = MonoId { def: fn_id, params: psubsts.types.clone() }; match ccx.monomorphized().borrow().get(&hash_id) { Some(&val) => { debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id)); return (val, false); } None => () } debug!("monomorphic_fn(\ fn_id={}, \ psubsts={}, \ hash_id={})", fn_id.repr(ccx.tcx()), psubsts.repr(ccx.tcx()), hash_id); let tpt = ty::lookup_item_type(ccx.tcx(), fn_id); let llitem_ty = tpt.ty; let map_node = session::expect( ccx.sess(), ccx.tcx().map.find(fn_id.node), || { format!("while monomorphizing {}, couldn't find it in \ the item map (may have attempted to monomorphize \ an item defined in a different crate?)", fn_id) }); match map_node { ast_map::NodeForeignItem(_) => { if ccx.tcx().map.get_foreign_abi(fn_id.node)!= abi::RustIntrinsic { // Foreign externs don't have to be monomorphized. return (get_item_val(ccx, fn_id.node), true); } } _ => {} } debug!("monomorphic_fn about to subst into {}", llitem_ty.repr(ccx.tcx())); let mono_ty = llitem_ty.subst(ccx.tcx(), psubsts); ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1); let depth; { let mut monomorphizing = ccx.monomorphizing().borrow_mut(); depth = match monomorphizing.get(&fn_id) { Some(&d) => d, None => 0 }; // Random cut-off -- code that needs to instantiate the same function // recursively more than thirty times can probably safely be assumed // to be causing an infinite expansion. if depth > ccx.sess().recursion_limit.get() { ccx.sess().span_fatal(ccx.tcx().map.span(fn_id.node), "reached the recursion limit during monomorphization"); } monomorphizing.insert(fn_id, depth + 1); } let hash; let s = { let mut state = sip::SipState::new(); hash_id.hash(&mut state); mono_ty.hash(&mut state); hash = format!("h{}", state.result()); ccx.tcx().map.with_path(fn_id.node, |path| { exported_name(path, hash.as_slice()) }) }; debug!("monomorphize_fn mangled to {}", s); // This shouldn't need to option dance. let mut hash_id = Some(hash_id); let mk_lldecl = |abi: abi::Abi| { let lldecl = if abi!= abi::Rust { foreign::decl_rust_fn_with_foreign_abi(ccx, mono_ty, s.as_slice()) } else { decl_internal_rust_fn(ccx, mono_ty, s.as_slice()) }; ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl); lldecl }; let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| { base::update_linkage(ccx, lldecl, None, base::OriginalTranslation); set_llvm_fn_attrs(ccx, attrs, lldecl); let is_first =!ccx.available_monomorphizations().borrow().contains(&s); if is_first { ccx.available_monomorphizations().borrow_mut().insert(s.clone()); } let trans_everywhere = attr::requests_inline(attrs); if trans_everywhere &&!is_first { llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage); } // If `true`, then `lldecl` should be given a function body. // Otherwise, it should be left as a declaration of an external // function, with no definition in the current compilation unit. trans_everywhere || is_first }; let lldecl = match map_node { ast_map::NodeItem(i) => { match *i { ast::Item { node: ast::ItemFn(ref decl, _, abi, _, ref body), .. 
} => { let d = mk_lldecl(abi); let needs_body = setup_lldecl(d, i.attrs.as_slice()); if needs_body { if abi!= abi::Rust { foreign::trans_rust_fn_with_foreign_abi( ccx, &**decl, &**body, &[], d, psubsts, fn_id.node, Some(hash.as_slice())); } else { trans_fn(ccx, &**decl, &**body, d, psubsts, fn_id.node, &[]); } } d } _ => { ccx.sess().bug("Can't monomorphize this kind of item") } } } ast_map::NodeVariant(v) => { let parent = ccx.tcx().map.get_parent(fn_id.node); let tvs = ty::enum_variants(ccx.tcx(), local_def(parent)); let this_tv = tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap(); let d = mk_lldecl(abi::Rust); set_inline_hint(d); match v.node.kind { ast::TupleVariantKind(ref args) => { trans_enum_variant(ccx, parent, &*v, args.as_slice(), this_tv.disr_val, psubsts, d); } ast::StructVariantKind(_) => ccx.sess().bug("can't monomorphize struct variants"), } d } ast_map::NodeImplItem(ii) => { match *ii { ast::MethodImplItem(ref mth) => { let d = mk_lldecl(abi::Rust); let needs_body = setup_lldecl(d, mth.attrs.as_slice()); if needs_body { trans_fn(ccx, mth.pe_fn_decl(), mth.pe_body(), d, psubsts, mth.id, &[]); } d } ast::TypeImplItem(_) => { ccx.sess().bug("can't monomorphize an associated type") } } } ast_map::NodeTraitItem(method) => { match *method { ast::ProvidedMethod(ref mth) => { let d = mk_lldecl(abi::Rust); let needs_body = setup_lldecl(d, mth.attrs.as_slice()); if needs_body { trans_fn(ccx, mth.pe_fn_decl(), mth.pe_body(), d, psubsts, mth.id, &[]); } d } _ => { ccx.sess().bug(format!("can't monomorphize a {}", map_node).as_slice()) } } } ast_map::NodeStructCtor(struct_def) => { let d = mk_lldecl(abi::Rust); set_inline_hint(d); base::trans_tuple_struct(ccx, struct_def.fields.as_slice(), struct_def.ctor_id.expect("ast-mapped tuple struct \ didn't have a ctor id"), psubsts, d); d } // Ugh -- but this ensures any new variants won't be forgotten ast_map::NodeForeignItem(..) | ast_map::NodeLifetime(..) | ast_map::NodeExpr(..) | ast_map::NodeStmt(..) | ast_map::NodeArg(..) | ast_map::NodeBlock(..) | ast_map::NodePat(..) | ast_map::NodeLocal(..) => { ccx.sess().bug(format!("can't monomorphize a {}", map_node).as_slice()) } }; ccx.monomorphizing().borrow_mut().insert(fn_id, depth); debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id)); (lldecl, true) } #[deriving(PartialEq, Eq, Hash, Show)] pub struct MonoId<'tcx> { pub def: ast::DefId, pub params: subst::VecPerParamSpace<Ty<'tcx>> }
monomorphic_fn
identifier_name
monomorphize.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use back::link::exported_name; use session; use llvm::ValueRef; use llvm; use middle::subst; use middle::subst::Subst; use trans::base::{set_llvm_fn_attrs, set_inline_hint}; use trans::base::{trans_enum_variant, push_ctxt, get_item_val}; use trans::base::{trans_fn, decl_internal_rust_fn}; use trans::base; use trans::common::*; use trans::foreign; use middle::ty::{mod, Ty}; use util::ppaux::Repr; use syntax::abi; use syntax::ast; use syntax::ast_map; use syntax::ast_util::{local_def, PostExpansionMethod}; use syntax::attr; use std::hash::{sip, Hash}; pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_id: ast::DefId, psubsts: &subst::Substs<'tcx>, ref_id: Option<ast::NodeId>) -> (ValueRef, bool)
match ccx.monomorphized().borrow().get(&hash_id) { Some(&val) => { debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id)); return (val, false); } None => () } debug!("monomorphic_fn(\ fn_id={}, \ psubsts={}, \ hash_id={})", fn_id.repr(ccx.tcx()), psubsts.repr(ccx.tcx()), hash_id); let tpt = ty::lookup_item_type(ccx.tcx(), fn_id); let llitem_ty = tpt.ty; let map_node = session::expect( ccx.sess(), ccx.tcx().map.find(fn_id.node), || { format!("while monomorphizing {}, couldn't find it in \ the item map (may have attempted to monomorphize \ an item defined in a different crate?)", fn_id) }); match map_node { ast_map::NodeForeignItem(_) => { if ccx.tcx().map.get_foreign_abi(fn_id.node)!= abi::RustIntrinsic { // Foreign externs don't have to be monomorphized. return (get_item_val(ccx, fn_id.node), true); } } _ => {} } debug!("monomorphic_fn about to subst into {}", llitem_ty.repr(ccx.tcx())); let mono_ty = llitem_ty.subst(ccx.tcx(), psubsts); ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1); let depth; { let mut monomorphizing = ccx.monomorphizing().borrow_mut(); depth = match monomorphizing.get(&fn_id) { Some(&d) => d, None => 0 }; // Random cut-off -- code that needs to instantiate the same function // recursively more than thirty times can probably safely be assumed // to be causing an infinite expansion. if depth > ccx.sess().recursion_limit.get() { ccx.sess().span_fatal(ccx.tcx().map.span(fn_id.node), "reached the recursion limit during monomorphization"); } monomorphizing.insert(fn_id, depth + 1); } let hash; let s = { let mut state = sip::SipState::new(); hash_id.hash(&mut state); mono_ty.hash(&mut state); hash = format!("h{}", state.result()); ccx.tcx().map.with_path(fn_id.node, |path| { exported_name(path, hash.as_slice()) }) }; debug!("monomorphize_fn mangled to {}", s); // This shouldn't need to option dance. let mut hash_id = Some(hash_id); let mk_lldecl = |abi: abi::Abi| { let lldecl = if abi!= abi::Rust { foreign::decl_rust_fn_with_foreign_abi(ccx, mono_ty, s.as_slice()) } else { decl_internal_rust_fn(ccx, mono_ty, s.as_slice()) }; ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl); lldecl }; let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| { base::update_linkage(ccx, lldecl, None, base::OriginalTranslation); set_llvm_fn_attrs(ccx, attrs, lldecl); let is_first =!ccx.available_monomorphizations().borrow().contains(&s); if is_first { ccx.available_monomorphizations().borrow_mut().insert(s.clone()); } let trans_everywhere = attr::requests_inline(attrs); if trans_everywhere &&!is_first { llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage); } // If `true`, then `lldecl` should be given a function body. // Otherwise, it should be left as a declaration of an external // function, with no definition in the current compilation unit. trans_everywhere || is_first }; let lldecl = match map_node { ast_map::NodeItem(i) => { match *i { ast::Item { node: ast::ItemFn(ref decl, _, abi, _, ref body), .. 
} => { let d = mk_lldecl(abi); let needs_body = setup_lldecl(d, i.attrs.as_slice()); if needs_body { if abi!= abi::Rust { foreign::trans_rust_fn_with_foreign_abi( ccx, &**decl, &**body, &[], d, psubsts, fn_id.node, Some(hash.as_slice())); } else { trans_fn(ccx, &**decl, &**body, d, psubsts, fn_id.node, &[]); } } d } _ => { ccx.sess().bug("Can't monomorphize this kind of item") } } } ast_map::NodeVariant(v) => { let parent = ccx.tcx().map.get_parent(fn_id.node); let tvs = ty::enum_variants(ccx.tcx(), local_def(parent)); let this_tv = tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap(); let d = mk_lldecl(abi::Rust); set_inline_hint(d); match v.node.kind { ast::TupleVariantKind(ref args) => { trans_enum_variant(ccx, parent, &*v, args.as_slice(), this_tv.disr_val, psubsts, d); } ast::StructVariantKind(_) => ccx.sess().bug("can't monomorphize struct variants"), } d } ast_map::NodeImplItem(ii) => { match *ii { ast::MethodImplItem(ref mth) => { let d = mk_lldecl(abi::Rust); let needs_body = setup_lldecl(d, mth.attrs.as_slice()); if needs_body { trans_fn(ccx, mth.pe_fn_decl(), mth.pe_body(), d, psubsts, mth.id, &[]); } d } ast::TypeImplItem(_) => { ccx.sess().bug("can't monomorphize an associated type") } } } ast_map::NodeTraitItem(method) => { match *method { ast::ProvidedMethod(ref mth) => { let d = mk_lldecl(abi::Rust); let needs_body = setup_lldecl(d, mth.attrs.as_slice()); if needs_body { trans_fn(ccx, mth.pe_fn_decl(), mth.pe_body(), d, psubsts, mth.id, &[]); } d } _ => { ccx.sess().bug(format!("can't monomorphize a {}", map_node).as_slice()) } } } ast_map::NodeStructCtor(struct_def) => { let d = mk_lldecl(abi::Rust); set_inline_hint(d); base::trans_tuple_struct(ccx, struct_def.fields.as_slice(), struct_def.ctor_id.expect("ast-mapped tuple struct \ didn't have a ctor id"), psubsts, d); d } // Ugh -- but this ensures any new variants won't be forgotten ast_map::NodeForeignItem(..) | ast_map::NodeLifetime(..) | ast_map::NodeExpr(..) | ast_map::NodeStmt(..) | ast_map::NodeArg(..) | ast_map::NodeBlock(..) | ast_map::NodePat(..) | ast_map::NodeLocal(..) => { ccx.sess().bug(format!("can't monomorphize a {}", map_node).as_slice()) } }; ccx.monomorphizing().borrow_mut().insert(fn_id, depth); debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id)); (lldecl, true) } #[deriving(PartialEq, Eq, Hash, Show)] pub struct MonoId<'tcx> { pub def: ast::DefId, pub params: subst::VecPerParamSpace<Ty<'tcx>> }
{ debug!("monomorphic_fn(\ fn_id={}, \ real_substs={}, \ ref_id={})", fn_id.repr(ccx.tcx()), psubsts.repr(ccx.tcx()), ref_id); assert!(psubsts.types.all(|t| { !ty::type_needs_infer(*t) && !ty::type_has_params(*t) })); let _icx = push_ctxt("monomorphic_fn"); let hash_id = MonoId { def: fn_id, params: psubsts.types.clone() };
identifier_body
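
The monomorphize.rs rows above revolve around two mechanisms: finished instantiations are cached under a key of definition id plus type parameters (MonoId), and an in-flight depth counter per definition aborts once the session's recursion limit is exceeded. Below is a minimal sketch of that shape using std HashMaps; DefId, Substs, MonoKey, Instantiator and the limit value are illustrative stand-ins, not rustc's actual types.

use std::collections::HashMap;

// Hypothetical stand-ins for rustc's DefId and type substitutions;
// the real code keys its cache on MonoId { def, params } in the same way.
type DefId = u32;
type Substs = Vec<&'static str>;

#[derive(PartialEq, Eq, Hash)]
struct MonoKey {
    def: DefId,
    params: Substs,
}

struct Instantiator {
    monomorphized: HashMap<MonoKey, String>, // finished instantiations: key -> symbol
    monomorphizing: HashMap<DefId, usize>,   // in-flight recursion depth per definition
    recursion_limit: usize,
}

impl Instantiator {
    fn monomorphic_fn(&mut self, def: DefId, params: Substs) -> String {
        let key = MonoKey { def, params: params.clone() };
        // Fast path: an earlier call already produced this instantiation.
        if let Some(symbol) = self.monomorphized.get(&key) {
            return symbol.clone();
        }
        // Cut off runaway recursive instantiation, as the record's code does
        // with sess().recursion_limit.
        let depth = *self.monomorphizing.get(&def).unwrap_or(&0);
        if depth > self.recursion_limit {
            panic!("reached the recursion limit during monomorphization");
        }
        self.monomorphizing.insert(def, depth + 1);

        // Stand-in for declaring the LLVM value and translating the body.
        let symbol = format!("fn{}_{}", def, params.join("_"));
        self.monomorphized.insert(key, symbol.clone());

        // Restore the previous depth once this instantiation is finished.
        self.monomorphizing.insert(def, depth);
        symbol
    }
}

fn main() {
    let mut inst = Instantiator {
        monomorphized: HashMap::new(),
        monomorphizing: HashMap::new(),
        recursion_limit: 64, // illustrative; the real limit comes from the session
    };
    let a = inst.monomorphic_fn(7, vec!["u32"]);
    let b = inst.monomorphic_fn(7, vec!["u32"]); // cache hit, same symbol
    assert_eq!(a, b);
}
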
monomorphize.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use back::link::exported_name; use session; use llvm::ValueRef; use llvm; use middle::subst; use middle::subst::Subst; use trans::base::{set_llvm_fn_attrs, set_inline_hint}; use trans::base::{trans_enum_variant, push_ctxt, get_item_val}; use trans::base::{trans_fn, decl_internal_rust_fn}; use trans::base; use trans::common::*; use trans::foreign; use middle::ty::{mod, Ty}; use util::ppaux::Repr; use syntax::abi; use syntax::ast; use syntax::ast_map; use syntax::ast_util::{local_def, PostExpansionMethod}; use syntax::attr; use std::hash::{sip, Hash}; pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_id: ast::DefId, psubsts: &subst::Substs<'tcx>, ref_id: Option<ast::NodeId>) -> (ValueRef, bool) { debug!("monomorphic_fn(\ fn_id={}, \ real_substs={}, \ ref_id={})", fn_id.repr(ccx.tcx()), psubsts.repr(ccx.tcx()), ref_id); assert!(psubsts.types.all(|t| { !ty::type_needs_infer(*t) &&!ty::type_has_params(*t) })); let _icx = push_ctxt("monomorphic_fn"); let hash_id = MonoId { def: fn_id, params: psubsts.types.clone() }; match ccx.monomorphized().borrow().get(&hash_id) { Some(&val) => { debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id)); return (val, false); } None => () } debug!("monomorphic_fn(\ fn_id={}, \ psubsts={}, \ hash_id={})", fn_id.repr(ccx.tcx()), psubsts.repr(ccx.tcx()), hash_id); let tpt = ty::lookup_item_type(ccx.tcx(), fn_id); let llitem_ty = tpt.ty; let map_node = session::expect( ccx.sess(), ccx.tcx().map.find(fn_id.node), || { format!("while monomorphizing {}, couldn't find it in \ the item map (may have attempted to monomorphize \ an item defined in a different crate?)", fn_id) }); match map_node { ast_map::NodeForeignItem(_) => { if ccx.tcx().map.get_foreign_abi(fn_id.node)!= abi::RustIntrinsic { // Foreign externs don't have to be monomorphized. return (get_item_val(ccx, fn_id.node), true); } } _ => {} } debug!("monomorphic_fn about to subst into {}", llitem_ty.repr(ccx.tcx())); let mono_ty = llitem_ty.subst(ccx.tcx(), psubsts); ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1); let depth; { let mut monomorphizing = ccx.monomorphizing().borrow_mut(); depth = match monomorphizing.get(&fn_id) { Some(&d) => d, None => 0 }; // Random cut-off -- code that needs to instantiate the same function // recursively more than thirty times can probably safely be assumed // to be causing an infinite expansion. if depth > ccx.sess().recursion_limit.get() { ccx.sess().span_fatal(ccx.tcx().map.span(fn_id.node), "reached the recursion limit during monomorphization");
let hash; let s = { let mut state = sip::SipState::new(); hash_id.hash(&mut state); mono_ty.hash(&mut state); hash = format!("h{}", state.result()); ccx.tcx().map.with_path(fn_id.node, |path| { exported_name(path, hash.as_slice()) }) }; debug!("monomorphize_fn mangled to {}", s); // This shouldn't need to option dance. let mut hash_id = Some(hash_id); let mk_lldecl = |abi: abi::Abi| { let lldecl = if abi!= abi::Rust { foreign::decl_rust_fn_with_foreign_abi(ccx, mono_ty, s.as_slice()) } else { decl_internal_rust_fn(ccx, mono_ty, s.as_slice()) }; ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl); lldecl }; let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| { base::update_linkage(ccx, lldecl, None, base::OriginalTranslation); set_llvm_fn_attrs(ccx, attrs, lldecl); let is_first =!ccx.available_monomorphizations().borrow().contains(&s); if is_first { ccx.available_monomorphizations().borrow_mut().insert(s.clone()); } let trans_everywhere = attr::requests_inline(attrs); if trans_everywhere &&!is_first { llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage); } // If `true`, then `lldecl` should be given a function body. // Otherwise, it should be left as a declaration of an external // function, with no definition in the current compilation unit. trans_everywhere || is_first }; let lldecl = match map_node { ast_map::NodeItem(i) => { match *i { ast::Item { node: ast::ItemFn(ref decl, _, abi, _, ref body), .. } => { let d = mk_lldecl(abi); let needs_body = setup_lldecl(d, i.attrs.as_slice()); if needs_body { if abi!= abi::Rust { foreign::trans_rust_fn_with_foreign_abi( ccx, &**decl, &**body, &[], d, psubsts, fn_id.node, Some(hash.as_slice())); } else { trans_fn(ccx, &**decl, &**body, d, psubsts, fn_id.node, &[]); } } d } _ => { ccx.sess().bug("Can't monomorphize this kind of item") } } } ast_map::NodeVariant(v) => { let parent = ccx.tcx().map.get_parent(fn_id.node); let tvs = ty::enum_variants(ccx.tcx(), local_def(parent)); let this_tv = tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap(); let d = mk_lldecl(abi::Rust); set_inline_hint(d); match v.node.kind { ast::TupleVariantKind(ref args) => { trans_enum_variant(ccx, parent, &*v, args.as_slice(), this_tv.disr_val, psubsts, d); } ast::StructVariantKind(_) => ccx.sess().bug("can't monomorphize struct variants"), } d } ast_map::NodeImplItem(ii) => { match *ii { ast::MethodImplItem(ref mth) => { let d = mk_lldecl(abi::Rust); let needs_body = setup_lldecl(d, mth.attrs.as_slice()); if needs_body { trans_fn(ccx, mth.pe_fn_decl(), mth.pe_body(), d, psubsts, mth.id, &[]); } d } ast::TypeImplItem(_) => { ccx.sess().bug("can't monomorphize an associated type") } } } ast_map::NodeTraitItem(method) => { match *method { ast::ProvidedMethod(ref mth) => { let d = mk_lldecl(abi::Rust); let needs_body = setup_lldecl(d, mth.attrs.as_slice()); if needs_body { trans_fn(ccx, mth.pe_fn_decl(), mth.pe_body(), d, psubsts, mth.id, &[]); } d } _ => { ccx.sess().bug(format!("can't monomorphize a {}", map_node).as_slice()) } } } ast_map::NodeStructCtor(struct_def) => { let d = mk_lldecl(abi::Rust); set_inline_hint(d); base::trans_tuple_struct(ccx, struct_def.fields.as_slice(), struct_def.ctor_id.expect("ast-mapped tuple struct \ didn't have a ctor id"), psubsts, d); d } // Ugh -- but this ensures any new variants won't be forgotten ast_map::NodeForeignItem(..) | ast_map::NodeLifetime(..) | ast_map::NodeExpr(..) | ast_map::NodeStmt(..) | ast_map::NodeArg(..) | ast_map::NodeBlock(..) | ast_map::NodePat(..) | ast_map::NodeLocal(..) 
=> { ccx.sess().bug(format!("can't monomorphize a {}", map_node).as_slice()) } }; ccx.monomorphizing().borrow_mut().insert(fn_id, depth); debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id)); (lldecl, true) } #[deriving(PartialEq, Eq, Hash, Show)] pub struct MonoId<'tcx> { pub def: ast::DefId, pub params: subst::VecPerParamSpace<Ty<'tcx>> }
} monomorphizing.insert(fn_id, depth + 1); }
random_line_split
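
The same prefix also shows how an instantiation gets its linker symbol: the MonoId and the monomorphized type are hashed with SipHash and the result is appended as an 'h<hex>' suffix to the item's path. A rough equivalent using std's DefaultHasher (also SipHash-based); the path, def_id and type strings here are made up for illustration.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// `path`, `def_id` and `mono_ty` are illustrative inputs, not rustc types.
fn mangled_name(path: &str, def_id: u32, mono_ty: &str) -> String {
    let mut state = DefaultHasher::new(); // SipHash-based, like sip::SipState above
    (def_id, mono_ty).hash(&mut state);
    // yields something like "mycrate::foo::h<16 hex digits>"
    format!("{}::h{:x}", path, state.finish())
}

fn main() {
    println!("{}", mangled_name("mycrate::foo", 42, "fn(u32) -> u32"));
}
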
monomorphize.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use back::link::exported_name; use session; use llvm::ValueRef; use llvm; use middle::subst; use middle::subst::Subst; use trans::base::{set_llvm_fn_attrs, set_inline_hint}; use trans::base::{trans_enum_variant, push_ctxt, get_item_val}; use trans::base::{trans_fn, decl_internal_rust_fn}; use trans::base; use trans::common::*; use trans::foreign; use middle::ty::{mod, Ty}; use util::ppaux::Repr; use syntax::abi; use syntax::ast; use syntax::ast_map; use syntax::ast_util::{local_def, PostExpansionMethod}; use syntax::attr; use std::hash::{sip, Hash}; pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_id: ast::DefId, psubsts: &subst::Substs<'tcx>, ref_id: Option<ast::NodeId>) -> (ValueRef, bool) { debug!("monomorphic_fn(\ fn_id={}, \ real_substs={}, \ ref_id={})", fn_id.repr(ccx.tcx()), psubsts.repr(ccx.tcx()), ref_id); assert!(psubsts.types.all(|t| { !ty::type_needs_infer(*t) &&!ty::type_has_params(*t) })); let _icx = push_ctxt("monomorphic_fn"); let hash_id = MonoId { def: fn_id, params: psubsts.types.clone() }; match ccx.monomorphized().borrow().get(&hash_id) { Some(&val) => { debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id)); return (val, false); } None => () } debug!("monomorphic_fn(\ fn_id={}, \ psubsts={}, \ hash_id={})", fn_id.repr(ccx.tcx()), psubsts.repr(ccx.tcx()), hash_id); let tpt = ty::lookup_item_type(ccx.tcx(), fn_id); let llitem_ty = tpt.ty; let map_node = session::expect( ccx.sess(), ccx.tcx().map.find(fn_id.node), || { format!("while monomorphizing {}, couldn't find it in \ the item map (may have attempted to monomorphize \ an item defined in a different crate?)", fn_id) }); match map_node { ast_map::NodeForeignItem(_) => { if ccx.tcx().map.get_foreign_abi(fn_id.node)!= abi::RustIntrinsic { // Foreign externs don't have to be monomorphized. return (get_item_val(ccx, fn_id.node), true); } } _ =>
} debug!("monomorphic_fn about to subst into {}", llitem_ty.repr(ccx.tcx())); let mono_ty = llitem_ty.subst(ccx.tcx(), psubsts); ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1); let depth; { let mut monomorphizing = ccx.monomorphizing().borrow_mut(); depth = match monomorphizing.get(&fn_id) { Some(&d) => d, None => 0 }; // Random cut-off -- code that needs to instantiate the same function // recursively more than thirty times can probably safely be assumed // to be causing an infinite expansion. if depth > ccx.sess().recursion_limit.get() { ccx.sess().span_fatal(ccx.tcx().map.span(fn_id.node), "reached the recursion limit during monomorphization"); } monomorphizing.insert(fn_id, depth + 1); } let hash; let s = { let mut state = sip::SipState::new(); hash_id.hash(&mut state); mono_ty.hash(&mut state); hash = format!("h{}", state.result()); ccx.tcx().map.with_path(fn_id.node, |path| { exported_name(path, hash.as_slice()) }) }; debug!("monomorphize_fn mangled to {}", s); // This shouldn't need to option dance. let mut hash_id = Some(hash_id); let mk_lldecl = |abi: abi::Abi| { let lldecl = if abi!= abi::Rust { foreign::decl_rust_fn_with_foreign_abi(ccx, mono_ty, s.as_slice()) } else { decl_internal_rust_fn(ccx, mono_ty, s.as_slice()) }; ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl); lldecl }; let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| { base::update_linkage(ccx, lldecl, None, base::OriginalTranslation); set_llvm_fn_attrs(ccx, attrs, lldecl); let is_first =!ccx.available_monomorphizations().borrow().contains(&s); if is_first { ccx.available_monomorphizations().borrow_mut().insert(s.clone()); } let trans_everywhere = attr::requests_inline(attrs); if trans_everywhere &&!is_first { llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage); } // If `true`, then `lldecl` should be given a function body. // Otherwise, it should be left as a declaration of an external // function, with no definition in the current compilation unit. trans_everywhere || is_first }; let lldecl = match map_node { ast_map::NodeItem(i) => { match *i { ast::Item { node: ast::ItemFn(ref decl, _, abi, _, ref body), .. 
} => { let d = mk_lldecl(abi); let needs_body = setup_lldecl(d, i.attrs.as_slice()); if needs_body { if abi!= abi::Rust { foreign::trans_rust_fn_with_foreign_abi( ccx, &**decl, &**body, &[], d, psubsts, fn_id.node, Some(hash.as_slice())); } else { trans_fn(ccx, &**decl, &**body, d, psubsts, fn_id.node, &[]); } } d } _ => { ccx.sess().bug("Can't monomorphize this kind of item") } } } ast_map::NodeVariant(v) => { let parent = ccx.tcx().map.get_parent(fn_id.node); let tvs = ty::enum_variants(ccx.tcx(), local_def(parent)); let this_tv = tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap(); let d = mk_lldecl(abi::Rust); set_inline_hint(d); match v.node.kind { ast::TupleVariantKind(ref args) => { trans_enum_variant(ccx, parent, &*v, args.as_slice(), this_tv.disr_val, psubsts, d); } ast::StructVariantKind(_) => ccx.sess().bug("can't monomorphize struct variants"), } d } ast_map::NodeImplItem(ii) => { match *ii { ast::MethodImplItem(ref mth) => { let d = mk_lldecl(abi::Rust); let needs_body = setup_lldecl(d, mth.attrs.as_slice()); if needs_body { trans_fn(ccx, mth.pe_fn_decl(), mth.pe_body(), d, psubsts, mth.id, &[]); } d } ast::TypeImplItem(_) => { ccx.sess().bug("can't monomorphize an associated type") } } } ast_map::NodeTraitItem(method) => { match *method { ast::ProvidedMethod(ref mth) => { let d = mk_lldecl(abi::Rust); let needs_body = setup_lldecl(d, mth.attrs.as_slice()); if needs_body { trans_fn(ccx, mth.pe_fn_decl(), mth.pe_body(), d, psubsts, mth.id, &[]); } d } _ => { ccx.sess().bug(format!("can't monomorphize a {}", map_node).as_slice()) } } } ast_map::NodeStructCtor(struct_def) => { let d = mk_lldecl(abi::Rust); set_inline_hint(d); base::trans_tuple_struct(ccx, struct_def.fields.as_slice(), struct_def.ctor_id.expect("ast-mapped tuple struct \ didn't have a ctor id"), psubsts, d); d } // Ugh -- but this ensures any new variants won't be forgotten ast_map::NodeForeignItem(..) | ast_map::NodeLifetime(..) | ast_map::NodeExpr(..) | ast_map::NodeStmt(..) | ast_map::NodeArg(..) | ast_map::NodeBlock(..) | ast_map::NodePat(..) | ast_map::NodeLocal(..) => { ccx.sess().bug(format!("can't monomorphize a {}", map_node).as_slice()) } }; ccx.monomorphizing().borrow_mut().insert(fn_id, depth); debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id)); (lldecl, true) } #[deriving(PartialEq, Eq, Hash, Show)] pub struct MonoId<'tcx> { pub def: ast::DefId, pub params: subst::VecPerParamSpace<Ty<'tcx>> }
{}
conditional_block
rom.rs
use std::io::Write; use std::fs::File; #[allow(dead_code)] pub struct INesFile { magic: [u8; 4], has_trainer: bool, pub mapper: u8, pub prg_rom_cnt: u8, prg_rom_size: u32, chr_rom_size: u32, prg_ram_size: u32, flags6: u8, flags7: u8, flags9: u8, flags10: u8, zeros: [u8; 5], trainer: [u8; 0x200], pub prg_rom: Vec<[u8; 0x4000]>, // TODO make this a Vec of [u8; 0x4000] (Vector of 16kB pages) chr_rom: Vec<u8>, pc_inst_rom: Vec<u8>, pc_prom: Vec<u8>, title: Vec<u8>, } impl INesFile { pub fn
(bin: Vec<u8>) -> INesFile { let header = &bin[0..16]; let m = [ header[0], header[1], header[2], header[3] ]; if m[0]!= 'N' as u8 || m[1]!= 'E' as u8 || m[2]!= 'S' as u8 || m[3]!= 0x1A { panic!("Invalid ROM!"); } let prg_rom_cnt = header[4]; let prg_rom_size = prg_rom_cnt as u32 * 16384; let chr_rom_cnt = header[5]; let chr_rom_size = chr_rom_cnt as u32 * 8192; let flags6 = header[6]; let flags7 = header[7]; let prg_ram_size = header[8] as u32 * 8192; let flags9 = header[9]; let flags10 = header[10]; let zeros = [ header[11], header[12], header[13], header[14], header[15]]; let mapper = (flags7 & 0xF0) | ((flags6 & 0xF0) >> 4); let mut pos: usize = 16; let has_trainer: bool; let mut trainer = [0u8; 0x200]; let mut prg_rom: Vec<[u8; 0x4000]> = Vec::new(); if (flags6 & 0x04) > 0 { has_trainer = true; for i in 0..0x200 { trainer[i] = bin[pos]; pos = pos + 1; } } else { has_trainer = false; } for _i in 0..prg_rom_cnt { let mut page = [0u8; 0x4000]; for j in 0..0x4000 { page[j] = bin[pos]; pos = pos + 1; } prg_rom.push(page); } INesFile { magic: m, has_trainer: has_trainer, mapper: mapper, prg_rom_size: prg_rom_size, prg_rom_cnt: prg_rom_cnt, chr_rom_size: chr_rom_size, flags6: flags6, flags7: flags7, prg_ram_size: prg_ram_size, flags9: flags9, flags10: flags10, zeros: zeros, trainer: trainer, prg_rom: prg_rom, //TODO finish initializing these properly chr_rom: vec![0], pc_inst_rom: vec![0], pc_prom: vec![0], title: vec![0], } } #[allow(dead_code)] pub fn info(&self) { println!("has_trainer: {}", self.has_trainer); println!("mapper: {}", self.mapper); println!("prg_rom_size: {}", self.prg_rom_size); println!("chr_rom_size: {}", self.chr_rom_size); println!("prg_ram_size: {}", self.prg_ram_size); println!("flags6: 0x{:x}", self.flags6); println!("flags7: 0x{:x}", self.flags7); println!("flags9: 0x{:x}", self.flags9); println!("flags10: 0x{:x}", self.flags10); println!("has_trainer: {}", self.has_trainer); //println!("prg_rom: {:?}", self.prg_rom); } #[allow(dead_code)] pub fn dump_prg_rom(&self) { let mut f = File::create("prg0.rom").unwrap(); let buf = &self.prg_rom[0][..]; f.write_all(buf).expect("failed to write to file"); } }
load
identifier_name
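
The rom.rs rows parse a 16-byte iNES header: the magic bytes 'N','E','S',0x1A, a PRG-ROM page count in 16 KiB units at byte 4, a CHR-ROM count in 8 KiB units at byte 5, the mapper number split across the high nibbles of flags 6 and 7, and a trainer flag in bit 2 of flags 6. A hedged, self-contained re-derivation of just that header logic (Header and parse_header are names invented for the sketch):

#[derive(Debug)]
struct Header {
    prg_rom_pages: u8,
    chr_rom_pages: u8,
    mapper: u8,
    has_trainer: bool,
}

fn parse_header(header: &[u8]) -> Result<Header, &'static str> {
    if header.len() < 16 {
        return Err("header too short");
    }
    if &header[0..4] != &b"NES\x1A"[..] {
        return Err("invalid ROM: bad magic");
    }
    let flags6 = header[6];
    let flags7 = header[7];
    Ok(Header {
        prg_rom_pages: header[4],          // 16 KiB units
        chr_rom_pages: header[5],          // 8 KiB units
        // high nibble of flags7 is the mapper's upper 4 bits,
        // high nibble of flags6 its lower 4 bits
        mapper: (flags7 & 0xF0) | ((flags6 & 0xF0) >> 4),
        has_trainer: flags6 & 0x04 != 0,   // a 512-byte trainer follows the header
    })
}

fn main() {
    let mut raw = [0u8; 16];
    raw[0..4].copy_from_slice(b"NES\x1A");
    raw[4] = 2;    // two 16 KiB PRG pages
    raw[6] = 0x14; // trainer bit set, mapper low nibble = 1
    println!("{:?}", parse_header(&raw));
}
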
rom.rs
use std::io::Write; use std::fs::File; #[allow(dead_code)] pub struct INesFile { magic: [u8; 4], has_trainer: bool, pub mapper: u8, pub prg_rom_cnt: u8, prg_rom_size: u32, chr_rom_size: u32, prg_ram_size: u32, flags6: u8, flags7: u8, flags9: u8, flags10: u8, zeros: [u8; 5], trainer: [u8; 0x200], pub prg_rom: Vec<[u8; 0x4000]>, // TODO make this a Vec of [u8; 0x4000] (Vector of 16kB pages) chr_rom: Vec<u8>, pc_inst_rom: Vec<u8>, pc_prom: Vec<u8>, title: Vec<u8>, } impl INesFile { pub fn load(bin: Vec<u8>) -> INesFile { let header = &bin[0..16]; let m = [ header[0], header[1], header[2], header[3] ]; if m[0]!= 'N' as u8 || m[1]!= 'E' as u8 || m[2]!= 'S' as u8 || m[3]!= 0x1A { panic!("Invalid ROM!"); } let prg_rom_cnt = header[4]; let prg_rom_size = prg_rom_cnt as u32 * 16384; let chr_rom_cnt = header[5]; let chr_rom_size = chr_rom_cnt as u32 * 8192; let flags6 = header[6]; let flags7 = header[7]; let prg_ram_size = header[8] as u32 * 8192; let flags9 = header[9]; let flags10 = header[10]; let zeros = [ header[11], header[12], header[13], header[14], header[15]]; let mapper = (flags7 & 0xF0) | ((flags6 & 0xF0) >> 4); let mut pos: usize = 16; let has_trainer: bool; let mut trainer = [0u8; 0x200]; let mut prg_rom: Vec<[u8; 0x4000]> = Vec::new(); if (flags6 & 0x04) > 0 { has_trainer = true; for i in 0..0x200 { trainer[i] = bin[pos]; pos = pos + 1; } } else { has_trainer = false; } for _i in 0..prg_rom_cnt { let mut page = [0u8; 0x4000]; for j in 0..0x4000 { page[j] = bin[pos]; pos = pos + 1; } prg_rom.push(page); } INesFile { magic: m, has_trainer: has_trainer, mapper: mapper, prg_rom_size: prg_rom_size, prg_rom_cnt: prg_rom_cnt, chr_rom_size: chr_rom_size, flags6: flags6, flags7: flags7, prg_ram_size: prg_ram_size, flags9: flags9, flags10: flags10, zeros: zeros, trainer: trainer, prg_rom: prg_rom, //TODO finish initializing these properly chr_rom: vec![0], pc_inst_rom: vec![0], pc_prom: vec![0], title: vec![0], } } #[allow(dead_code)] pub fn info(&self) { println!("has_trainer: {}", self.has_trainer); println!("mapper: {}", self.mapper); println!("prg_rom_size: {}", self.prg_rom_size); println!("chr_rom_size: {}", self.chr_rom_size); println!("prg_ram_size: {}", self.prg_ram_size); println!("flags6: 0x{:x}", self.flags6); println!("flags7: 0x{:x}", self.flags7); println!("flags9: 0x{:x}", self.flags9); println!("flags10: 0x{:x}", self.flags10); println!("has_trainer: {}", self.has_trainer); //println!("prg_rom: {:?}", self.prg_rom); } #[allow(dead_code)] pub fn dump_prg_rom(&self) { let mut f = File::create("prg0.rom").unwrap();
f.write_all(buf).expect("failed to write to file"); } }
let buf = &self.prg_rom[0][..];
random_line_split
rom.rs
use std::io::Write; use std::fs::File; #[allow(dead_code)] pub struct INesFile { magic: [u8; 4], has_trainer: bool, pub mapper: u8, pub prg_rom_cnt: u8, prg_rom_size: u32, chr_rom_size: u32, prg_ram_size: u32, flags6: u8, flags7: u8, flags9: u8, flags10: u8, zeros: [u8; 5], trainer: [u8; 0x200], pub prg_rom: Vec<[u8; 0x4000]>, // TODO make this a Vec of [u8; 0x4000] (Vector of 16kB pages) chr_rom: Vec<u8>, pc_inst_rom: Vec<u8>, pc_prom: Vec<u8>, title: Vec<u8>, } impl INesFile { pub fn load(bin: Vec<u8>) -> INesFile { let header = &bin[0..16]; let m = [ header[0], header[1], header[2], header[3] ]; if m[0]!= 'N' as u8 || m[1]!= 'E' as u8 || m[2]!= 'S' as u8 || m[3]!= 0x1A { panic!("Invalid ROM!"); } let prg_rom_cnt = header[4]; let prg_rom_size = prg_rom_cnt as u32 * 16384; let chr_rom_cnt = header[5]; let chr_rom_size = chr_rom_cnt as u32 * 8192; let flags6 = header[6]; let flags7 = header[7]; let prg_ram_size = header[8] as u32 * 8192; let flags9 = header[9]; let flags10 = header[10]; let zeros = [ header[11], header[12], header[13], header[14], header[15]]; let mapper = (flags7 & 0xF0) | ((flags6 & 0xF0) >> 4); let mut pos: usize = 16; let has_trainer: bool; let mut trainer = [0u8; 0x200]; let mut prg_rom: Vec<[u8; 0x4000]> = Vec::new(); if (flags6 & 0x04) > 0
else { has_trainer = false; } for _i in 0..prg_rom_cnt { let mut page = [0u8; 0x4000]; for j in 0..0x4000 { page[j] = bin[pos]; pos = pos + 1; } prg_rom.push(page); } INesFile { magic: m, has_trainer: has_trainer, mapper: mapper, prg_rom_size: prg_rom_size, prg_rom_cnt: prg_rom_cnt, chr_rom_size: chr_rom_size, flags6: flags6, flags7: flags7, prg_ram_size: prg_ram_size, flags9: flags9, flags10: flags10, zeros: zeros, trainer: trainer, prg_rom: prg_rom, //TODO finish initializing these properly chr_rom: vec![0], pc_inst_rom: vec![0], pc_prom: vec![0], title: vec![0], } } #[allow(dead_code)] pub fn info(&self) { println!("has_trainer: {}", self.has_trainer); println!("mapper: {}", self.mapper); println!("prg_rom_size: {}", self.prg_rom_size); println!("chr_rom_size: {}", self.chr_rom_size); println!("prg_ram_size: {}", self.prg_ram_size); println!("flags6: 0x{:x}", self.flags6); println!("flags7: 0x{:x}", self.flags7); println!("flags9: 0x{:x}", self.flags9); println!("flags10: 0x{:x}", self.flags10); println!("has_trainer: {}", self.has_trainer); //println!("prg_rom: {:?}", self.prg_rom); } #[allow(dead_code)] pub fn dump_prg_rom(&self) { let mut f = File::create("prg0.rom").unwrap(); let buf = &self.prg_rom[0][..]; f.write_all(buf).expect("failed to write to file"); } }
{ has_trainer = true; for i in 0..0x200 { trainer[i] = bin[pos]; pos = pos + 1; } }
conditional_block
rom.rs
use std::io::Write; use std::fs::File; #[allow(dead_code)] pub struct INesFile { magic: [u8; 4], has_trainer: bool, pub mapper: u8, pub prg_rom_cnt: u8, prg_rom_size: u32, chr_rom_size: u32, prg_ram_size: u32, flags6: u8, flags7: u8, flags9: u8, flags10: u8, zeros: [u8; 5], trainer: [u8; 0x200], pub prg_rom: Vec<[u8; 0x4000]>, // TODO make this a Vec of [u8; 0x4000] (Vector of 16kB pages) chr_rom: Vec<u8>, pc_inst_rom: Vec<u8>, pc_prom: Vec<u8>, title: Vec<u8>, } impl INesFile { pub fn load(bin: Vec<u8>) -> INesFile { let header = &bin[0..16]; let m = [ header[0], header[1], header[2], header[3] ]; if m[0]!= 'N' as u8 || m[1]!= 'E' as u8 || m[2]!= 'S' as u8 || m[3]!= 0x1A { panic!("Invalid ROM!"); } let prg_rom_cnt = header[4]; let prg_rom_size = prg_rom_cnt as u32 * 16384; let chr_rom_cnt = header[5]; let chr_rom_size = chr_rom_cnt as u32 * 8192; let flags6 = header[6]; let flags7 = header[7]; let prg_ram_size = header[8] as u32 * 8192; let flags9 = header[9]; let flags10 = header[10]; let zeros = [ header[11], header[12], header[13], header[14], header[15]]; let mapper = (flags7 & 0xF0) | ((flags6 & 0xF0) >> 4); let mut pos: usize = 16; let has_trainer: bool; let mut trainer = [0u8; 0x200]; let mut prg_rom: Vec<[u8; 0x4000]> = Vec::new(); if (flags6 & 0x04) > 0 { has_trainer = true; for i in 0..0x200 { trainer[i] = bin[pos]; pos = pos + 1; } } else { has_trainer = false; } for _i in 0..prg_rom_cnt { let mut page = [0u8; 0x4000]; for j in 0..0x4000 { page[j] = bin[pos]; pos = pos + 1; } prg_rom.push(page); } INesFile { magic: m, has_trainer: has_trainer, mapper: mapper, prg_rom_size: prg_rom_size, prg_rom_cnt: prg_rom_cnt, chr_rom_size: chr_rom_size, flags6: flags6, flags7: flags7, prg_ram_size: prg_ram_size, flags9: flags9, flags10: flags10, zeros: zeros, trainer: trainer, prg_rom: prg_rom, //TODO finish initializing these properly chr_rom: vec![0], pc_inst_rom: vec![0], pc_prom: vec![0], title: vec![0], } } #[allow(dead_code)] pub fn info(&self) { println!("has_trainer: {}", self.has_trainer); println!("mapper: {}", self.mapper); println!("prg_rom_size: {}", self.prg_rom_size); println!("chr_rom_size: {}", self.chr_rom_size); println!("prg_ram_size: {}", self.prg_ram_size); println!("flags6: 0x{:x}", self.flags6); println!("flags7: 0x{:x}", self.flags7); println!("flags9: 0x{:x}", self.flags9); println!("flags10: 0x{:x}", self.flags10); println!("has_trainer: {}", self.has_trainer); //println!("prg_rom: {:?}", self.prg_rom); } #[allow(dead_code)] pub fn dump_prg_rom(&self)
}
{ let mut f = File::create("prg0.rom").unwrap(); let buf = &self.prg_rom[0][..]; f.write_all(buf).expect("failed to write to file"); }
identifier_body
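
The loader in these rows copies PRG data into Vec<[u8; 0x4000]> byte by byte. The same 16 KiB paging can be expressed with chunks_exact; this is a sketch of that alternative, not a drop-in replacement for the record's load function.

/// Split the PRG region of an iNES image into 16 KiB pages.
/// `data` is the byte stream positioned at the start of PRG-ROM.
fn prg_pages(data: &[u8], page_count: usize) -> Vec<[u8; 0x4000]> {
    data.chunks_exact(0x4000)
        .take(page_count)
        .map(|chunk| {
            let mut page = [0u8; 0x4000];
            page.copy_from_slice(chunk); // lengths match: chunks_exact yields 0x4000-byte slices
            page
        })
        .collect()
}

fn main() {
    let data = vec![0xEAu8; 0x8000]; // two pages of NOPs
    assert_eq!(prg_pages(&data, 2).len(), 2);
}
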
cylinder.rs
// Copyright 2013 The CGMath Developers. For a full listing of the authors, // refer to the Cargo.toml file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Oriented bounding cylinder use point::Point3; use vector::Vector3; #[derive(Copy, Clone, PartialEq, RustcEncodable, RustcDecodable)] pub struct
<S> { pub center: Point3<S>, pub axis: Vector3<S>, pub radius: S, }
Cylinder
identifier_name
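
cylinder.rs only declares the oriented bounding cylinder's fields (center, axis, radius) on top of cgmath's Point3/Vector3. As a usage illustration, here is a point-containment test written against plain [f32; 3] stand-ins, under the assumption that axis holds the half-height vector from the center to a cap; the real crate's semantics may differ.

// Minimal stand-ins so the sketch is self-contained; the real struct
// uses cgmath's Point3<S> and Vector3<S>.
type Vec3 = [f32; 3];

fn sub(a: Vec3, b: Vec3) -> Vec3 { [a[0] - b[0], a[1] - b[1], a[2] - b[2]] }
fn dot(a: Vec3, b: Vec3) -> f32 { a[0] * b[0] + a[1] * b[1] + a[2] * b[2] }

struct Cylinder {
    center: Vec3,
    axis: Vec3,   // assumed here: half-height vector from center to a cap
    radius: f32,
}

impl Cylinder {
    /// Project the point onto the axis and compare the parallel and
    /// perpendicular components against the cylinder's extents.
    fn contains(&self, p: Vec3) -> bool {
        let d = sub(p, self.center);
        let axis_len_sq = dot(self.axis, self.axis);
        if axis_len_sq == 0.0 {
            return false;
        }
        let t = dot(d, self.axis) / axis_len_sq; // within the caps iff |t| <= 1
        let para = [self.axis[0] * t, self.axis[1] * t, self.axis[2] * t];
        let perp = sub(d, para);
        t.abs() <= 1.0 && dot(perp, perp) <= self.radius * self.radius
    }
}

fn main() {
    let c = Cylinder { center: [0.0, 0.0, 0.0], axis: [0.0, 1.0, 0.0], radius: 0.5 };
    assert!(c.contains([0.2, 0.5, 0.0]));
    assert!(!c.contains([0.0, 2.0, 0.0]));
}
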
cylinder.rs
// Copyright 2013 The CGMath Developers. For a full listing of the authors, // refer to the Cargo.toml file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! Oriented bounding cylinder use point::Point3; use vector::Vector3; #[derive(Copy, Clone, PartialEq, RustcEncodable, RustcDecodable)] pub struct Cylinder<S> { pub center: Point3<S>, pub axis: Vector3<S>, pub radius: S, }
// See the License for the specific language governing permissions and // limitations under the License.
random_line_split
lib.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ #[macro_use] extern crate lazy_static; #[macro_use] extern crate log; #[macro_use] extern crate num_derive; #[macro_use] extern crate serde; pub mod resources; use crossbeam_channel::{Receiver, Sender}; use ipc_channel::ipc::IpcSender; use keyboard_types::KeyboardEvent; use msg::constellation_msg::{InputMethodType, PipelineId, TopLevelBrowsingContextId}; use servo_url::ServoUrl; use std::fmt::{Debug, Error, Formatter}; use webrender_api::units::{DeviceIntPoint, DeviceIntSize}; pub use webxr_api::MainThreadWaker as EventLoopWaker; /// A cursor for the window. This is different from a CSS cursor (see /// `CursorKind`) in that it has no `Auto` value. #[repr(u8)] #[derive(Clone, Copy, Deserialize, Eq, FromPrimitive, PartialEq, Serialize)] pub enum Cursor { None, Default, Pointer, ContextMenu, Help, Progress, Wait, Cell, Crosshair, Text, VerticalText, Alias, Copy, Move,
Grabbing, EResize, NResize, NeResize, NwResize, SResize, SeResize, SwResize, WResize, EwResize, NsResize, NeswResize, NwseResize, ColResize, RowResize, AllScroll, ZoomIn, ZoomOut, } /// Sends messages to the embedder. pub struct EmbedderProxy { pub sender: Sender<(Option<TopLevelBrowsingContextId>, EmbedderMsg)>, pub event_loop_waker: Box<dyn EventLoopWaker>, } impl EmbedderProxy { pub fn send(&self, msg: (Option<TopLevelBrowsingContextId>, EmbedderMsg)) { // Send a message and kick the OS event loop awake. if let Err(err) = self.sender.send(msg) { warn!("Failed to send response ({:?}).", err); } self.event_loop_waker.wake(); } } impl Clone for EmbedderProxy { fn clone(&self) -> EmbedderProxy { EmbedderProxy { sender: self.sender.clone(), event_loop_waker: self.event_loop_waker.clone(), } } } /// The port that the embedder receives messages on. pub struct EmbedderReceiver { pub receiver: Receiver<(Option<TopLevelBrowsingContextId>, EmbedderMsg)>, } impl EmbedderReceiver { pub fn try_recv_embedder_msg( &mut self, ) -> Option<(Option<TopLevelBrowsingContextId>, EmbedderMsg)> { self.receiver.try_recv().ok() } pub fn recv_embedder_msg(&mut self) -> (Option<TopLevelBrowsingContextId>, EmbedderMsg) { self.receiver.recv().unwrap() } } #[derive(Deserialize, Serialize)] pub enum EmbedderMsg { /// A status message to be displayed by the browser chrome. Status(Option<String>), /// Alerts the embedder that the current page has changed its title. ChangePageTitle(Option<String>), /// Move the window to a point MoveTo(DeviceIntPoint), /// Resize the window to size ResizeTo(DeviceIntSize), // Show an alert message. Alert(String, IpcSender<()>), /// Wether or not to allow a pipeline to load a url. AllowNavigationRequest(PipelineId, ServoUrl), /// Whether or not to allow script to open a new tab/browser AllowOpeningBrowser(IpcSender<bool>), /// A new browser was created by script BrowserCreated(TopLevelBrowsingContextId), /// Wether or not to unload a document AllowUnload(IpcSender<bool>), /// Sends an unconsumed key event back to the embedder. Keyboard(KeyboardEvent), /// Gets system clipboard contents GetClipboardContents(IpcSender<String>), /// Sets system clipboard contents SetClipboardContents(String), /// Changes the cursor. SetCursor(Cursor), /// A favicon was detected NewFavicon(ServoUrl), /// <head> tag finished parsing HeadParsed, /// The history state has changed. HistoryChanged(Vec<ServoUrl>, usize), /// Enter or exit fullscreen SetFullscreenState(bool), /// The load of a page has begun LoadStart, /// The load of a page has completed LoadComplete, /// A browser is to be closed CloseBrowser, /// A pipeline panicked. First string is the reason, second one is the backtrace. Panic(String, Option<String>), /// Open dialog to select bluetooth device. GetSelectedBluetoothDevice(Vec<String>, IpcSender<Option<String>>), /// Open file dialog to select files. Set boolean flag to true allows to select multiple files. SelectFiles(Vec<FilterPattern>, bool, IpcSender<Option<Vec<String>>>), /// Request to present an IME to the user when an editable element is focused. ShowIME(InputMethodType), /// Request to hide the IME when the editable element is blurred. HideIME, /// Servo has shut down Shutdown, /// Report a complete sampled profile ReportProfile(Vec<u8>), } impl Debug for EmbedderMsg { fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { match *self { EmbedderMsg::Status(..) => write!(f, "Status"), EmbedderMsg::ChangePageTitle(..) => write!(f, "ChangePageTitle"), EmbedderMsg::MoveTo(..) 
=> write!(f, "MoveTo"), EmbedderMsg::ResizeTo(..) => write!(f, "ResizeTo"), EmbedderMsg::Alert(..) => write!(f, "Alert"), EmbedderMsg::AllowUnload(..) => write!(f, "AllowUnload"), EmbedderMsg::AllowNavigationRequest(..) => write!(f, "AllowNavigationRequest"), EmbedderMsg::Keyboard(..) => write!(f, "Keyboard"), EmbedderMsg::GetClipboardContents(..) => write!(f, "GetClipboardContents"), EmbedderMsg::SetClipboardContents(..) => write!(f, "SetClipboardContents"), EmbedderMsg::SetCursor(..) => write!(f, "SetCursor"), EmbedderMsg::NewFavicon(..) => write!(f, "NewFavicon"), EmbedderMsg::HeadParsed => write!(f, "HeadParsed"), EmbedderMsg::CloseBrowser => write!(f, "CloseBrowser"), EmbedderMsg::HistoryChanged(..) => write!(f, "HistoryChanged"), EmbedderMsg::SetFullscreenState(..) => write!(f, "SetFullscreenState"), EmbedderMsg::LoadStart => write!(f, "LoadStart"), EmbedderMsg::LoadComplete => write!(f, "LoadComplete"), EmbedderMsg::Panic(..) => write!(f, "Panic"), EmbedderMsg::GetSelectedBluetoothDevice(..) => write!(f, "GetSelectedBluetoothDevice"), EmbedderMsg::SelectFiles(..) => write!(f, "SelectFiles"), EmbedderMsg::ShowIME(..) => write!(f, "ShowIME"), EmbedderMsg::HideIME => write!(f, "HideIME"), EmbedderMsg::Shutdown => write!(f, "Shutdown"), EmbedderMsg::AllowOpeningBrowser(..) => write!(f, "AllowOpeningBrowser"), EmbedderMsg::BrowserCreated(..) => write!(f, "BrowserCreated"), EmbedderMsg::ReportProfile(..) => write!(f, "ReportProfile"), } } } /// Filter for file selection; /// the `String` content is expected to be extension (e.g, "doc", without the prefixing ".") #[derive(Clone, Debug, Deserialize, Serialize)] pub struct FilterPattern(pub String);
NoDrop, NotAllowed, Grab,
random_line_split
lib.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ #[macro_use] extern crate lazy_static; #[macro_use] extern crate log; #[macro_use] extern crate num_derive; #[macro_use] extern crate serde; pub mod resources; use crossbeam_channel::{Receiver, Sender}; use ipc_channel::ipc::IpcSender; use keyboard_types::KeyboardEvent; use msg::constellation_msg::{InputMethodType, PipelineId, TopLevelBrowsingContextId}; use servo_url::ServoUrl; use std::fmt::{Debug, Error, Formatter}; use webrender_api::units::{DeviceIntPoint, DeviceIntSize}; pub use webxr_api::MainThreadWaker as EventLoopWaker; /// A cursor for the window. This is different from a CSS cursor (see /// `CursorKind`) in that it has no `Auto` value. #[repr(u8)] #[derive(Clone, Copy, Deserialize, Eq, FromPrimitive, PartialEq, Serialize)] pub enum Cursor { None, Default, Pointer, ContextMenu, Help, Progress, Wait, Cell, Crosshair, Text, VerticalText, Alias, Copy, Move, NoDrop, NotAllowed, Grab, Grabbing, EResize, NResize, NeResize, NwResize, SResize, SeResize, SwResize, WResize, EwResize, NsResize, NeswResize, NwseResize, ColResize, RowResize, AllScroll, ZoomIn, ZoomOut, } /// Sends messages to the embedder. pub struct EmbedderProxy { pub sender: Sender<(Option<TopLevelBrowsingContextId>, EmbedderMsg)>, pub event_loop_waker: Box<dyn EventLoopWaker>, } impl EmbedderProxy { pub fn send(&self, msg: (Option<TopLevelBrowsingContextId>, EmbedderMsg)) { // Send a message and kick the OS event loop awake. if let Err(err) = self.sender.send(msg) { warn!("Failed to send response ({:?}).", err); } self.event_loop_waker.wake(); } } impl Clone for EmbedderProxy { fn clone(&self) -> EmbedderProxy { EmbedderProxy { sender: self.sender.clone(), event_loop_waker: self.event_loop_waker.clone(), } } } /// The port that the embedder receives messages on. pub struct
{ pub receiver: Receiver<(Option<TopLevelBrowsingContextId>, EmbedderMsg)>, } impl EmbedderReceiver { pub fn try_recv_embedder_msg( &mut self, ) -> Option<(Option<TopLevelBrowsingContextId>, EmbedderMsg)> { self.receiver.try_recv().ok() } pub fn recv_embedder_msg(&mut self) -> (Option<TopLevelBrowsingContextId>, EmbedderMsg) { self.receiver.recv().unwrap() } } #[derive(Deserialize, Serialize)] pub enum EmbedderMsg { /// A status message to be displayed by the browser chrome. Status(Option<String>), /// Alerts the embedder that the current page has changed its title. ChangePageTitle(Option<String>), /// Move the window to a point MoveTo(DeviceIntPoint), /// Resize the window to size ResizeTo(DeviceIntSize), // Show an alert message. Alert(String, IpcSender<()>), /// Wether or not to allow a pipeline to load a url. AllowNavigationRequest(PipelineId, ServoUrl), /// Whether or not to allow script to open a new tab/browser AllowOpeningBrowser(IpcSender<bool>), /// A new browser was created by script BrowserCreated(TopLevelBrowsingContextId), /// Wether or not to unload a document AllowUnload(IpcSender<bool>), /// Sends an unconsumed key event back to the embedder. Keyboard(KeyboardEvent), /// Gets system clipboard contents GetClipboardContents(IpcSender<String>), /// Sets system clipboard contents SetClipboardContents(String), /// Changes the cursor. SetCursor(Cursor), /// A favicon was detected NewFavicon(ServoUrl), /// <head> tag finished parsing HeadParsed, /// The history state has changed. HistoryChanged(Vec<ServoUrl>, usize), /// Enter or exit fullscreen SetFullscreenState(bool), /// The load of a page has begun LoadStart, /// The load of a page has completed LoadComplete, /// A browser is to be closed CloseBrowser, /// A pipeline panicked. First string is the reason, second one is the backtrace. Panic(String, Option<String>), /// Open dialog to select bluetooth device. GetSelectedBluetoothDevice(Vec<String>, IpcSender<Option<String>>), /// Open file dialog to select files. Set boolean flag to true allows to select multiple files. SelectFiles(Vec<FilterPattern>, bool, IpcSender<Option<Vec<String>>>), /// Request to present an IME to the user when an editable element is focused. ShowIME(InputMethodType), /// Request to hide the IME when the editable element is blurred. HideIME, /// Servo has shut down Shutdown, /// Report a complete sampled profile ReportProfile(Vec<u8>), } impl Debug for EmbedderMsg { fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { match *self { EmbedderMsg::Status(..) => write!(f, "Status"), EmbedderMsg::ChangePageTitle(..) => write!(f, "ChangePageTitle"), EmbedderMsg::MoveTo(..) => write!(f, "MoveTo"), EmbedderMsg::ResizeTo(..) => write!(f, "ResizeTo"), EmbedderMsg::Alert(..) => write!(f, "Alert"), EmbedderMsg::AllowUnload(..) => write!(f, "AllowUnload"), EmbedderMsg::AllowNavigationRequest(..) => write!(f, "AllowNavigationRequest"), EmbedderMsg::Keyboard(..) => write!(f, "Keyboard"), EmbedderMsg::GetClipboardContents(..) => write!(f, "GetClipboardContents"), EmbedderMsg::SetClipboardContents(..) => write!(f, "SetClipboardContents"), EmbedderMsg::SetCursor(..) => write!(f, "SetCursor"), EmbedderMsg::NewFavicon(..) => write!(f, "NewFavicon"), EmbedderMsg::HeadParsed => write!(f, "HeadParsed"), EmbedderMsg::CloseBrowser => write!(f, "CloseBrowser"), EmbedderMsg::HistoryChanged(..) => write!(f, "HistoryChanged"), EmbedderMsg::SetFullscreenState(..) 
=> write!(f, "SetFullscreenState"), EmbedderMsg::LoadStart => write!(f, "LoadStart"), EmbedderMsg::LoadComplete => write!(f, "LoadComplete"), EmbedderMsg::Panic(..) => write!(f, "Panic"), EmbedderMsg::GetSelectedBluetoothDevice(..) => write!(f, "GetSelectedBluetoothDevice"), EmbedderMsg::SelectFiles(..) => write!(f, "SelectFiles"), EmbedderMsg::ShowIME(..) => write!(f, "ShowIME"), EmbedderMsg::HideIME => write!(f, "HideIME"), EmbedderMsg::Shutdown => write!(f, "Shutdown"), EmbedderMsg::AllowOpeningBrowser(..) => write!(f, "AllowOpeningBrowser"), EmbedderMsg::BrowserCreated(..) => write!(f, "BrowserCreated"), EmbedderMsg::ReportProfile(..) => write!(f, "ReportProfile"), } } } /// Filter for file selection; /// the `String` content is expected to be extension (e.g, "doc", without the prefixing ".") #[derive(Clone, Debug, Deserialize, Serialize)] pub struct FilterPattern(pub String);
EmbedderReceiver
identifier_name
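
The lib.rs rows define Servo's embedder channel: a cloneable proxy that sends (browser id, message) pairs and then wakes the embedder's OS event loop, plus a receiver with blocking and non-blocking reads. A simplified sketch of the same pattern, substituting std::sync::mpsc for crossbeam-channel and a boxed closure for webxr's MainThreadWaker; Msg, Proxy and Port are invented names.

use std::sync::mpsc::{channel, Receiver, Sender};

// Stand-ins for the real EmbedderMsg and waker types.
#[derive(Debug)]
enum Msg {
    Status(Option<String>),
    LoadComplete,
}

struct Proxy {
    sender: Sender<Msg>,
    wake: Box<dyn Fn() + Send>, // stand-in for EventLoopWaker::wake
}

impl Proxy {
    fn send(&self, msg: Msg) {
        // Send a message, then kick the embedder's event loop awake.
        if let Err(err) = self.sender.send(msg) {
            eprintln!("failed to send response: {:?}", err);
        }
        (self.wake)();
    }
}

struct Port {
    receiver: Receiver<Msg>,
}

impl Port {
    fn try_recv_msg(&self) -> Option<Msg> {
        self.receiver.try_recv().ok()
    }
}

fn main() {
    let (tx, rx) = channel();
    let proxy = Proxy {
        sender: tx,
        wake: Box::new(|| { /* e.g. poke a windowing event-loop proxy here */ }),
    };
    let port = Port { receiver: rx };
    proxy.send(Msg::Status(Some("ready".into())));
    proxy.send(Msg::LoadComplete);
    while let Some(msg) = port.try_recv_msg() {
        println!("{:?}", msg);
    }
}
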
memchr.rs
// Original implementation taken from rust-memchr. // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch use crate::cmp; use crate::mem; const LO_U64: u64 = 0x0101010101010101; const HI_U64: u64 = 0x8080808080808080; // Use truncation. const LO_USIZE: usize = LO_U64 as usize; const HI_USIZE: usize = HI_U64 as usize; const USIZE_BYTES: usize = mem::size_of::<usize>(); /// Returns `true` if `x` contains any zero byte. /// /// From *Matters Computational*, J. Arndt: /// /// "The idea is to subtract one from each of the bytes and then look for /// bytes where the borrow propagated all the way to the most significant /// bit." #[inline] fn contains_zero_byte(x: usize) -> bool { x.wrapping_sub(LO_USIZE) &!x & HI_USIZE!= 0 } #[cfg(target_pointer_width = "16")] #[inline] fn
(b: u8) -> usize { (b as usize) << 8 | b as usize } #[cfg(not(target_pointer_width = "16"))] #[inline] fn repeat_byte(b: u8) -> usize { (b as usize) * (usize::MAX / 255) } /// Returns the first index matching the byte `x` in `text`. #[inline] pub fn memchr(x: u8, text: &[u8]) -> Option<usize> { // Fast path for small slices if text.len() < 2 * USIZE_BYTES { return text.iter().position(|elt| *elt == x); } memchr_general_case(x, text) } fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize> { // Scan for a single byte value by reading two `usize` words at a time. // // Split `text` in three parts // - unaligned initial part, before the first word aligned address in text // - body, scan by 2 words at a time // - the last remaining part, < 2 word size // search up to an aligned boundary let len = text.len(); let ptr = text.as_ptr(); let mut offset = ptr.align_offset(USIZE_BYTES); if offset > 0 { offset = cmp::min(offset, len); if let Some(index) = text[..offset].iter().position(|elt| *elt == x) { return Some(index); } } // search the body of the text let repeated_x = repeat_byte(x); while offset <= len - 2 * USIZE_BYTES { // SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes // between the offset and the end of the slice. unsafe { let u = *(ptr.add(offset) as *const usize); let v = *(ptr.add(offset + USIZE_BYTES) as *const usize); // break if there is a matching byte let zu = contains_zero_byte(u ^ repeated_x); let zv = contains_zero_byte(v ^ repeated_x); if zu || zv { break; } } offset += USIZE_BYTES * 2; } // Find the byte after the point the body loop stopped. text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i) } /// Returns the last index matching the byte `x` in `text`. pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> { // Scan for a single byte value by reading two `usize` words at a time. // // Split `text` in three parts: // - unaligned tail, after the last word aligned address in text, // - body, scanned by 2 words at a time, // - the first remaining bytes, < 2 word size. let len = text.len(); let ptr = text.as_ptr(); type Chunk = usize; let (min_aligned_offset, max_aligned_offset) = { // We call this just to obtain the length of the prefix and suffix. // In the middle we always process two chunks at once. // SAFETY: transmuting `[u8]` to `[usize]` is safe except for size differences // which are handled by `align_to`. let (prefix, _, suffix) = unsafe { text.align_to::<(Chunk, Chunk)>() }; (prefix.len(), len - suffix.len()) }; let mut offset = max_aligned_offset; if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) { return Some(offset + index); } // Search the body of the text, make sure we don't cross min_aligned_offset. // offset is always aligned, so just testing `>` is sufficient and avoids possible // overflow. let repeated_x = repeat_byte(x); let chunk_bytes = mem::size_of::<Chunk>(); while offset > min_aligned_offset { // SAFETY: offset starts at len - suffix.len(), as long as it is greater than // min_aligned_offset (prefix.len()) the remaining distance is at least 2 * chunk_bytes. unsafe { let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk); let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk); // Break if there is a matching byte. let zu = contains_zero_byte(u ^ repeated_x); let zv = contains_zero_byte(v ^ repeated_x); if zu || zv { break; } } offset -= 2 * chunk_bytes; } // Find the byte before the point the body loop stopped. 
text[..offset].iter().rposition(|elt| *elt == x) }
repeat_byte
identifier_name
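
memchr.rs leans on the zero-byte trick quoted from Matters Computational: broadcast the needle byte across a word with repeat_byte, XOR it into each word of text so that matching bytes become 0x00, then detect any zero byte with (w - 0x01..01) & !w & 0x80..80. A small worked check of that identity, assuming a 64-bit word for concreteness:

const LO: u64 = 0x0101_0101_0101_0101;
const HI: u64 = 0x8080_8080_8080_8080;

fn contains_zero_byte(x: u64) -> bool {
    // A 0x00 byte borrows all the way to its high bit when 1 is subtracted,
    // and `!x & HI` keeps only bytes whose own high bit was clear.
    x.wrapping_sub(LO) & !x & HI != 0
}

fn repeat_byte(b: u8) -> u64 {
    (b as u64) * (u64::MAX / 255) // broadcasts b into every byte of the word
}

fn main() {
    let word = u64::from_le_bytes(*b"altruism");
    // XOR zeroes exactly the bytes equal to the needle.
    assert!(contains_zero_byte(word ^ repeat_byte(b'u')));
    assert!(!contains_zero_byte(word ^ repeat_byte(b'z')));
    println!("zero-byte detection behaves as described");
}
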
memchr.rs
// Original implementation taken from rust-memchr. // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch use crate::cmp; use crate::mem; const LO_U64: u64 = 0x0101010101010101; const HI_U64: u64 = 0x8080808080808080; // Use truncation. const LO_USIZE: usize = LO_U64 as usize; const HI_USIZE: usize = HI_U64 as usize; const USIZE_BYTES: usize = mem::size_of::<usize>(); /// Returns `true` if `x` contains any zero byte. /// /// From *Matters Computational*, J. Arndt: /// /// "The idea is to subtract one from each of the bytes and then look for /// bytes where the borrow propagated all the way to the most significant /// bit." #[inline] fn contains_zero_byte(x: usize) -> bool { x.wrapping_sub(LO_USIZE) &!x & HI_USIZE!= 0 } #[cfg(target_pointer_width = "16")] #[inline] fn repeat_byte(b: u8) -> usize { (b as usize) << 8 | b as usize } #[cfg(not(target_pointer_width = "16"))] #[inline] fn repeat_byte(b: u8) -> usize { (b as usize) * (usize::MAX / 255) } /// Returns the first index matching the byte `x` in `text`. #[inline] pub fn memchr(x: u8, text: &[u8]) -> Option<usize> { // Fast path for small slices if text.len() < 2 * USIZE_BYTES { return text.iter().position(|elt| *elt == x); } memchr_general_case(x, text) } fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize> { // Scan for a single byte value by reading two `usize` words at a time. // // Split `text` in three parts // - unaligned initial part, before the first word aligned address in text // - body, scan by 2 words at a time // - the last remaining part, < 2 word size // search up to an aligned boundary let len = text.len(); let ptr = text.as_ptr(); let mut offset = ptr.align_offset(USIZE_BYTES); if offset > 0 { offset = cmp::min(offset, len); if let Some(index) = text[..offset].iter().position(|elt| *elt == x) { return Some(index); } } // search the body of the text let repeated_x = repeat_byte(x); while offset <= len - 2 * USIZE_BYTES { // SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes // between the offset and the end of the slice. unsafe { let u = *(ptr.add(offset) as *const usize); let v = *(ptr.add(offset + USIZE_BYTES) as *const usize); // break if there is a matching byte let zu = contains_zero_byte(u ^ repeated_x); let zv = contains_zero_byte(v ^ repeated_x); if zu || zv { break; } } offset += USIZE_BYTES * 2; } // Find the byte after the point the body loop stopped. text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i) } /// Returns the last index matching the byte `x` in `text`. pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> { // Scan for a single byte value by reading two `usize` words at a time. // // Split `text` in three parts: // - unaligned tail, after the last word aligned address in text, // - body, scanned by 2 words at a time, // - the first remaining bytes, < 2 word size. let len = text.len(); let ptr = text.as_ptr(); type Chunk = usize; let (min_aligned_offset, max_aligned_offset) = { // We call this just to obtain the length of the prefix and suffix. // In the middle we always process two chunks at once. // SAFETY: transmuting `[u8]` to `[usize]` is safe except for size differences // which are handled by `align_to`. let (prefix, _, suffix) = unsafe { text.align_to::<(Chunk, Chunk)>() }; (prefix.len(), len - suffix.len()) }; let mut offset = max_aligned_offset; if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x)
// Search the body of the text, make sure we don't cross min_aligned_offset. // offset is always aligned, so just testing `>` is sufficient and avoids possible // overflow. let repeated_x = repeat_byte(x); let chunk_bytes = mem::size_of::<Chunk>(); while offset > min_aligned_offset { // SAFETY: offset starts at len - suffix.len(), as long as it is greater than // min_aligned_offset (prefix.len()) the remaining distance is at least 2 * chunk_bytes. unsafe { let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk); let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk); // Break if there is a matching byte. let zu = contains_zero_byte(u ^ repeated_x); let zv = contains_zero_byte(v ^ repeated_x); if zu || zv { break; } } offset -= 2 * chunk_bytes; } // Find the byte before the point the body loop stopped. text[..offset].iter().rposition(|elt| *elt == x) }
{ return Some(offset + index); }
conditional_block
memchr.rs
// Original implementation taken from rust-memchr. // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch use crate::cmp; use crate::mem; const LO_U64: u64 = 0x0101010101010101; const HI_U64: u64 = 0x8080808080808080; // Use truncation. const LO_USIZE: usize = LO_U64 as usize; const HI_USIZE: usize = HI_U64 as usize; const USIZE_BYTES: usize = mem::size_of::<usize>(); /// Returns `true` if `x` contains any zero byte. /// /// From *Matters Computational*, J. Arndt: /// /// "The idea is to subtract one from each of the bytes and then look for /// bytes where the borrow propagated all the way to the most significant /// bit." #[inline] fn contains_zero_byte(x: usize) -> bool { x.wrapping_sub(LO_USIZE) &!x & HI_USIZE!= 0 } #[cfg(target_pointer_width = "16")] #[inline] fn repeat_byte(b: u8) -> usize { (b as usize) << 8 | b as usize } #[cfg(not(target_pointer_width = "16"))] #[inline] fn repeat_byte(b: u8) -> usize { (b as usize) * (usize::MAX / 255) } /// Returns the first index matching the byte `x` in `text`. #[inline] pub fn memchr(x: u8, text: &[u8]) -> Option<usize> { // Fast path for small slices if text.len() < 2 * USIZE_BYTES { return text.iter().position(|elt| *elt == x); } memchr_general_case(x, text) } fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize>
// search the body of the text let repeated_x = repeat_byte(x); while offset <= len - 2 * USIZE_BYTES { // SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes // between the offset and the end of the slice. unsafe { let u = *(ptr.add(offset) as *const usize); let v = *(ptr.add(offset + USIZE_BYTES) as *const usize); // break if there is a matching byte let zu = contains_zero_byte(u ^ repeated_x); let zv = contains_zero_byte(v ^ repeated_x); if zu || zv { break; } } offset += USIZE_BYTES * 2; } // Find the byte after the point the body loop stopped. text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i) } /// Returns the last index matching the byte `x` in `text`. pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> { // Scan for a single byte value by reading two `usize` words at a time. // // Split `text` in three parts: // - unaligned tail, after the last word aligned address in text, // - body, scanned by 2 words at a time, // - the first remaining bytes, < 2 word size. let len = text.len(); let ptr = text.as_ptr(); type Chunk = usize; let (min_aligned_offset, max_aligned_offset) = { // We call this just to obtain the length of the prefix and suffix. // In the middle we always process two chunks at once. // SAFETY: transmuting `[u8]` to `[usize]` is safe except for size differences // which are handled by `align_to`. let (prefix, _, suffix) = unsafe { text.align_to::<(Chunk, Chunk)>() }; (prefix.len(), len - suffix.len()) }; let mut offset = max_aligned_offset; if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) { return Some(offset + index); } // Search the body of the text, make sure we don't cross min_aligned_offset. // offset is always aligned, so just testing `>` is sufficient and avoids possible // overflow. let repeated_x = repeat_byte(x); let chunk_bytes = mem::size_of::<Chunk>(); while offset > min_aligned_offset { // SAFETY: offset starts at len - suffix.len(), as long as it is greater than // min_aligned_offset (prefix.len()) the remaining distance is at least 2 * chunk_bytes. unsafe { let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk); let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk); // Break if there is a matching byte. let zu = contains_zero_byte(u ^ repeated_x); let zv = contains_zero_byte(v ^ repeated_x); if zu || zv { break; } } offset -= 2 * chunk_bytes; } // Find the byte before the point the body loop stopped. text[..offset].iter().rposition(|elt| *elt == x) }
{ // Scan for a single byte value by reading two `usize` words at a time. // // Split `text` in three parts // - unaligned initial part, before the first word aligned address in text // - body, scan by 2 words at a time // - the last remaining part, < 2 word size // search up to an aligned boundary let len = text.len(); let ptr = text.as_ptr(); let mut offset = ptr.align_offset(USIZE_BYTES); if offset > 0 { offset = cmp::min(offset, len); if let Some(index) = text[..offset].iter().position(|elt| *elt == x) { return Some(index); } }
identifier_body
memchr.rs
// Original implementation taken from rust-memchr. // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch use crate::cmp; use crate::mem; const LO_U64: u64 = 0x0101010101010101; const HI_U64: u64 = 0x8080808080808080; // Use truncation. const LO_USIZE: usize = LO_U64 as usize; const HI_USIZE: usize = HI_U64 as usize; const USIZE_BYTES: usize = mem::size_of::<usize>(); /// Returns `true` if `x` contains any zero byte. /// /// From *Matters Computational*, J. Arndt: /// /// "The idea is to subtract one from each of the bytes and then look for /// bytes where the borrow propagated all the way to the most significant /// bit." #[inline] fn contains_zero_byte(x: usize) -> bool { x.wrapping_sub(LO_USIZE) &!x & HI_USIZE!= 0 } #[cfg(target_pointer_width = "16")] #[inline] fn repeat_byte(b: u8) -> usize { (b as usize) << 8 | b as usize } #[cfg(not(target_pointer_width = "16"))] #[inline] fn repeat_byte(b: u8) -> usize { (b as usize) * (usize::MAX / 255) } /// Returns the first index matching the byte `x` in `text`. #[inline] pub fn memchr(x: u8, text: &[u8]) -> Option<usize> { // Fast path for small slices if text.len() < 2 * USIZE_BYTES { return text.iter().position(|elt| *elt == x); } memchr_general_case(x, text) } fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize> { // Scan for a single byte value by reading two `usize` words at a time. // // Split `text` in three parts // - unaligned initial part, before the first word aligned address in text // - body, scan by 2 words at a time // - the last remaining part, < 2 word size // search up to an aligned boundary let len = text.len(); let ptr = text.as_ptr(); let mut offset = ptr.align_offset(USIZE_BYTES); if offset > 0 { offset = cmp::min(offset, len); if let Some(index) = text[..offset].iter().position(|elt| *elt == x) { return Some(index); } } // search the body of the text let repeated_x = repeat_byte(x); while offset <= len - 2 * USIZE_BYTES { // SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes // between the offset and the end of the slice. unsafe { let u = *(ptr.add(offset) as *const usize); let v = *(ptr.add(offset + USIZE_BYTES) as *const usize); // break if there is a matching byte let zu = contains_zero_byte(u ^ repeated_x); let zv = contains_zero_byte(v ^ repeated_x); if zu || zv {
offset += USIZE_BYTES * 2; } // Find the byte after the point the body loop stopped. text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i) } /// Returns the last index matching the byte `x` in `text`. pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> { // Scan for a single byte value by reading two `usize` words at a time. // // Split `text` in three parts: // - unaligned tail, after the last word aligned address in text, // - body, scanned by 2 words at a time, // - the first remaining bytes, < 2 word size. let len = text.len(); let ptr = text.as_ptr(); type Chunk = usize; let (min_aligned_offset, max_aligned_offset) = { // We call this just to obtain the length of the prefix and suffix. // In the middle we always process two chunks at once. // SAFETY: transmuting `[u8]` to `[usize]` is safe except for size differences // which are handled by `align_to`. let (prefix, _, suffix) = unsafe { text.align_to::<(Chunk, Chunk)>() }; (prefix.len(), len - suffix.len()) }; let mut offset = max_aligned_offset; if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) { return Some(offset + index); } // Search the body of the text, make sure we don't cross min_aligned_offset. // offset is always aligned, so just testing `>` is sufficient and avoids possible // overflow. let repeated_x = repeat_byte(x); let chunk_bytes = mem::size_of::<Chunk>(); while offset > min_aligned_offset { // SAFETY: offset starts at len - suffix.len(), as long as it is greater than // min_aligned_offset (prefix.len()) the remaining distance is at least 2 * chunk_bytes. unsafe { let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk); let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk); // Break if there is a matching byte. let zu = contains_zero_byte(u ^ repeated_x); let zv = contains_zero_byte(v ^ repeated_x); if zu || zv { break; } } offset -= 2 * chunk_bytes; } // Find the byte before the point the body loop stopped. text[..offset].iter().rposition(|elt| *elt == x) }
break; } }
random_line_split
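A standalone check of the two word-level tricks the memchr.rs samples above describe: repeat_byte broadcasts the needle into every byte lane, and contains_zero_byte applies the borrow-into-sign-bit identity quoted from Matters Computational. The sketch fixes the word type to u64 and mirrors the libcore helpers only in spirit; it is not the core implementation itself.

const LO: u64 = 0x0101_0101_0101_0101;
const HI: u64 = 0x8080_8080_8080_8080;

// True when any byte of `x` is 0x00: subtracting 1 borrows into the high bit
// of a zero byte, and `!x` masks out bytes whose high bit was already set.
fn contains_zero_byte(x: u64) -> bool {
    x.wrapping_sub(LO) & !x & HI != 0
}

// Broadcast a byte into all eight byte lanes of the word.
fn repeat_byte(b: u8) -> u64 {
    (b as u64) * (u64::MAX / 255)
}

fn main() {
    let word = u64::from_le_bytes(*b"rust-fim");
    // XOR with the repeated needle zeroes exactly the bytes that match it.
    assert!(contains_zero_byte(word ^ repeat_byte(b'f')));
    assert!(!contains_zero_byte(word ^ repeat_byte(b'z')));
    println!("zero-byte identity holds for this word");
}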
edit.rs
// // imag - the personal information management suite for the commandline // Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; version // 2.1 of the License. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software
use chrono::naive::NaiveDateTime; use libimagdiary::diary::Diary; use libimagdiary::diaryid::DiaryId; use libimagdiary::error::DiaryErrorKind as DEK; use libimagdiary::error::DiaryError as DE; use libimagdiary::error::ResultExt; use libimagentryedit::edit::Edit; use libimagrt::runtime::Runtime; use libimagerror::trace::MapErrTrace; use libimagtimeui::datetime::DateTime; use libimagtimeui::parse::Parse; use libimagutil::warn_exit::warn_exit; use libimagerror::trace::trace_error_exit; use util::get_diary_name; pub fn edit(rt: &Runtime) { let diaryname = get_diary_name(rt).unwrap_or_else(|| warn_exit("No diary name", 1)); rt.cli() .subcommand_matches("edit") .unwrap() .value_of("datetime") .and_then(DateTime::parse) .map(|dt| dt.into()) .map(|dt: NaiveDateTime| DiaryId::from_datetime(diaryname.clone(), dt)) .or_else(|| { rt.store() .get_youngest_entry_id(&diaryname) .map(|optid| match optid { Ok(id) => id, Err(e) => trace_error_exit(&e, 1), }) }) .ok_or_else(|| { error!("No entries in diary. Aborting"); exit(1) }) .and_then(|id| rt.store().get(id)) .map(|opte| match opte { Some(mut e) => e.edit_content(rt).chain_err(|| DEK::IOError), None => Err(DE::from_kind(DEK::EntryNotInDiary)), }) .map_err_trace() .ok(); }
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA // use std::process::exit;
random_line_split
edit.rs
// // imag - the personal information management suite for the commandline // Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; version // 2.1 of the License. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA // use std::process::exit; use chrono::naive::NaiveDateTime; use libimagdiary::diary::Diary; use libimagdiary::diaryid::DiaryId; use libimagdiary::error::DiaryErrorKind as DEK; use libimagdiary::error::DiaryError as DE; use libimagdiary::error::ResultExt; use libimagentryedit::edit::Edit; use libimagrt::runtime::Runtime; use libimagerror::trace::MapErrTrace; use libimagtimeui::datetime::DateTime; use libimagtimeui::parse::Parse; use libimagutil::warn_exit::warn_exit; use libimagerror::trace::trace_error_exit; use util::get_diary_name; pub fn edit(rt: &Runtime)
exit(1) }) .and_then(|id| rt.store().get(id)) .map(|opte| match opte { Some(mut e) => e.edit_content(rt).chain_err(|| DEK::IOError), None => Err(DE::from_kind(DEK::EntryNotInDiary)), }) .map_err_trace() .ok(); }
{ let diaryname = get_diary_name(rt).unwrap_or_else(|| warn_exit("No diary name", 1)); rt.cli() .subcommand_matches("edit") .unwrap() .value_of("datetime") .and_then(DateTime::parse) .map(|dt| dt.into()) .map(|dt: NaiveDateTime| DiaryId::from_datetime(diaryname.clone(), dt)) .or_else(|| { rt.store() .get_youngest_entry_id(&diaryname) .map(|optid| match optid { Ok(id) => id, Err(e) => trace_error_exit(&e, 1), }) }) .ok_or_else(|| { error!("No entries in diary. Aborting");
identifier_body
edit.rs
// // imag - the personal information management suite for the commandline // Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; version // 2.1 of the License. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA // use std::process::exit; use chrono::naive::NaiveDateTime; use libimagdiary::diary::Diary; use libimagdiary::diaryid::DiaryId; use libimagdiary::error::DiaryErrorKind as DEK; use libimagdiary::error::DiaryError as DE; use libimagdiary::error::ResultExt; use libimagentryedit::edit::Edit; use libimagrt::runtime::Runtime; use libimagerror::trace::MapErrTrace; use libimagtimeui::datetime::DateTime; use libimagtimeui::parse::Parse; use libimagutil::warn_exit::warn_exit; use libimagerror::trace::trace_error_exit; use util::get_diary_name; pub fn
(rt: &Runtime) { let diaryname = get_diary_name(rt).unwrap_or_else(|| warn_exit("No diary name", 1)); rt.cli() .subcommand_matches("edit") .unwrap() .value_of("datetime") .and_then(DateTime::parse) .map(|dt| dt.into()) .map(|dt: NaiveDateTime| DiaryId::from_datetime(diaryname.clone(), dt)) .or_else(|| { rt.store() .get_youngest_entry_id(&diaryname) .map(|optid| match optid { Ok(id) => id, Err(e) => trace_error_exit(&e, 1), }) }) .ok_or_else(|| { error!("No entries in diary. Aborting"); exit(1) }) .and_then(|id| rt.store().get(id)) .map(|opte| match opte { Some(mut e) => e.edit_content(rt).chain_err(|| DEK::IOError), None => Err(DE::from_kind(DEK::EntryNotInDiary)), }) .map_err_trace() .ok(); }
edit
identifier_name
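The edit.rs samples pick the entry to open through a fallback chain: parse a datetime from the CLI, otherwise take the youngest diary entry, and abort if neither exists. Below is a dependency-free sketch of that shape, with plain strings standing in for DiaryId and DateTime; the imag and chrono types are not used here.

fn parse_id(arg: Option<&str>) -> Option<String> {
    // Stand-in for DateTime::parse + DiaryId::from_datetime.
    arg.filter(|s| !s.is_empty()).map(|s| s.to_string())
}

fn youngest_entry() -> Option<String> {
    // Stand-in for rt.store().get_youngest_entry_id(&diaryname).
    Some("diary/2016-01-01".to_string())
}

fn select_entry(cli_arg: Option<&str>) -> Result<String, &'static str> {
    parse_id(cli_arg)
        .or_else(youngest_entry)
        .ok_or("No entries in diary. Aborting")
}

fn main() {
    assert_eq!(select_entry(Some("diary/2015-12-24")).as_deref(), Ok("diary/2015-12-24"));
    assert_eq!(select_entry(None).as_deref(), Ok("diary/2016-01-01"));
    println!("fallback chain selects the CLI id first, the youngest entry otherwise");
}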
main.rs
#![feature(collections)] extern crate opencl; use opencl::mem::CLBuffer; use std::fmt; fn main() { let ker = include_str!("demo.ocl"); println!("ker {}", ker); let vec_a = vec![0isize, 1, 2, -3, 4, 5, 6, 7]; let vec_b = vec![-7isize, -6, 5, -4, 0, -1, 2, 3]; let ref platform = opencl::hl::get_platforms()[0]; let devices = platform.get_devices(); let ctx = opencl::hl::Context::create_context(&devices[..]); let queues: Vec<opencl::hl::CommandQueue> = devices.iter() .map(|d| ctx.create_command_queue(d)).collect(); for (device, queue) in devices.iter().zip(queues.iter()) { println!("{}", queue.device().name()); let a: CLBuffer<isize> = ctx.create_buffer(vec_a.len(), opencl::cl::CL_MEM_READ_ONLY); let b: CLBuffer<isize> = ctx.create_buffer(vec_a.len(), opencl::cl::CL_MEM_READ_ONLY); let c: CLBuffer<isize> = ctx.create_buffer(vec_a.len(), opencl::cl::CL_MEM_WRITE_ONLY); queue.write(&a, &&vec_a[..], ()); queue.write(&b, &&vec_b[..], ()); let program = ctx.create_program_from_source(ker); program.build(&device).ok().expect("Couldn't build program."); let kernel = program.create_kernel("vector_add"); kernel.set_arg(0, &a); kernel.set_arg(1, &b); kernel.set_arg(2, &c); let event = queue.enqueue_async_kernel(&kernel, vec_a.len(), None, ()); let vec_c: Vec<isize> = queue.get(&c, &event); println!(" {}", string_from_slice(&vec_a[..])); println!("+ {}", string_from_slice(&vec_b[..])); println!("= {}", string_from_slice(&vec_c[..])); } } fn string_from_slice<T: fmt::Display>(slice: &[T]) -> String
{ let mut st = String::from_str("["); let mut first = true; for i in slice.iter() { if !first { st.push_str(", "); } else { first = false; } st.push_str(&*i.to_string()) } st.push_str("]"); return st }
identifier_body
main.rs
#![feature(collections)] extern crate opencl; use opencl::mem::CLBuffer; use std::fmt; fn main() { let ker = include_str!("demo.ocl"); println!("ker {}", ker); let vec_a = vec![0isize, 1, 2, -3, 4, 5, 6, 7]; let vec_b = vec![-7isize, -6, 5, -4, 0, -1, 2, 3]; let ref platform = opencl::hl::get_platforms()[0]; let devices = platform.get_devices(); let ctx = opencl::hl::Context::create_context(&devices[..]); let queues: Vec<opencl::hl::CommandQueue> = devices.iter() .map(|d| ctx.create_command_queue(d)).collect(); for (device, queue) in devices.iter().zip(queues.iter()) { println!("{}", queue.device().name()); let a: CLBuffer<isize> = ctx.create_buffer(vec_a.len(), opencl::cl::CL_MEM_READ_ONLY); let b: CLBuffer<isize> = ctx.create_buffer(vec_a.len(), opencl::cl::CL_MEM_READ_ONLY); let c: CLBuffer<isize> = ctx.create_buffer(vec_a.len(), opencl::cl::CL_MEM_WRITE_ONLY); queue.write(&a, &&vec_a[..], ()); queue.write(&b, &&vec_b[..], ()); let program = ctx.create_program_from_source(ker); program.build(&device).ok().expect("Couldn't build program."); let kernel = program.create_kernel("vector_add"); kernel.set_arg(0, &a); kernel.set_arg(1, &b); kernel.set_arg(2, &c); let event = queue.enqueue_async_kernel(&kernel, vec_a.len(), None, ()); let vec_c: Vec<isize> = queue.get(&c, &event); println!(" {}", string_from_slice(&vec_a[..])); println!("+ {}", string_from_slice(&vec_b[..])); println!("= {}", string_from_slice(&vec_c[..])); } } fn string_from_slice<T: fmt::Display>(slice: &[T]) -> String { let mut st = String::from_str("["); let mut first = true; for i in slice.iter() { if!first
else { first = false; } st.push_str(&*i.to_string()) } st.push_str("]"); return st }
{ st.push_str(", "); }
conditional_block
main.rs
#![feature(collections)] extern crate opencl; use opencl::mem::CLBuffer; use std::fmt; fn main() { let ker = include_str!("demo.ocl"); println!("ker {}", ker); let vec_a = vec![0isize, 1, 2, -3, 4, 5, 6, 7]; let vec_b = vec![-7isize, -6, 5, -4, 0, -1, 2, 3]; let ref platform = opencl::hl::get_platforms()[0]; let devices = platform.get_devices(); let ctx = opencl::hl::Context::create_context(&devices[..]); let queues: Vec<opencl::hl::CommandQueue> = devices.iter() .map(|d| ctx.create_command_queue(d)).collect(); for (device, queue) in devices.iter().zip(queues.iter()) { println!("{}", queue.device().name()); let a: CLBuffer<isize> = ctx.create_buffer(vec_a.len(), opencl::cl::CL_MEM_READ_ONLY); let b: CLBuffer<isize> = ctx.create_buffer(vec_a.len(), opencl::cl::CL_MEM_READ_ONLY); let c: CLBuffer<isize> = ctx.create_buffer(vec_a.len(), opencl::cl::CL_MEM_WRITE_ONLY); queue.write(&a, &&vec_a[..], ()); queue.write(&b, &&vec_b[..], ()); let program = ctx.create_program_from_source(ker); program.build(&device).ok().expect("Couldn't build program."); let kernel = program.create_kernel("vector_add"); kernel.set_arg(0, &a); kernel.set_arg(1, &b); kernel.set_arg(2, &c); let event = queue.enqueue_async_kernel(&kernel, vec_a.len(), None, ()); let vec_c: Vec<isize> = queue.get(&c, &event); println!(" {}", string_from_slice(&vec_a[..])); println!("+ {}", string_from_slice(&vec_b[..])); println!("= {}", string_from_slice(&vec_c[..])); } } fn string_from_slice<T: fmt::Display>(slice: &[T]) -> String { let mut st = String::from_str("["); let mut first = true; for i in slice.iter() { if!first { st.push_str(", "); } else { first = false;
return st }
} st.push_str(&*i.to_string()) } st.push_str("]");
random_line_split
main.rs
#![feature(collections)] extern crate opencl; use opencl::mem::CLBuffer; use std::fmt; fn main() { let ker = include_str!("demo.ocl"); println!("ker {}", ker); let vec_a = vec![0isize, 1, 2, -3, 4, 5, 6, 7]; let vec_b = vec![-7isize, -6, 5, -4, 0, -1, 2, 3]; let ref platform = opencl::hl::get_platforms()[0]; let devices = platform.get_devices(); let ctx = opencl::hl::Context::create_context(&devices[..]); let queues: Vec<opencl::hl::CommandQueue> = devices.iter() .map(|d| ctx.create_command_queue(d)).collect(); for (device, queue) in devices.iter().zip(queues.iter()) { println!("{}", queue.device().name()); let a: CLBuffer<isize> = ctx.create_buffer(vec_a.len(), opencl::cl::CL_MEM_READ_ONLY); let b: CLBuffer<isize> = ctx.create_buffer(vec_a.len(), opencl::cl::CL_MEM_READ_ONLY); let c: CLBuffer<isize> = ctx.create_buffer(vec_a.len(), opencl::cl::CL_MEM_WRITE_ONLY); queue.write(&a, &&vec_a[..], ()); queue.write(&b, &&vec_b[..], ()); let program = ctx.create_program_from_source(ker); program.build(&device).ok().expect("Couldn't build program."); let kernel = program.create_kernel("vector_add"); kernel.set_arg(0, &a); kernel.set_arg(1, &b); kernel.set_arg(2, &c); let event = queue.enqueue_async_kernel(&kernel, vec_a.len(), None, ()); let vec_c: Vec<isize> = queue.get(&c, &event); println!(" {}", string_from_slice(&vec_a[..])); println!("+ {}", string_from_slice(&vec_b[..])); println!("= {}", string_from_slice(&vec_c[..])); } } fn
<T: fmt::Display>(slice: &[T]) -> String {
    let mut st = String::from_str("[");
    let mut first = true;
    for i in slice.iter() {
        if !first {
            st.push_str(", ");
        } else {
            first = false;
        }
        st.push_str(&*i.to_string())
    }
    st.push_str("]");
    return st
}
string_from_slice
identifier_name
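string_from_slice in the main.rs samples renders a slice as "[a, b, c]" with a manual first-element flag. As a quick cross-check, here is an iterator-based equivalent using only std; it is not part of the original OpenCL demo.

use std::fmt;

// Iterator-based equivalent: stringify each Display item, then join with ", ".
fn join_display<T: fmt::Display>(slice: &[T]) -> String {
    let items: Vec<String> = slice.iter().map(|i| i.to_string()).collect();
    format!("[{}]", items.join(", "))
}

fn main() {
    let vec_a = vec![0isize, 1, 2, -3, 4, 5, 6, 7];
    assert_eq!(join_display(&vec_a), "[0, 1, 2, -3, 4, 5, 6, 7]");
    println!("{}", join_display(&vec_a));
}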
static-method-on-struct-and-enum.rs
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-android: FIXME(#10381) // min-lldb-version: 310 // compile-flags:-g // === GDB TESTS =================================================================================== // gdb-command:run // STRUCT // gdb-command:print arg1 // gdb-check:$1 = 1 // gdb-command:print arg2 // gdb-check:$2 = 2 // gdb-command:continue // ENUM // gdb-command:print arg1 // gdb-check:$3 = -3 // gdb-command:print arg2 // gdb-check:$4 = 4.5 // gdb-command:print arg3
// gdb-command:continue // === LLDB TESTS ================================================================================== // lldb-command:run // STRUCT // lldb-command:print arg1 // lldb-check:[...]$0 = 1 // lldb-command:print arg2 // lldb-check:[...]$1 = 2 // lldb-command:continue // ENUM // lldb-command:print arg1 // lldb-check:[...]$2 = -3 // lldb-command:print arg2 // lldb-check:[...]$3 = 4.5 // lldb-command:print arg3 // lldb-check:[...]$4 = 5 // lldb-command:continue #![feature(struct_variant)] struct Struct { x: int } impl Struct { fn static_method(arg1: int, arg2: int) -> int { zzz(); // #break arg1 + arg2 } } enum Enum { Variant1 { x: int }, Variant2, Variant3(f64, int, char), } impl Enum { fn static_method(arg1: int, arg2: f64, arg3: uint) -> int { zzz(); // #break arg1 } } fn main() { Struct::static_method(1, 2); Enum::static_method(-3, 4.5, 5); } fn zzz() {()}
// gdb-check:$5 = 5
random_line_split
static-method-on-struct-and-enum.rs
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-android: FIXME(#10381) // min-lldb-version: 310 // compile-flags:-g // === GDB TESTS =================================================================================== // gdb-command:run // STRUCT // gdb-command:print arg1 // gdb-check:$1 = 1 // gdb-command:print arg2 // gdb-check:$2 = 2 // gdb-command:continue // ENUM // gdb-command:print arg1 // gdb-check:$3 = -3 // gdb-command:print arg2 // gdb-check:$4 = 4.5 // gdb-command:print arg3 // gdb-check:$5 = 5 // gdb-command:continue // === LLDB TESTS ================================================================================== // lldb-command:run // STRUCT // lldb-command:print arg1 // lldb-check:[...]$0 = 1 // lldb-command:print arg2 // lldb-check:[...]$1 = 2 // lldb-command:continue // ENUM // lldb-command:print arg1 // lldb-check:[...]$2 = -3 // lldb-command:print arg2 // lldb-check:[...]$3 = 4.5 // lldb-command:print arg3 // lldb-check:[...]$4 = 5 // lldb-command:continue #![feature(struct_variant)] struct Struct { x: int } impl Struct { fn static_method(arg1: int, arg2: int) -> int { zzz(); // #break arg1 + arg2 } } enum Enum { Variant1 { x: int }, Variant2, Variant3(f64, int, char), } impl Enum { fn static_method(arg1: int, arg2: f64, arg3: uint) -> int { zzz(); // #break arg1 } } fn
() { Struct::static_method(1, 2); Enum::static_method(-3, 4.5, 5); } fn zzz() {()}
main
identifier_name
smb2_ioctl.rs
/* Copyright (C) 2018 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as published by the Free * Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * version 2 along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ use nom::IResult; use log::*; use smb::smb::*; use smb::smb2::*; use smb::smb2_records::*; use smb::dcerpc::*; use smb::events::*; use smb::funcs::*; #[derive(Debug)] pub struct SMBTransactionIoctl { pub func: u32, } impl SMBTransactionIoctl { pub fn new(func: u32) -> SMBTransactionIoctl { return SMBTransactionIoctl { func: func, } } } impl SMBState { pub fn new_ioctl_tx(&mut self, hdr: SMBCommonHdr, func: u32) -> (&mut SMBTransaction)
} // IOCTL responses ASYNC don't set the tree id pub fn smb2_ioctl_request_record<'b>(state: &mut SMBState, r: &Smb2Record<'b>) { match parse_smb2_request_ioctl(r.data) { IResult::Done(_, rd) => { SCLogDebug!("IOCTL request data: {:?}", rd); let is_dcerpc = rd.is_pipe && match state.get_service_for_guid(&rd.guid) { (_, x) => x, }; let hdr = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); if is_dcerpc { SCLogDebug!("IOCTL request data is_pipe. Calling smb_write_dcerpc_record"); let vercmd = SMBVerCmdStat::new2(SMB2_COMMAND_IOCTL); smb_write_dcerpc_record(state, vercmd, hdr, rd.data); } else { SCLogDebug!("IOCTL {:08x} {}", rd.function, &fsctl_func_to_string(rd.function)); let tx = state.new_ioctl_tx(hdr, rd.function); tx.vercmd.set_smb2_cmd(SMB2_COMMAND_IOCTL); } }, _ => { let hdr = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); let tx = state.new_generic_tx(2, r.command, hdr); tx.set_event(SMBEvent::MalformedData); }, }; } // IOCTL responses ASYNC don't set the tree id pub fn smb2_ioctl_response_record<'b>(state: &mut SMBState, r: &Smb2Record<'b>) { match parse_smb2_response_ioctl(r.data) { IResult::Done(_, rd) => { SCLogDebug!("IOCTL response data: {:?}", rd); let is_dcerpc = rd.is_pipe && match state.get_service_for_guid(&rd.guid) { (_, x) => x, }; if is_dcerpc { SCLogDebug!("IOCTL response data is_pipe. Calling smb_read_dcerpc_record"); let hdr = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); let vercmd = SMBVerCmdStat::new2_with_ntstatus(SMB2_COMMAND_IOCTL, r.nt_status); SCLogDebug!("TODO passing empty GUID"); smb_read_dcerpc_record(state, vercmd, hdr, &[],rd.data); } else { let tx_key = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); SCLogDebug!("SMB2_COMMAND_IOCTL/SMB_NTSTATUS_PENDING looking for {:?}", tx_key); match state.get_generic_tx(2, SMB2_COMMAND_IOCTL, &tx_key) { Some(tx) => { tx.set_status(r.nt_status, false); if r.nt_status!= SMB_NTSTATUS_PENDING { tx.response_done = true; } }, None => { }, } } }, _ => { let tx_key = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); SCLogDebug!("SMB2_COMMAND_IOCTL/SMB_NTSTATUS_PENDING looking for {:?}", tx_key); match state.get_generic_tx(2, SMB2_COMMAND_IOCTL, &tx_key) { Some(tx) => { SCLogDebug!("updated status of tx {}", tx.id); tx.set_status(r.nt_status, false); if r.nt_status!= SMB_NTSTATUS_PENDING { tx.response_done = true; } // parsing failed for 'SUCCESS' record, set event if r.nt_status == SMB_NTSTATUS_SUCCESS { SCLogDebug!("parse fail {:?}", r); tx.set_event(SMBEvent::MalformedData); } }, _ => { }, } }, }; }
{ let mut tx = self.new_tx(); tx.hdr = hdr; tx.type_data = Some(SMBTransactionTypeData::IOCTL( SMBTransactionIoctl::new(func))); tx.request_done = true; tx.response_done = self.tc_trunc; // no response expected if tc is truncated SCLogDebug!("SMB: TX IOCTL created: ID {} FUNC {:08x}: {}", tx.id, func, &fsctl_func_to_string(func)); self.transactions.push(tx); let tx_ref = self.transactions.last_mut(); return tx_ref.unwrap(); }
identifier_body
smb2_ioctl.rs
/* Copyright (C) 2018 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as published by the Free * Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * version 2 along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ use nom::IResult; use log::*; use smb::smb::*; use smb::smb2::*; use smb::smb2_records::*; use smb::dcerpc::*; use smb::events::*; use smb::funcs::*; #[derive(Debug)] pub struct SMBTransactionIoctl { pub func: u32, } impl SMBTransactionIoctl { pub fn new(func: u32) -> SMBTransactionIoctl { return SMBTransactionIoctl { func: func, } } }
pub fn new_ioctl_tx(&mut self, hdr: SMBCommonHdr, func: u32) -> (&mut SMBTransaction) { let mut tx = self.new_tx(); tx.hdr = hdr; tx.type_data = Some(SMBTransactionTypeData::IOCTL( SMBTransactionIoctl::new(func))); tx.request_done = true; tx.response_done = self.tc_trunc; // no response expected if tc is truncated SCLogDebug!("SMB: TX IOCTL created: ID {} FUNC {:08x}: {}", tx.id, func, &fsctl_func_to_string(func)); self.transactions.push(tx); let tx_ref = self.transactions.last_mut(); return tx_ref.unwrap(); } } // IOCTL responses ASYNC don't set the tree id pub fn smb2_ioctl_request_record<'b>(state: &mut SMBState, r: &Smb2Record<'b>) { match parse_smb2_request_ioctl(r.data) { IResult::Done(_, rd) => { SCLogDebug!("IOCTL request data: {:?}", rd); let is_dcerpc = rd.is_pipe && match state.get_service_for_guid(&rd.guid) { (_, x) => x, }; let hdr = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); if is_dcerpc { SCLogDebug!("IOCTL request data is_pipe. Calling smb_write_dcerpc_record"); let vercmd = SMBVerCmdStat::new2(SMB2_COMMAND_IOCTL); smb_write_dcerpc_record(state, vercmd, hdr, rd.data); } else { SCLogDebug!("IOCTL {:08x} {}", rd.function, &fsctl_func_to_string(rd.function)); let tx = state.new_ioctl_tx(hdr, rd.function); tx.vercmd.set_smb2_cmd(SMB2_COMMAND_IOCTL); } }, _ => { let hdr = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); let tx = state.new_generic_tx(2, r.command, hdr); tx.set_event(SMBEvent::MalformedData); }, }; } // IOCTL responses ASYNC don't set the tree id pub fn smb2_ioctl_response_record<'b>(state: &mut SMBState, r: &Smb2Record<'b>) { match parse_smb2_response_ioctl(r.data) { IResult::Done(_, rd) => { SCLogDebug!("IOCTL response data: {:?}", rd); let is_dcerpc = rd.is_pipe && match state.get_service_for_guid(&rd.guid) { (_, x) => x, }; if is_dcerpc { SCLogDebug!("IOCTL response data is_pipe. Calling smb_read_dcerpc_record"); let hdr = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); let vercmd = SMBVerCmdStat::new2_with_ntstatus(SMB2_COMMAND_IOCTL, r.nt_status); SCLogDebug!("TODO passing empty GUID"); smb_read_dcerpc_record(state, vercmd, hdr, &[],rd.data); } else { let tx_key = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); SCLogDebug!("SMB2_COMMAND_IOCTL/SMB_NTSTATUS_PENDING looking for {:?}", tx_key); match state.get_generic_tx(2, SMB2_COMMAND_IOCTL, &tx_key) { Some(tx) => { tx.set_status(r.nt_status, false); if r.nt_status!= SMB_NTSTATUS_PENDING { tx.response_done = true; } }, None => { }, } } }, _ => { let tx_key = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); SCLogDebug!("SMB2_COMMAND_IOCTL/SMB_NTSTATUS_PENDING looking for {:?}", tx_key); match state.get_generic_tx(2, SMB2_COMMAND_IOCTL, &tx_key) { Some(tx) => { SCLogDebug!("updated status of tx {}", tx.id); tx.set_status(r.nt_status, false); if r.nt_status!= SMB_NTSTATUS_PENDING { tx.response_done = true; } // parsing failed for 'SUCCESS' record, set event if r.nt_status == SMB_NTSTATUS_SUCCESS { SCLogDebug!("parse fail {:?}", r); tx.set_event(SMBEvent::MalformedData); } }, _ => { }, } }, }; }
impl SMBState {
random_line_split
smb2_ioctl.rs
/* Copyright (C) 2018 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as published by the Free * Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * version 2 along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ use nom::IResult; use log::*; use smb::smb::*; use smb::smb2::*; use smb::smb2_records::*; use smb::dcerpc::*; use smb::events::*; use smb::funcs::*; #[derive(Debug)] pub struct SMBTransactionIoctl { pub func: u32, } impl SMBTransactionIoctl { pub fn new(func: u32) -> SMBTransactionIoctl { return SMBTransactionIoctl { func: func, } } } impl SMBState { pub fn new_ioctl_tx(&mut self, hdr: SMBCommonHdr, func: u32) -> (&mut SMBTransaction) { let mut tx = self.new_tx(); tx.hdr = hdr; tx.type_data = Some(SMBTransactionTypeData::IOCTL( SMBTransactionIoctl::new(func))); tx.request_done = true; tx.response_done = self.tc_trunc; // no response expected if tc is truncated SCLogDebug!("SMB: TX IOCTL created: ID {} FUNC {:08x}: {}", tx.id, func, &fsctl_func_to_string(func)); self.transactions.push(tx); let tx_ref = self.transactions.last_mut(); return tx_ref.unwrap(); } } // IOCTL responses ASYNC don't set the tree id pub fn smb2_ioctl_request_record<'b>(state: &mut SMBState, r: &Smb2Record<'b>) { match parse_smb2_request_ioctl(r.data) { IResult::Done(_, rd) => { SCLogDebug!("IOCTL request data: {:?}", rd); let is_dcerpc = rd.is_pipe && match state.get_service_for_guid(&rd.guid) { (_, x) => x, }; let hdr = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); if is_dcerpc { SCLogDebug!("IOCTL request data is_pipe. Calling smb_write_dcerpc_record"); let vercmd = SMBVerCmdStat::new2(SMB2_COMMAND_IOCTL); smb_write_dcerpc_record(state, vercmd, hdr, rd.data); } else { SCLogDebug!("IOCTL {:08x} {}", rd.function, &fsctl_func_to_string(rd.function)); let tx = state.new_ioctl_tx(hdr, rd.function); tx.vercmd.set_smb2_cmd(SMB2_COMMAND_IOCTL); } }, _ => { let hdr = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); let tx = state.new_generic_tx(2, r.command, hdr); tx.set_event(SMBEvent::MalformedData); }, }; } // IOCTL responses ASYNC don't set the tree id pub fn smb2_ioctl_response_record<'b>(state: &mut SMBState, r: &Smb2Record<'b>) { match parse_smb2_response_ioctl(r.data) { IResult::Done(_, rd) => { SCLogDebug!("IOCTL response data: {:?}", rd); let is_dcerpc = rd.is_pipe && match state.get_service_for_guid(&rd.guid) { (_, x) => x, }; if is_dcerpc { SCLogDebug!("IOCTL response data is_pipe. 
Calling smb_read_dcerpc_record"); let hdr = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); let vercmd = SMBVerCmdStat::new2_with_ntstatus(SMB2_COMMAND_IOCTL, r.nt_status); SCLogDebug!("TODO passing empty GUID"); smb_read_dcerpc_record(state, vercmd, hdr, &[],rd.data); } else { let tx_key = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); SCLogDebug!("SMB2_COMMAND_IOCTL/SMB_NTSTATUS_PENDING looking for {:?}", tx_key); match state.get_generic_tx(2, SMB2_COMMAND_IOCTL, &tx_key) { Some(tx) => { tx.set_status(r.nt_status, false); if r.nt_status!= SMB_NTSTATUS_PENDING { tx.response_done = true; } }, None =>
, } } }, _ => { let tx_key = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); SCLogDebug!("SMB2_COMMAND_IOCTL/SMB_NTSTATUS_PENDING looking for {:?}", tx_key); match state.get_generic_tx(2, SMB2_COMMAND_IOCTL, &tx_key) { Some(tx) => { SCLogDebug!("updated status of tx {}", tx.id); tx.set_status(r.nt_status, false); if r.nt_status!= SMB_NTSTATUS_PENDING { tx.response_done = true; } // parsing failed for 'SUCCESS' record, set event if r.nt_status == SMB_NTSTATUS_SUCCESS { SCLogDebug!("parse fail {:?}", r); tx.set_event(SMBEvent::MalformedData); } }, _ => { }, } }, }; }
{ }
conditional_block
smb2_ioctl.rs
/* Copyright (C) 2018 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as published by the Free * Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * version 2 along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ use nom::IResult; use log::*; use smb::smb::*; use smb::smb2::*; use smb::smb2_records::*; use smb::dcerpc::*; use smb::events::*; use smb::funcs::*; #[derive(Debug)] pub struct SMBTransactionIoctl { pub func: u32, } impl SMBTransactionIoctl { pub fn new(func: u32) -> SMBTransactionIoctl { return SMBTransactionIoctl { func: func, } } } impl SMBState { pub fn new_ioctl_tx(&mut self, hdr: SMBCommonHdr, func: u32) -> (&mut SMBTransaction) { let mut tx = self.new_tx(); tx.hdr = hdr; tx.type_data = Some(SMBTransactionTypeData::IOCTL( SMBTransactionIoctl::new(func))); tx.request_done = true; tx.response_done = self.tc_trunc; // no response expected if tc is truncated SCLogDebug!("SMB: TX IOCTL created: ID {} FUNC {:08x}: {}", tx.id, func, &fsctl_func_to_string(func)); self.transactions.push(tx); let tx_ref = self.transactions.last_mut(); return tx_ref.unwrap(); } } // IOCTL responses ASYNC don't set the tree id pub fn smb2_ioctl_request_record<'b>(state: &mut SMBState, r: &Smb2Record<'b>) { match parse_smb2_request_ioctl(r.data) { IResult::Done(_, rd) => { SCLogDebug!("IOCTL request data: {:?}", rd); let is_dcerpc = rd.is_pipe && match state.get_service_for_guid(&rd.guid) { (_, x) => x, }; let hdr = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); if is_dcerpc { SCLogDebug!("IOCTL request data is_pipe. Calling smb_write_dcerpc_record"); let vercmd = SMBVerCmdStat::new2(SMB2_COMMAND_IOCTL); smb_write_dcerpc_record(state, vercmd, hdr, rd.data); } else { SCLogDebug!("IOCTL {:08x} {}", rd.function, &fsctl_func_to_string(rd.function)); let tx = state.new_ioctl_tx(hdr, rd.function); tx.vercmd.set_smb2_cmd(SMB2_COMMAND_IOCTL); } }, _ => { let hdr = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); let tx = state.new_generic_tx(2, r.command, hdr); tx.set_event(SMBEvent::MalformedData); }, }; } // IOCTL responses ASYNC don't set the tree id pub fn
<'b>(state: &mut SMBState, r: &Smb2Record<'b>) { match parse_smb2_response_ioctl(r.data) { IResult::Done(_, rd) => { SCLogDebug!("IOCTL response data: {:?}", rd); let is_dcerpc = rd.is_pipe && match state.get_service_for_guid(&rd.guid) { (_, x) => x, }; if is_dcerpc { SCLogDebug!("IOCTL response data is_pipe. Calling smb_read_dcerpc_record"); let hdr = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); let vercmd = SMBVerCmdStat::new2_with_ntstatus(SMB2_COMMAND_IOCTL, r.nt_status); SCLogDebug!("TODO passing empty GUID"); smb_read_dcerpc_record(state, vercmd, hdr, &[],rd.data); } else { let tx_key = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); SCLogDebug!("SMB2_COMMAND_IOCTL/SMB_NTSTATUS_PENDING looking for {:?}", tx_key); match state.get_generic_tx(2, SMB2_COMMAND_IOCTL, &tx_key) { Some(tx) => { tx.set_status(r.nt_status, false); if r.nt_status!= SMB_NTSTATUS_PENDING { tx.response_done = true; } }, None => { }, } } }, _ => { let tx_key = SMBCommonHdr::new(SMBHDR_TYPE_HEADER, r.session_id, 0, r.message_id); SCLogDebug!("SMB2_COMMAND_IOCTL/SMB_NTSTATUS_PENDING looking for {:?}", tx_key); match state.get_generic_tx(2, SMB2_COMMAND_IOCTL, &tx_key) { Some(tx) => { SCLogDebug!("updated status of tx {}", tx.id); tx.set_status(r.nt_status, false); if r.nt_status!= SMB_NTSTATUS_PENDING { tx.response_done = true; } // parsing failed for 'SUCCESS' record, set event if r.nt_status == SMB_NTSTATUS_SUCCESS { SCLogDebug!("parse fail {:?}", r); tx.set_event(SMBEvent::MalformedData); } }, _ => { }, } }, }; }
smb2_ioctl_response_record
identifier_name
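The smb2_ioctl.rs samples create a transaction for each parsed IOCTL request and later find it again through a header key built from the session and message ids, completing it once a non-PENDING status arrives. The sketch below reduces that correlation pattern to a HashMap; the types are hypothetical stand-ins, not Suricata's SMBState or SMBTransaction.

use std::collections::HashMap;

// Key mirroring the (session_id, message_id) pair carried by SMBCommonHdr.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct TxKey {
    session_id: u64,
    message_id: u64,
}

struct IoctlTx {
    func: u32,
    response_done: bool,
}

#[derive(Default)]
struct State {
    transactions: HashMap<TxKey, IoctlTx>,
}

impl State {
    // Request side: remember the transaction under its header key.
    fn new_ioctl_tx(&mut self, key: TxKey, func: u32) {
        self.transactions.insert(key, IoctlTx { func, response_done: false });
    }

    // Response side: only a non-PENDING status completes the transaction.
    fn on_response(&mut self, key: TxKey, status_pending: bool) {
        if let Some(tx) = self.transactions.get_mut(&key) {
            if !status_pending {
                tx.response_done = true;
            }
        }
    }
}

fn main() {
    let mut state = State::default();
    let key = TxKey { session_id: 1, message_id: 42 };
    state.new_ioctl_tx(key, 0x0011_c017);
    state.on_response(key, true);  // pending status: keep waiting
    state.on_response(key, false); // final response: mark done
    assert!(state.transactions[&key].response_done);
    println!("ioctl tx {:#x} correlated and completed", state.transactions[&key].func);
}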
thread.rs
use crate::ffi::CStr; use crate::io; use crate::num::NonZeroUsize; use crate::sys::unsupported; use crate::time::Duration; pub struct Thread(!); pub const DEFAULT_MIN_STACK_SIZE: usize = 4096; impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> { unsupported() } pub fn yield_now() {} pub fn set_name(_name: &CStr) {} pub fn sleep(dur: Duration) { use crate::arch::wasm32; use crate::cmp; // Use an atomic wait to block the current thread artificially with a // timeout listed. Note that we should never be notified (return value // of 0) or our comparison should never fail (return value of 1) so we // should always only resume execution through a timeout (return value // 2). let mut nanos = dur.as_nanos(); while nanos > 0 { let amt = cmp::min(i64::MAX as u128, nanos); let mut x = 0; let val = unsafe { wasm32::memory_atomic_wait32(&mut x, 0, amt as i64) }; debug_assert_eq!(val, 2); nanos -= amt; } } pub fn join(self) {} } pub fn available_parallelism() -> io::Result<NonZeroUsize> { unsupported() } pub mod guard { pub type Guard =!;
pub unsafe fn current() -> Option<Guard> { None } pub unsafe fn init() -> Option<Guard> { None } } // We currently just use our own thread-local to store our // current thread's ID, and then we lazily initialize it to something allocated // from a global counter. pub fn my_id() -> u32 { use crate::sync::atomic::{AtomicU32, Ordering::SeqCst}; static NEXT_ID: AtomicU32 = AtomicU32::new(0); #[thread_local] static mut MY_ID: u32 = 0; unsafe { // If our thread ID isn't set yet then we need to allocate one. Do so // with with a simple "atomically add to a global counter" strategy. // This strategy doesn't handled what happens when the counter // overflows, however, so just abort everything once the counter // overflows and eventually we could have some sort of recycling scheme // (or maybe this is all totally irrelevant by that point!). In any case // though we're using a CAS loop instead of a `fetch_add` to ensure that // the global counter never overflows. if MY_ID == 0 { let mut cur = NEXT_ID.load(SeqCst); MY_ID = loop { let next = cur.checked_add(1).unwrap_or_else(|| crate::process::abort()); match NEXT_ID.compare_exchange(cur, next, SeqCst, SeqCst) { Ok(_) => break next, Err(i) => cur = i, } }; } MY_ID } }
random_line_split
thread.rs
use crate::ffi::CStr; use crate::io; use crate::num::NonZeroUsize; use crate::sys::unsupported; use crate::time::Duration; pub struct Thread(!); pub const DEFAULT_MIN_STACK_SIZE: usize = 4096; impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> { unsupported() } pub fn
() {} pub fn set_name(_name: &CStr) {} pub fn sleep(dur: Duration) { use crate::arch::wasm32; use crate::cmp; // Use an atomic wait to block the current thread artificially with a // timeout listed. Note that we should never be notified (return value // of 0) or our comparison should never fail (return value of 1) so we // should always only resume execution through a timeout (return value // 2). let mut nanos = dur.as_nanos(); while nanos > 0 { let amt = cmp::min(i64::MAX as u128, nanos); let mut x = 0; let val = unsafe { wasm32::memory_atomic_wait32(&mut x, 0, amt as i64) }; debug_assert_eq!(val, 2); nanos -= amt; } } pub fn join(self) {} } pub fn available_parallelism() -> io::Result<NonZeroUsize> { unsupported() } pub mod guard { pub type Guard =!; pub unsafe fn current() -> Option<Guard> { None } pub unsafe fn init() -> Option<Guard> { None } } // We currently just use our own thread-local to store our // current thread's ID, and then we lazily initialize it to something allocated // from a global counter. pub fn my_id() -> u32 { use crate::sync::atomic::{AtomicU32, Ordering::SeqCst}; static NEXT_ID: AtomicU32 = AtomicU32::new(0); #[thread_local] static mut MY_ID: u32 = 0; unsafe { // If our thread ID isn't set yet then we need to allocate one. Do so // with with a simple "atomically add to a global counter" strategy. // This strategy doesn't handled what happens when the counter // overflows, however, so just abort everything once the counter // overflows and eventually we could have some sort of recycling scheme // (or maybe this is all totally irrelevant by that point!). In any case // though we're using a CAS loop instead of a `fetch_add` to ensure that // the global counter never overflows. if MY_ID == 0 { let mut cur = NEXT_ID.load(SeqCst); MY_ID = loop { let next = cur.checked_add(1).unwrap_or_else(|| crate::process::abort()); match NEXT_ID.compare_exchange(cur, next, SeqCst, SeqCst) { Ok(_) => break next, Err(i) => cur = i, } }; } MY_ID } }
yield_now
identifier_name
thread.rs
use crate::ffi::CStr; use crate::io; use crate::num::NonZeroUsize; use crate::sys::unsupported; use crate::time::Duration; pub struct Thread(!); pub const DEFAULT_MIN_STACK_SIZE: usize = 4096; impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> { unsupported() } pub fn yield_now() {} pub fn set_name(_name: &CStr) {} pub fn sleep(dur: Duration)
pub fn join(self) {} } pub fn available_parallelism() -> io::Result<NonZeroUsize> { unsupported() } pub mod guard { pub type Guard =!; pub unsafe fn current() -> Option<Guard> { None } pub unsafe fn init() -> Option<Guard> { None } } // We currently just use our own thread-local to store our // current thread's ID, and then we lazily initialize it to something allocated // from a global counter. pub fn my_id() -> u32 { use crate::sync::atomic::{AtomicU32, Ordering::SeqCst}; static NEXT_ID: AtomicU32 = AtomicU32::new(0); #[thread_local] static mut MY_ID: u32 = 0; unsafe { // If our thread ID isn't set yet then we need to allocate one. Do so // with with a simple "atomically add to a global counter" strategy. // This strategy doesn't handled what happens when the counter // overflows, however, so just abort everything once the counter // overflows and eventually we could have some sort of recycling scheme // (or maybe this is all totally irrelevant by that point!). In any case // though we're using a CAS loop instead of a `fetch_add` to ensure that // the global counter never overflows. if MY_ID == 0 { let mut cur = NEXT_ID.load(SeqCst); MY_ID = loop { let next = cur.checked_add(1).unwrap_or_else(|| crate::process::abort()); match NEXT_ID.compare_exchange(cur, next, SeqCst, SeqCst) { Ok(_) => break next, Err(i) => cur = i, } }; } MY_ID } }
{ use crate::arch::wasm32; use crate::cmp; // Use an atomic wait to block the current thread artificially with a // timeout listed. Note that we should never be notified (return value // of 0) or our comparison should never fail (return value of 1) so we // should always only resume execution through a timeout (return value // 2). let mut nanos = dur.as_nanos(); while nanos > 0 { let amt = cmp::min(i64::MAX as u128, nanos); let mut x = 0; let val = unsafe { wasm32::memory_atomic_wait32(&mut x, 0, amt as i64) }; debug_assert_eq!(val, 2); nanos -= amt; } }
identifier_body
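The thread.rs samples allocate per-thread ids from a global counter with a compare-exchange loop so that overflow aborts instead of silently wrapping, as the comment explains. The sketch below lifts that loop out of the #[thread_local] wasm context onto stable std atomics, with a panic standing in for process::abort.

use std::sync::atomic::{AtomicU32, Ordering::SeqCst};

static NEXT_ID: AtomicU32 = AtomicU32::new(0);

fn allocate_id() -> u32 {
    let mut cur = NEXT_ID.load(SeqCst);
    loop {
        // checked_add turns overflow into an explicit failure instead of wrapping.
        let next = cur.checked_add(1).expect("thread id counter overflowed");
        match NEXT_ID.compare_exchange(cur, next, SeqCst, SeqCst) {
            Ok(_) => return next,
            Err(observed) => cur = observed,
        }
    }
}

fn main() {
    let a = allocate_id();
    let b = allocate_id();
    assert_ne!(a, b);
    println!("allocated ids {a} and {b}");
}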
fn_queries.rs
use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_middle::ty::query::Providers; use rustc_middle::ty::TyCtxt; use rustc_span::symbol::Symbol; use rustc_target::spec::abi::Abi; /// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> { if tcx.is_const_fn_raw(def_id) { let const_stab = tcx.lookup_const_stability(def_id)?; if const_stab.level.is_unstable() { Some(const_stab.feature) } else { None } } else { None } } pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, hir_id: hir::HirId) -> bool { let parent_id = tcx.hir().get_parent_node(hir_id); matches!( tcx.hir().get(parent_id), hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const,.. }), .. }) ) } /// Checks whether the function has a `const` modifier or, in case it is an intrinsic, whether /// said intrinsic has a `rustc_const_{un,}stable` attribute. fn
(tcx: TyCtxt<'_>, def_id: DefId) -> bool { let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); let node = tcx.hir().get(hir_id); if let hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..),.. }) = node { // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other // foreign items cannot be evaluated at compile-time. if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = tcx.hir().get_foreign_abi(hir_id) { tcx.lookup_const_stability(def_id).is_some() } else { false } } else if let Some(fn_kind) = node.fn_kind() { if fn_kind.constness() == hir::Constness::Const { return true; } // If the function itself is not annotated with `const`, it may still be a `const fn` // if it resides in a const trait impl. is_parent_const_impl_raw(tcx, hir_id) } else { matches!(node, hir::Node::Ctor(_)) } } fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool { tcx.is_const_fn(def_id) && match tcx.lookup_const_stability(def_id) { Some(stab) => { if cfg!(debug_assertions) && stab.promotable { let sig = tcx.fn_sig(def_id); assert_eq!( sig.unsafety(), hir::Unsafety::Normal, "don't mark const unsafe fns as promotable", // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682 ); } stab.promotable } None => false, } } pub fn provide(providers: &mut Providers) { *providers = Providers { is_const_fn_raw, is_promotable_const_fn,..*providers }; }
is_const_fn_raw
identifier_name
fn_queries.rs
use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_middle::ty::query::Providers; use rustc_middle::ty::TyCtxt; use rustc_span::symbol::Symbol; use rustc_target::spec::abi::Abi; /// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> { if tcx.is_const_fn_raw(def_id) { let const_stab = tcx.lookup_const_stability(def_id)?; if const_stab.level.is_unstable() { Some(const_stab.feature) } else
} else { None } } pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, hir_id: hir::HirId) -> bool { let parent_id = tcx.hir().get_parent_node(hir_id); matches!( tcx.hir().get(parent_id), hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const,.. }), .. }) ) } /// Checks whether the function has a `const` modifier or, in case it is an intrinsic, whether /// said intrinsic has a `rustc_const_{un,}stable` attribute. fn is_const_fn_raw(tcx: TyCtxt<'_>, def_id: DefId) -> bool { let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); let node = tcx.hir().get(hir_id); if let hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..),.. }) = node { // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other // foreign items cannot be evaluated at compile-time. if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = tcx.hir().get_foreign_abi(hir_id) { tcx.lookup_const_stability(def_id).is_some() } else { false } } else if let Some(fn_kind) = node.fn_kind() { if fn_kind.constness() == hir::Constness::Const { return true; } // If the function itself is not annotated with `const`, it may still be a `const fn` // if it resides in a const trait impl. is_parent_const_impl_raw(tcx, hir_id) } else { matches!(node, hir::Node::Ctor(_)) } } fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool { tcx.is_const_fn(def_id) && match tcx.lookup_const_stability(def_id) { Some(stab) => { if cfg!(debug_assertions) && stab.promotable { let sig = tcx.fn_sig(def_id); assert_eq!( sig.unsafety(), hir::Unsafety::Normal, "don't mark const unsafe fns as promotable", // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682 ); } stab.promotable } None => false, } } pub fn provide(providers: &mut Providers) { *providers = Providers { is_const_fn_raw, is_promotable_const_fn,..*providers }; }
{ None }
conditional_block
fn_queries.rs
use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_middle::ty::query::Providers; use rustc_middle::ty::TyCtxt; use rustc_span::symbol::Symbol; use rustc_target::spec::abi::Abi; /// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> { if tcx.is_const_fn_raw(def_id) { let const_stab = tcx.lookup_const_stability(def_id)?; if const_stab.level.is_unstable() { Some(const_stab.feature) } else { None } } else { None } } pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, hir_id: hir::HirId) -> bool { let parent_id = tcx.hir().get_parent_node(hir_id); matches!( tcx.hir().get(parent_id), hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const,.. }), .. }) ) } /// Checks whether the function has a `const` modifier or, in case it is an intrinsic, whether /// said intrinsic has a `rustc_const_{un,}stable` attribute. fn is_const_fn_raw(tcx: TyCtxt<'_>, def_id: DefId) -> bool { let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); let node = tcx.hir().get(hir_id); if let hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..),.. }) = node { // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other // foreign items cannot be evaluated at compile-time. if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = tcx.hir().get_foreign_abi(hir_id) { tcx.lookup_const_stability(def_id).is_some() } else { false } } else if let Some(fn_kind) = node.fn_kind() { if fn_kind.constness() == hir::Constness::Const { return true; } // If the function itself is not annotated with `const`, it may still be a `const fn` // if it resides in a const trait impl. is_parent_const_impl_raw(tcx, hir_id) } else { matches!(node, hir::Node::Ctor(_)) } } fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool { tcx.is_const_fn(def_id) && match tcx.lookup_const_stability(def_id) { Some(stab) => { if cfg!(debug_assertions) && stab.promotable { let sig = tcx.fn_sig(def_id); assert_eq!( sig.unsafety(),
stab.promotable } None => false, } } pub fn provide(providers: &mut Providers) { *providers = Providers { is_const_fn_raw, is_promotable_const_fn, ..*providers }; }
hir::Unsafety::Normal, "don't mark const unsafe fns as promotable", // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682 ); }
random_line_split
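The `fn_queries.rs` row above uses the standard `matches!` macro to collapse a pattern check into a boolean (`is_parent_const_impl_raw`). A minimal standalone sketch of that technique, using a hypothetical `Node` enum rather than rustc's `hir::Node`, not part of the dataset row itself:

```rust
// Stand-in enum for illustration only; not rustc's HIR.
#[derive(Debug)]
enum Node {
    Item { is_const: bool },
    Ctor,
    Other,
}

fn is_const_item(node: &Node) -> bool {
    // `matches!` expands to a `match` that yields true for matching arms.
    matches!(node, Node::Item { is_const: true })
}

fn main() {
    assert!(is_const_item(&Node::Item { is_const: true }));
    assert!(!is_const_item(&Node::Ctor));
    assert!(!is_const_item(&Node::Other));
}
```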
fn_queries.rs
use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_middle::ty::query::Providers; use rustc_middle::ty::TyCtxt; use rustc_span::symbol::Symbol; use rustc_target::spec::abi::Abi; /// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> { if tcx.is_const_fn_raw(def_id) { let const_stab = tcx.lookup_const_stability(def_id)?; if const_stab.level.is_unstable() { Some(const_stab.feature) } else { None } } else { None } } pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, hir_id: hir::HirId) -> bool { let parent_id = tcx.hir().get_parent_node(hir_id); matches!( tcx.hir().get(parent_id), hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const,.. }), .. }) ) } /// Checks whether the function has a `const` modifier or, in case it is an intrinsic, whether /// said intrinsic has a `rustc_const_{un,}stable` attribute. fn is_const_fn_raw(tcx: TyCtxt<'_>, def_id: DefId) -> bool { let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local()); let node = tcx.hir().get(hir_id); if let hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..),.. }) = node { // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other // foreign items cannot be evaluated at compile-time. if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = tcx.hir().get_foreign_abi(hir_id) { tcx.lookup_const_stability(def_id).is_some() } else { false } } else if let Some(fn_kind) = node.fn_kind() { if fn_kind.constness() == hir::Constness::Const { return true; } // If the function itself is not annotated with `const`, it may still be a `const fn` // if it resides in a const trait impl. is_parent_const_impl_raw(tcx, hir_id) } else { matches!(node, hir::Node::Ctor(_)) } } fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool { tcx.is_const_fn(def_id) && match tcx.lookup_const_stability(def_id) { Some(stab) => { if cfg!(debug_assertions) && stab.promotable { let sig = tcx.fn_sig(def_id); assert_eq!( sig.unsafety(), hir::Unsafety::Normal, "don't mark const unsafe fns as promotable", // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682 ); } stab.promotable } None => false, } } pub fn provide(providers: &mut Providers)
{ *providers = Providers { is_const_fn_raw, is_promotable_const_fn, ..*providers }; }
identifier_body
clone.rs
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The `Clone` trait for types that cannot be 'implicitly copied' //! //! In Rust, some simple types are "implicitly copyable" and when you //! assign them or pass them as arguments, the receiver will get a copy, //! leaving the original value in place. These types do not require //! allocation to copy and do not have finalizers (i.e. they do not //! contain owned boxes or implement `Drop`), so the compiler considers //! them cheap and safe to copy. For other types copies must be made //! explicitly, by convention implementing the `Clone` trait and calling //! the `clone` method. #![stable(feature = "rust1", since = "1.0.0")] use marker::Sized; /// A common trait for cloning an object. #[stable(feature = "rust1", since = "1.0.0")] pub trait Clone : Sized { /// Returns a copy of the value. #[stable(feature = "rust1", since = "1.0.0")] fn clone(&self) -> Self; /// Perform copy-assignment from `source`. /// /// `a.clone_from(&b)` is equivalent to `a = b.clone()` in functionality, /// but can be overridden to reuse the resources of `a` to avoid unnecessary /// allocations. #[inline(always)] #[unstable(feature = "core", reason = "this function is rarely used")] fn clone_from(&mut self, source: &Self) { *self = source.clone() } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T:?Sized> Clone for &'a T { /// Return a shallow copy of the reference. #[inline] fn clone(&self) -> &'a T
} macro_rules! clone_impl { ($t:ty) => { #[stable(feature = "rust1", since = "1.0.0")] impl Clone for $t { /// Return a deep copy of the value. #[inline] fn clone(&self) -> $t { *self } } } } clone_impl! { int } clone_impl! { i8 } clone_impl! { i16 } clone_impl! { i32 } clone_impl! { i64 } clone_impl! { uint } clone_impl! { u8 } clone_impl! { u16 } clone_impl! { u32 } clone_impl! { u64 } clone_impl! { f32 } clone_impl! { f64 } clone_impl! { () } clone_impl! { bool } clone_impl! { char } macro_rules! extern_fn_clone { ($($A:ident),*) => ( #[unstable(feature = "core", reason = "this may not be sufficient for fns with region parameters")] impl<$($A,)* ReturnType> Clone for extern "Rust" fn($($A),*) -> ReturnType { /// Return a copy of a function pointer #[inline] fn clone(&self) -> extern "Rust" fn($($A),*) -> ReturnType { *self } } ) } extern_fn_clone! {} extern_fn_clone! { A } extern_fn_clone! { A, B } extern_fn_clone! { A, B, C } extern_fn_clone! { A, B, C, D } extern_fn_clone! { A, B, C, D, E } extern_fn_clone! { A, B, C, D, E, F } extern_fn_clone! { A, B, C, D, E, F, G } extern_fn_clone! { A, B, C, D, E, F, G, H }
{ *self }
identifier_body
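The `clone.rs` rows carry the doc comment stating that `a.clone_from(&b)` behaves like `a = b.clone()` but may reuse `a`'s resources. A small sketch of the call site against today's standard library (illustrative values only, not part of the dataset row):

```rust
fn main() {
    let src: Vec<u8> = vec![1, 2, 3];

    // Destination that already owns an allocation.
    let mut dst: Vec<u8> = Vec::with_capacity(64);
    dst.extend_from_slice(&[9, 9]);

    // Same observable result as `dst = src.clone()`, but `Vec`'s override of
    // `clone_from` may reuse dst's existing buffer instead of allocating.
    dst.clone_from(&src);
    assert_eq!(dst, src);
}
```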
clone.rs
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The `Clone` trait for types that cannot be 'implicitly copied' //! //! In Rust, some simple types are "implicitly copyable" and when you //! assign them or pass them as arguments, the receiver will get a copy, //! leaving the original value in place. These types do not require //! allocation to copy and do not have finalizers (i.e. they do not //! contain owned boxes or implement `Drop`), so the compiler considers //! them cheap and safe to copy. For other types copies must be made //! explicitly, by convention implementing the `Clone` trait and calling //! the `clone` method. #![stable(feature = "rust1", since = "1.0.0")] use marker::Sized; /// A common trait for cloning an object. #[stable(feature = "rust1", since = "1.0.0")] pub trait Clone : Sized { /// Returns a copy of the value. #[stable(feature = "rust1", since = "1.0.0")] fn clone(&self) -> Self; /// Perform copy-assignment from `source`. /// /// `a.clone_from(&b)` is equivalent to `a = b.clone()` in functionality, /// but can be overridden to reuse the resources of `a` to avoid unnecessary /// allocations. #[inline(always)] #[unstable(feature = "core", reason = "this function is rarely used")] fn clone_from(&mut self, source: &Self) { *self = source.clone() } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T:?Sized> Clone for &'a T { /// Return a shallow copy of the reference. #[inline] fn clone(&self) -> &'a T { *self } } macro_rules! clone_impl { ($t:ty) => { #[stable(feature = "rust1", since = "1.0.0")] impl Clone for $t { /// Return a deep copy of the value. #[inline] fn clone(&self) -> $t { *self } } } } clone_impl! { int } clone_impl! { i8 } clone_impl! { i16 } clone_impl! { i32 } clone_impl! { i64 } clone_impl! { uint } clone_impl! { u8 } clone_impl! { u16 } clone_impl! { u32 } clone_impl! { u64 } clone_impl! { f32 } clone_impl! { f64 }
macro_rules! extern_fn_clone { ($($A:ident),*) => ( #[unstable(feature = "core", reason = "this may not be sufficient for fns with region parameters")] impl<$($A,)* ReturnType> Clone for extern "Rust" fn($($A),*) -> ReturnType { /// Return a copy of a function pointer #[inline] fn clone(&self) -> extern "Rust" fn($($A),*) -> ReturnType { *self } } ) } extern_fn_clone! {} extern_fn_clone! { A } extern_fn_clone! { A, B } extern_fn_clone! { A, B, C } extern_fn_clone! { A, B, C, D } extern_fn_clone! { A, B, C, D, E } extern_fn_clone! { A, B, C, D, E, F } extern_fn_clone! { A, B, C, D, E, F, G } extern_fn_clone! { A, B, C, D, E, F, G, H }
clone_impl! { () } clone_impl! { bool } clone_impl! { char }
random_line_split
clone.rs
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The `Clone` trait for types that cannot be 'implicitly copied' //! //! In Rust, some simple types are "implicitly copyable" and when you //! assign them or pass them as arguments, the receiver will get a copy, //! leaving the original value in place. These types do not require //! allocation to copy and do not have finalizers (i.e. they do not //! contain owned boxes or implement `Drop`), so the compiler considers //! them cheap and safe to copy. For other types copies must be made //! explicitly, by convention implementing the `Clone` trait and calling //! the `clone` method. #![stable(feature = "rust1", since = "1.0.0")] use marker::Sized; /// A common trait for cloning an object. #[stable(feature = "rust1", since = "1.0.0")] pub trait Clone : Sized { /// Returns a copy of the value. #[stable(feature = "rust1", since = "1.0.0")] fn clone(&self) -> Self; /// Perform copy-assignment from `source`. /// /// `a.clone_from(&b)` is equivalent to `a = b.clone()` in functionality, /// but can be overridden to reuse the resources of `a` to avoid unnecessary /// allocations. #[inline(always)] #[unstable(feature = "core", reason = "this function is rarely used")] fn clone_from(&mut self, source: &Self) { *self = source.clone() } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T:?Sized> Clone for &'a T { /// Return a shallow copy of the reference. #[inline] fn
(&self) -> &'a T { *self } } macro_rules! clone_impl { ($t:ty) => { #[stable(feature = "rust1", since = "1.0.0")] impl Clone for $t { /// Return a deep copy of the value. #[inline] fn clone(&self) -> $t { *self } } } } clone_impl! { int } clone_impl! { i8 } clone_impl! { i16 } clone_impl! { i32 } clone_impl! { i64 } clone_impl! { uint } clone_impl! { u8 } clone_impl! { u16 } clone_impl! { u32 } clone_impl! { u64 } clone_impl! { f32 } clone_impl! { f64 } clone_impl! { () } clone_impl! { bool } clone_impl! { char } macro_rules! extern_fn_clone { ($($A:ident),*) => ( #[unstable(feature = "core", reason = "this may not be sufficient for fns with region parameters")] impl<$($A,)* ReturnType> Clone for extern "Rust" fn($($A),*) -> ReturnType { /// Return a copy of a function pointer #[inline] fn clone(&self) -> extern "Rust" fn($($A),*) -> ReturnType { *self } } ) } extern_fn_clone! {} extern_fn_clone! { A } extern_fn_clone! { A, B } extern_fn_clone! { A, B, C } extern_fn_clone! { A, B, C, D } extern_fn_clone! { A, B, C, D, E } extern_fn_clone! { A, B, C, D, E, F } extern_fn_clone! { A, B, C, D, E, F, G } extern_fn_clone! { A, B, C, D, E, F, G, H }
clone
identifier_name
lib.rs
#![crate_name = "librespot"]
#![cfg_attr(feature = "cargo-clippy", allow(unused_io_amount))] // TODO: many items from tokio-core::io have been deprecated in favour of tokio-io #![allow(deprecated)] #[macro_use] extern crate log; #[macro_use] extern crate serde_json; #[macro_use] extern crate serde_derive; extern crate base64; extern crate crypto; extern crate futures; extern crate hyper; extern crate mdns; extern crate num_bigint; extern crate protobuf; extern crate rand; extern crate tokio_core; extern crate url; pub extern crate librespot_audio as audio; pub extern crate librespot_core as core; pub extern crate librespot_protocol as protocol; pub extern crate librespot_metadata as metadata; #[cfg(feature = "alsa-backend")] extern crate alsa; #[cfg(feature = "portaudio-rs")] extern crate portaudio_rs; #[cfg(feature = "libpulse-sys")] extern crate libpulse_sys; pub mod audio_backend; pub mod discovery; pub mod keymaster; pub mod mixer; pub mod player; include!(concat!(env!("OUT_DIR"), "/lib.rs"));
random_line_split
spinner.rs
// This file is part of rgtk. // // rgtk is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // rgtk is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with rgtk. If not, see <http://www.gnu.org/licenses/>. //! Show a spinner animation use gtk::cast::GTK_SPINNER; use gtk::ffi; /// Spinner — Show a spinner animation struct_Widget!(Spinner) impl Spinner { pub fn new() -> Option<Spinner> { let tmp_pointer = unsafe { ffi::gtk_spinner_new() }; check_pointer!(tmp_pointer, Spinner) } pub fn start(&mut self) -> () { unsafe { ffi::gtk_spinner_start(GTK_SPINNER(self.pointer)) } } pub fn st
mut self) -> () { unsafe { ffi::gtk_spinner_stop(GTK_SPINNER(self.pointer)) } } } impl_drop!(Spinner) impl_TraitWidget!(Spinner) impl_widget_events!(Spinner)
op(&
identifier_name
spinner.rs
// This file is part of rgtk. // // rgtk is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version.
// but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with rgtk. If not, see <http://www.gnu.org/licenses/>. //! Show a spinner animation use gtk::cast::GTK_SPINNER; use gtk::ffi; /// Spinner — Show a spinner animation struct_Widget!(Spinner) impl Spinner { pub fn new() -> Option<Spinner> { let tmp_pointer = unsafe { ffi::gtk_spinner_new() }; check_pointer!(tmp_pointer, Spinner) } pub fn start(&mut self) -> () { unsafe { ffi::gtk_spinner_start(GTK_SPINNER(self.pointer)) } } pub fn stop(&mut self) -> () { unsafe { ffi::gtk_spinner_stop(GTK_SPINNER(self.pointer)) } } } impl_drop!(Spinner) impl_TraitWidget!(Spinner) impl_widget_events!(Spinner)
// // rgtk is distributed in the hope that it will be useful,
random_line_split
spinner.rs
// This file is part of rgtk. // // rgtk is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // rgtk is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with rgtk. If not, see <http://www.gnu.org/licenses/>. //! Show a spinner animation use gtk::cast::GTK_SPINNER; use gtk::ffi; /// Spinner — Show a spinner animation struct_Widget!(Spinner) impl Spinner { pub fn new() -> Option<Spinner> { let tmp_pointer = unsafe { ffi::gtk_spinner_new() }; check_pointer!(tmp_pointer, Spinner) } pub fn start(&mut self) -> () {
pub fn stop(&mut self) -> () { unsafe { ffi::gtk_spinner_stop(GTK_SPINNER(self.pointer)) } } } impl_drop!(Spinner) impl_TraitWidget!(Spinner) impl_widget_events!(Spinner)
unsafe { ffi::gtk_spinner_start(GTK_SPINNER(self.pointer)) } }
identifier_body
simd.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! SIMD vectors. //! //! These types can be used for accessing basic SIMD operations. Each of them //! implements the standard arithmetic operator traits (Add, Sub, Mul, Div, //! Rem, Shl, Shr) through compiler magic, rather than explicitly. Currently //! comparison operators are not implemented. To use SSE3+, you must enable //! the features, like `-C target-feature=sse3,sse4.1,sse4.2`, or a more //! specific `target-cpu`. No other SIMD intrinsics or high-level wrappers are //! provided beyond this module. //! //! ```rust //! #[allow(experimental)]; //! //! fn main() { //! use std::simd::f32x4; //! let a = f32x4(40.0, 41.0, 42.0, 43.0); //! let b = f32x4(1.0, 1.1, 3.4, 9.8); //! println!("{}", a + b); //! } //! ``` //! //! ## Stability Note //! //! These are all experimental. The interface may change entirely, without //! warning. #![allow(non_camel_case_types)] #![allow(missing_doc)] #[experimental] #[simd] #[deriving(Show)] pub struct i8x16(pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8);
#[simd] #[deriving(Show)] pub struct i16x8(pub i16, pub i16, pub i16, pub i16, pub i16, pub i16, pub i16, pub i16); #[experimental] #[simd] #[deriving(Show)] pub struct i32x4(pub i32, pub i32, pub i32, pub i32); #[experimental] #[simd] #[deriving(Show)] pub struct i64x2(pub i64, pub i64); #[experimental] #[simd] #[deriving(Show)] pub struct u8x16(pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8); #[experimental] #[simd] #[deriving(Show)] pub struct u16x8(pub u16, pub u16, pub u16, pub u16, pub u16, pub u16, pub u16, pub u16); #[experimental] #[simd] #[deriving(Show)] pub struct u32x4(pub u32, pub u32, pub u32, pub u32); #[experimental] #[simd] #[deriving(Show)] pub struct u64x2(pub u64, pub u64); #[experimental] #[simd] #[deriving(Show)] pub struct f32x4(pub f32, pub f32, pub f32, pub f32); #[experimental] #[simd] #[deriving(Show)] pub struct f64x2(pub f64, pub f64);
#[experimental]
random_line_split
simd.rs
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! SIMD vectors. //! //! These types can be used for accessing basic SIMD operations. Each of them //! implements the standard arithmetic operator traits (Add, Sub, Mul, Div, //! Rem, Shl, Shr) through compiler magic, rather than explicitly. Currently //! comparison operators are not implemented. To use SSE3+, you must enable //! the features, like `-C target-feature=sse3,sse4.1,sse4.2`, or a more //! specific `target-cpu`. No other SIMD intrinsics or high-level wrappers are //! provided beyond this module. //! //! ```rust //! #[allow(experimental)]; //! //! fn main() { //! use std::simd::f32x4; //! let a = f32x4(40.0, 41.0, 42.0, 43.0); //! let b = f32x4(1.0, 1.1, 3.4, 9.8); //! println!("{}", a + b); //! } //! ``` //! //! ## Stability Note //! //! These are all experimental. The interface may change entirely, without //! warning. #![allow(non_camel_case_types)] #![allow(missing_doc)] #[experimental] #[simd] #[deriving(Show)] pub struct i8x16(pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8, pub i8); #[experimental] #[simd] #[deriving(Show)] pub struct i16x8(pub i16, pub i16, pub i16, pub i16, pub i16, pub i16, pub i16, pub i16); #[experimental] #[simd] #[deriving(Show)] pub struct i32x4(pub i32, pub i32, pub i32, pub i32); #[experimental] #[simd] #[deriving(Show)] pub struct i64x2(pub i64, pub i64); #[experimental] #[simd] #[deriving(Show)] pub struct
(pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8, pub u8); #[experimental] #[simd] #[deriving(Show)] pub struct u16x8(pub u16, pub u16, pub u16, pub u16, pub u16, pub u16, pub u16, pub u16); #[experimental] #[simd] #[deriving(Show)] pub struct u32x4(pub u32, pub u32, pub u32, pub u32); #[experimental] #[simd] #[deriving(Show)] pub struct u64x2(pub u64, pub u64); #[experimental] #[simd] #[deriving(Show)] pub struct f32x4(pub f32, pub f32, pub f32, pub f32); #[experimental] #[simd] #[deriving(Show)] pub struct f64x2(pub f64, pub f64);
u8x16
identifier_name
sampler.rs
//! # OpenTelemetry ShouldSample Interface //! //! ## Sampling //! //! Sampling is a mechanism to control the noise and overhead introduced by //! OpenTelemetry by reducing the number of samples of traces collected and //! sent to the backend. //! //! Sampling may be implemented on different stages of a trace collection. //! OpenTelemetry SDK defines a `ShouldSample` interface that can be used at //! instrumentation points by libraries to check the sampling `SamplingDecision` //! early and optimize the amount of telemetry that needs to be collected. //! //! All other sampling algorithms may be implemented on SDK layer in exporters, //! or even out of process in Agent or Collector. //! //! The OpenTelemetry API has two properties responsible for the data collection: //! //! * `is_recording` method on a `Span`. If `true` the current `Span` records //! tracing events (attributes, events, status, etc.), otherwise all tracing //! events are dropped. Users can use this property to determine if expensive //! trace events can be avoided. `SpanProcessor`s will receive //! all spans with this flag set. However, `SpanExporter`s will //! not receive them unless the `Sampled` flag was set. //! * `Sampled` flag in `trace_flags` on `SpanContext`. This flag is propagated //! via the `SpanContext` to child Spans. For more details see the [W3C //! specification](https://w3c.github.io/trace-context/). This flag indicates //! that the `Span` has been `sampled` and will be exported. `SpanProcessor`s //! and `SpanExporter`s will receive spans with the `Sampled` flag set for //! processing. //! //! The flag combination `Sampled == false` and `is_recording` == true` means //! that the current `Span` does record information, but most likely the child //! `Span` will not. //! //! The flag combination `Sampled == true` and `is_recording == false` could //! cause gaps in the distributed trace, and because of this OpenTelemetry API //! MUST NOT allow this combination. use crate::InstrumentationLibrary; use opentelemetry_api::{ trace::{ Link, SamplingDecision, SamplingResult, SpanKind, TraceContextExt, TraceId, TraceState, }, Context, KeyValue, }; use std::convert::TryInto; /// The `ShouldSample` interface allows implementations to provide samplers /// which will return a sampling `SamplingResult` based on information that /// is typically available just before the `Span` was created. pub trait ShouldSample: Send + Sync + std::fmt::Debug { /// Returns the `SamplingDecision` for a `Span` to be created. #[allow(clippy::too_many_arguments)] fn should_sample( &self, parent_context: Option<&Context>, trace_id: TraceId, name: &str, span_kind: &SpanKind, attributes: &[KeyValue], links: &[Link], instrumentation_library: &InstrumentationLibrary, ) -> SamplingResult; } /// Sampling options #[derive(Clone, Debug, PartialEq)] pub enum Sampler { /// Always sample the trace AlwaysOn, /// Never sample the trace AlwaysOff, /// Respects the parent span's sampling decision or delegates a delegate sampler for root spans. ParentBased(Box<Sampler>), /// Sample a given fraction of traces. Fractions >= 1 will always sample. If the parent span is /// sampled, then it's child spans will automatically be sampled. Fractions < 0 are treated as /// zero, but spans may still be sampled if their parent is. TraceIdRatioBased(f64), } impl ShouldSample for Sampler { fn
( &self, parent_context: Option<&Context>, trace_id: TraceId, name: &str, span_kind: &SpanKind, attributes: &[KeyValue], links: &[Link], instrumentation_library: &InstrumentationLibrary, ) -> SamplingResult { let decision = match self { // Always sample the trace Sampler::AlwaysOn => SamplingDecision::RecordAndSample, // Never sample the trace Sampler::AlwaysOff => SamplingDecision::Drop, // The parent decision if sampled; otherwise the decision of delegate_sampler Sampler::ParentBased(delegate_sampler) => { parent_context.filter(|cx| cx.has_active_span()).map_or( delegate_sampler .should_sample( parent_context, trace_id, name, span_kind, attributes, links, instrumentation_library, ) .decision, |ctx| { let span = ctx.span(); let parent_span_context = span.span_context(); if parent_span_context.is_sampled() { SamplingDecision::RecordAndSample } else { SamplingDecision::Drop } }, ) } // Probabilistically sample the trace. Sampler::TraceIdRatioBased(prob) => { if *prob >= 1.0 { SamplingDecision::RecordAndSample } else { let prob_upper_bound = (prob.max(0.0) * (1u64 << 63) as f64) as u64; // TODO: update behavior when the spec definition resolves // https://github.com/open-telemetry/opentelemetry-specification/issues/1413 let bytes = trace_id.to_bytes(); let (_, low) = bytes.split_at(8); let trace_id_low = u64::from_be_bytes(low.try_into().unwrap()); let rnd_from_trace_id = trace_id_low >> 1; if rnd_from_trace_id < prob_upper_bound { SamplingDecision::RecordAndSample } else { SamplingDecision::Drop } } } }; SamplingResult { decision, // No extra attributes ever set by the SDK samplers. attributes: Vec::new(), // all sampler in SDK will not modify trace state. trace_state: match parent_context { Some(ctx) => ctx.span().span_context().trace_state().clone(), None => TraceState::default(), }, } } } #[cfg(all(test, feature = "testing", feature = "trace"))] mod tests { use super::*; use crate::testing::trace::TestSpan; use crate::trace::{Sampler, ShouldSample}; use opentelemetry_api::trace::{SamplingDecision, SpanContext, SpanId, TraceFlags, TraceState}; use rand::Rng; #[rustfmt::skip] fn sampler_data() -> Vec<(&'static str, Sampler, f64, bool, bool)> { vec![ // Span w/o a parent ("never_sample", Sampler::AlwaysOff, 0.0, false, false), ("always_sample", Sampler::AlwaysOn, 1.0, false, false), ("ratio_-1", Sampler::TraceIdRatioBased(-1.0), 0.0, false, false), ("ratio_.25", Sampler::TraceIdRatioBased(0.25), 0.25, false, false), ("ratio_.50", Sampler::TraceIdRatioBased(0.50), 0.5, false, false), ("ratio_.75", Sampler::TraceIdRatioBased(0.75), 0.75, false, false), ("ratio_2.0", Sampler::TraceIdRatioBased(2.0), 1.0, false, false), // Spans w/o a parent delegate ("delegate_to_always_on", Sampler::ParentBased(Box::new(Sampler::AlwaysOn)), 1.0, false, false), ("delegate_to_always_off", Sampler::ParentBased(Box::new(Sampler::AlwaysOff)), 0.0, false, false), ("delegate_to_ratio_-1", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(-1.0))), 0.0, false, false), ("delegate_to_ratio_.25", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.25))), 0.25, false, false), ("delegate_to_ratio_.50", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.50))), 0.50, false, false), ("delegate_to_ratio_.75", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.75))), 0.75, false, false), ("delegate_to_ratio_2.0", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(2.0))), 1.0, false, false), // Spans with a parent that is *not* sampled act like spans w/o a parent 
("unsampled_parent_with_ratio_-1", Sampler::TraceIdRatioBased(-1.0), 0.0, true, false), ("unsampled_parent_with_ratio_.25", Sampler::TraceIdRatioBased(0.25), 0.25, true, false), ("unsampled_parent_with_ratio_.50", Sampler::TraceIdRatioBased(0.50), 0.5, true, false), ("unsampled_parent_with_ratio_.75", Sampler::TraceIdRatioBased(0.75), 0.75, true, false), ("unsampled_parent_with_ratio_2.0", Sampler::TraceIdRatioBased(2.0), 1.0, true, false), ("unsampled_parent_or_else_with_always_on", Sampler::ParentBased(Box::new(Sampler::AlwaysOn)), 0.0, true, false), ("unsampled_parent_or_else_with_always_off", Sampler::ParentBased(Box::new(Sampler::AlwaysOff)), 0.0, true, false), ("unsampled_parent_or_else_with_ratio_.25", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.25))), 0.0, true, false), // A ratio sampler with a parent that is sampled will ignore the parent ("sampled_parent_with_ratio_-1", Sampler::TraceIdRatioBased(-1.0), 0.0, true, true), ("sampled_parent_with_ratio_.25", Sampler::TraceIdRatioBased(0.25), 0.25, true, true), ("sampled_parent_with_ratio_2.0", Sampler::TraceIdRatioBased(2.0), 1.0, true, true), // Spans with a parent that is sampled, will always sample, regardless of the delegate sampler ("sampled_parent_or_else_with_always_on", Sampler::ParentBased(Box::new(Sampler::AlwaysOn)), 1.0, true, true), ("sampled_parent_or_else_with_always_off", Sampler::ParentBased(Box::new(Sampler::AlwaysOff)), 1.0, true, true), ("sampled_parent_or_else_with_ratio_.25", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.25))), 1.0, true, true), // Spans with a sampled parent, but when using the NeverSample Sampler, aren't sampled ("sampled_parent_span_with_never_sample", Sampler::AlwaysOff, 0.0, true, true), ] } #[test] fn sampling() { let total = 10_000; let mut rng = rand::thread_rng(); for (name, sampler, expectation, parent, sample_parent) in sampler_data() { let mut sampled = 0; for _ in 0..total { let parent_context = if parent { let trace_flags = if sample_parent { TraceFlags::SAMPLED } else { TraceFlags::default() }; let span_context = SpanContext::new( TraceId::from_u128(1), SpanId::from_u64(1), trace_flags, false, TraceState::default(), ); Some(Context::current_with_span(TestSpan(span_context))) } else { None }; let trace_id = TraceId::from(rng.gen::<[u8; 16]>()); if sampler .should_sample( parent_context.as_ref(), trace_id, name, &SpanKind::Internal, &[], &[], &InstrumentationLibrary::default(), ) .decision == SamplingDecision::RecordAndSample { sampled += 1; } } let mut tolerance = 0.0; let got = sampled as f64 / total as f64; if expectation > 0.0 && expectation < 1.0 { // See https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval let z = 4.75342; // This should succeed 99.9999% of the time tolerance = z * (got * (1.0 - got) / total as f64).sqrt(); } let diff = (got - expectation).abs(); assert!( diff <= tolerance, "{} got {:?} (diff: {}), expected {} (w/tolerance: {})", name, got, diff, expectation, tolerance ); } } #[test] fn filter_parent_sampler_for_active_spans() { let sampler = Sampler::ParentBased(Box::new(Sampler::AlwaysOn)); let cx = Context::current_with_value("some_value"); let instrumentation_library = InstrumentationLibrary::default(); let result = sampler.should_sample( Some(&cx), TraceId::from_u128(1), "should sample", &SpanKind::Internal, &[], &[], &instrumentation_library, ); assert_eq!(result.decision, SamplingDecision::RecordAndSample); } }
should_sample
identifier_name
sampler.rs
//! # OpenTelemetry ShouldSample Interface //! //! ## Sampling //! //! Sampling is a mechanism to control the noise and overhead introduced by //! OpenTelemetry by reducing the number of samples of traces collected and //! sent to the backend. //! //! Sampling may be implemented on different stages of a trace collection. //! OpenTelemetry SDK defines a `ShouldSample` interface that can be used at //! instrumentation points by libraries to check the sampling `SamplingDecision` //! early and optimize the amount of telemetry that needs to be collected. //! //! All other sampling algorithms may be implemented on SDK layer in exporters, //! or even out of process in Agent or Collector. //! //! The OpenTelemetry API has two properties responsible for the data collection: //! //! * `is_recording` method on a `Span`. If `true` the current `Span` records //! tracing events (attributes, events, status, etc.), otherwise all tracing //! events are dropped. Users can use this property to determine if expensive //! trace events can be avoided. `SpanProcessor`s will receive //! all spans with this flag set. However, `SpanExporter`s will //! not receive them unless the `Sampled` flag was set. //! * `Sampled` flag in `trace_flags` on `SpanContext`. This flag is propagated //! via the `SpanContext` to child Spans. For more details see the [W3C //! specification](https://w3c.github.io/trace-context/). This flag indicates //! that the `Span` has been `sampled` and will be exported. `SpanProcessor`s //! and `SpanExporter`s will receive spans with the `Sampled` flag set for //! processing. //! //! The flag combination `Sampled == false` and `is_recording` == true` means //! that the current `Span` does record information, but most likely the child //! `Span` will not. //! //! The flag combination `Sampled == true` and `is_recording == false` could //! cause gaps in the distributed trace, and because of this OpenTelemetry API //! MUST NOT allow this combination. use crate::InstrumentationLibrary; use opentelemetry_api::{ trace::{ Link, SamplingDecision, SamplingResult, SpanKind, TraceContextExt, TraceId, TraceState, }, Context, KeyValue, }; use std::convert::TryInto; /// The `ShouldSample` interface allows implementations to provide samplers /// which will return a sampling `SamplingResult` based on information that /// is typically available just before the `Span` was created. pub trait ShouldSample: Send + Sync + std::fmt::Debug { /// Returns the `SamplingDecision` for a `Span` to be created. #[allow(clippy::too_many_arguments)] fn should_sample( &self, parent_context: Option<&Context>, trace_id: TraceId, name: &str, span_kind: &SpanKind, attributes: &[KeyValue], links: &[Link], instrumentation_library: &InstrumentationLibrary, ) -> SamplingResult; } /// Sampling options #[derive(Clone, Debug, PartialEq)] pub enum Sampler { /// Always sample the trace AlwaysOn, /// Never sample the trace AlwaysOff, /// Respects the parent span's sampling decision or delegates a delegate sampler for root spans. ParentBased(Box<Sampler>), /// Sample a given fraction of traces. Fractions >= 1 will always sample. If the parent span is /// sampled, then it's child spans will automatically be sampled. Fractions < 0 are treated as /// zero, but spans may still be sampled if their parent is. 
TraceIdRatioBased(f64), } impl ShouldSample for Sampler { fn should_sample( &self, parent_context: Option<&Context>, trace_id: TraceId, name: &str, span_kind: &SpanKind, attributes: &[KeyValue], links: &[Link], instrumentation_library: &InstrumentationLibrary, ) -> SamplingResult { let decision = match self { // Always sample the trace Sampler::AlwaysOn => SamplingDecision::RecordAndSample, // Never sample the trace Sampler::AlwaysOff => SamplingDecision::Drop, // The parent decision if sampled; otherwise the decision of delegate_sampler Sampler::ParentBased(delegate_sampler) => { parent_context.filter(|cx| cx.has_active_span()).map_or( delegate_sampler .should_sample( parent_context, trace_id, name, span_kind, attributes, links, instrumentation_library, ) .decision, |ctx| { let span = ctx.span(); let parent_span_context = span.span_context(); if parent_span_context.is_sampled()
else { SamplingDecision::Drop } }, ) } // Probabilistically sample the trace. Sampler::TraceIdRatioBased(prob) => { if *prob >= 1.0 { SamplingDecision::RecordAndSample } else { let prob_upper_bound = (prob.max(0.0) * (1u64 << 63) as f64) as u64; // TODO: update behavior when the spec definition resolves // https://github.com/open-telemetry/opentelemetry-specification/issues/1413 let bytes = trace_id.to_bytes(); let (_, low) = bytes.split_at(8); let trace_id_low = u64::from_be_bytes(low.try_into().unwrap()); let rnd_from_trace_id = trace_id_low >> 1; if rnd_from_trace_id < prob_upper_bound { SamplingDecision::RecordAndSample } else { SamplingDecision::Drop } } } }; SamplingResult { decision, // No extra attributes ever set by the SDK samplers. attributes: Vec::new(), // all sampler in SDK will not modify trace state. trace_state: match parent_context { Some(ctx) => ctx.span().span_context().trace_state().clone(), None => TraceState::default(), }, } } } #[cfg(all(test, feature = "testing", feature = "trace"))] mod tests { use super::*; use crate::testing::trace::TestSpan; use crate::trace::{Sampler, ShouldSample}; use opentelemetry_api::trace::{SamplingDecision, SpanContext, SpanId, TraceFlags, TraceState}; use rand::Rng; #[rustfmt::skip] fn sampler_data() -> Vec<(&'static str, Sampler, f64, bool, bool)> { vec![ // Span w/o a parent ("never_sample", Sampler::AlwaysOff, 0.0, false, false), ("always_sample", Sampler::AlwaysOn, 1.0, false, false), ("ratio_-1", Sampler::TraceIdRatioBased(-1.0), 0.0, false, false), ("ratio_.25", Sampler::TraceIdRatioBased(0.25), 0.25, false, false), ("ratio_.50", Sampler::TraceIdRatioBased(0.50), 0.5, false, false), ("ratio_.75", Sampler::TraceIdRatioBased(0.75), 0.75, false, false), ("ratio_2.0", Sampler::TraceIdRatioBased(2.0), 1.0, false, false), // Spans w/o a parent delegate ("delegate_to_always_on", Sampler::ParentBased(Box::new(Sampler::AlwaysOn)), 1.0, false, false), ("delegate_to_always_off", Sampler::ParentBased(Box::new(Sampler::AlwaysOff)), 0.0, false, false), ("delegate_to_ratio_-1", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(-1.0))), 0.0, false, false), ("delegate_to_ratio_.25", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.25))), 0.25, false, false), ("delegate_to_ratio_.50", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.50))), 0.50, false, false), ("delegate_to_ratio_.75", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.75))), 0.75, false, false), ("delegate_to_ratio_2.0", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(2.0))), 1.0, false, false), // Spans with a parent that is *not* sampled act like spans w/o a parent ("unsampled_parent_with_ratio_-1", Sampler::TraceIdRatioBased(-1.0), 0.0, true, false), ("unsampled_parent_with_ratio_.25", Sampler::TraceIdRatioBased(0.25), 0.25, true, false), ("unsampled_parent_with_ratio_.50", Sampler::TraceIdRatioBased(0.50), 0.5, true, false), ("unsampled_parent_with_ratio_.75", Sampler::TraceIdRatioBased(0.75), 0.75, true, false), ("unsampled_parent_with_ratio_2.0", Sampler::TraceIdRatioBased(2.0), 1.0, true, false), ("unsampled_parent_or_else_with_always_on", Sampler::ParentBased(Box::new(Sampler::AlwaysOn)), 0.0, true, false), ("unsampled_parent_or_else_with_always_off", Sampler::ParentBased(Box::new(Sampler::AlwaysOff)), 0.0, true, false), ("unsampled_parent_or_else_with_ratio_.25", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.25))), 0.0, true, false), // A ratio sampler with a parent that is sampled will ignore the parent 
("sampled_parent_with_ratio_-1", Sampler::TraceIdRatioBased(-1.0), 0.0, true, true), ("sampled_parent_with_ratio_.25", Sampler::TraceIdRatioBased(0.25), 0.25, true, true), ("sampled_parent_with_ratio_2.0", Sampler::TraceIdRatioBased(2.0), 1.0, true, true), // Spans with a parent that is sampled, will always sample, regardless of the delegate sampler ("sampled_parent_or_else_with_always_on", Sampler::ParentBased(Box::new(Sampler::AlwaysOn)), 1.0, true, true), ("sampled_parent_or_else_with_always_off", Sampler::ParentBased(Box::new(Sampler::AlwaysOff)), 1.0, true, true), ("sampled_parent_or_else_with_ratio_.25", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.25))), 1.0, true, true), // Spans with a sampled parent, but when using the NeverSample Sampler, aren't sampled ("sampled_parent_span_with_never_sample", Sampler::AlwaysOff, 0.0, true, true), ] } #[test] fn sampling() { let total = 10_000; let mut rng = rand::thread_rng(); for (name, sampler, expectation, parent, sample_parent) in sampler_data() { let mut sampled = 0; for _ in 0..total { let parent_context = if parent { let trace_flags = if sample_parent { TraceFlags::SAMPLED } else { TraceFlags::default() }; let span_context = SpanContext::new( TraceId::from_u128(1), SpanId::from_u64(1), trace_flags, false, TraceState::default(), ); Some(Context::current_with_span(TestSpan(span_context))) } else { None }; let trace_id = TraceId::from(rng.gen::<[u8; 16]>()); if sampler .should_sample( parent_context.as_ref(), trace_id, name, &SpanKind::Internal, &[], &[], &InstrumentationLibrary::default(), ) .decision == SamplingDecision::RecordAndSample { sampled += 1; } } let mut tolerance = 0.0; let got = sampled as f64 / total as f64; if expectation > 0.0 && expectation < 1.0 { // See https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval let z = 4.75342; // This should succeed 99.9999% of the time tolerance = z * (got * (1.0 - got) / total as f64).sqrt(); } let diff = (got - expectation).abs(); assert!( diff <= tolerance, "{} got {:?} (diff: {}), expected {} (w/tolerance: {})", name, got, diff, expectation, tolerance ); } } #[test] fn filter_parent_sampler_for_active_spans() { let sampler = Sampler::ParentBased(Box::new(Sampler::AlwaysOn)); let cx = Context::current_with_value("some_value"); let instrumentation_library = InstrumentationLibrary::default(); let result = sampler.should_sample( Some(&cx), TraceId::from_u128(1), "should sample", &SpanKind::Internal, &[], &[], &instrumentation_library, ); assert_eq!(result.decision, SamplingDecision::RecordAndSample); } }
{ SamplingDecision::RecordAndSample }
conditional_block
sampler.rs
//! # OpenTelemetry ShouldSample Interface //! //! ## Sampling //! //! Sampling is a mechanism to control the noise and overhead introduced by //! OpenTelemetry by reducing the number of samples of traces collected and //! sent to the backend. //! //! Sampling may be implemented on different stages of a trace collection. //! OpenTelemetry SDK defines a `ShouldSample` interface that can be used at //! instrumentation points by libraries to check the sampling `SamplingDecision` //! early and optimize the amount of telemetry that needs to be collected. //! //! All other sampling algorithms may be implemented on SDK layer in exporters, //! or even out of process in Agent or Collector. //! //! The OpenTelemetry API has two properties responsible for the data collection: //! //! * `is_recording` method on a `Span`. If `true` the current `Span` records //! tracing events (attributes, events, status, etc.), otherwise all tracing //! events are dropped. Users can use this property to determine if expensive //! trace events can be avoided. `SpanProcessor`s will receive //! all spans with this flag set. However, `SpanExporter`s will //! not receive them unless the `Sampled` flag was set. //! * `Sampled` flag in `trace_flags` on `SpanContext`. This flag is propagated //! via the `SpanContext` to child Spans. For more details see the [W3C //! specification](https://w3c.github.io/trace-context/). This flag indicates //! that the `Span` has been `sampled` and will be exported. `SpanProcessor`s //! and `SpanExporter`s will receive spans with the `Sampled` flag set for //! processing. //! //! The flag combination `Sampled == false` and `is_recording` == true` means //! that the current `Span` does record information, but most likely the child //! `Span` will not. //! //! The flag combination `Sampled == true` and `is_recording == false` could //! cause gaps in the distributed trace, and because of this OpenTelemetry API //! MUST NOT allow this combination. use crate::InstrumentationLibrary; use opentelemetry_api::{ trace::{ Link, SamplingDecision, SamplingResult, SpanKind, TraceContextExt, TraceId, TraceState, }, Context, KeyValue, }; use std::convert::TryInto; /// The `ShouldSample` interface allows implementations to provide samplers /// which will return a sampling `SamplingResult` based on information that /// is typically available just before the `Span` was created. pub trait ShouldSample: Send + Sync + std::fmt::Debug { /// Returns the `SamplingDecision` for a `Span` to be created. #[allow(clippy::too_many_arguments)] fn should_sample( &self, parent_context: Option<&Context>, trace_id: TraceId, name: &str, span_kind: &SpanKind, attributes: &[KeyValue], links: &[Link], instrumentation_library: &InstrumentationLibrary, ) -> SamplingResult; } /// Sampling options #[derive(Clone, Debug, PartialEq)] pub enum Sampler { /// Always sample the trace AlwaysOn, /// Never sample the trace AlwaysOff,
/// zero, but spans may still be sampled if their parent is. TraceIdRatioBased(f64), } impl ShouldSample for Sampler { fn should_sample( &self, parent_context: Option<&Context>, trace_id: TraceId, name: &str, span_kind: &SpanKind, attributes: &[KeyValue], links: &[Link], instrumentation_library: &InstrumentationLibrary, ) -> SamplingResult { let decision = match self { // Always sample the trace Sampler::AlwaysOn => SamplingDecision::RecordAndSample, // Never sample the trace Sampler::AlwaysOff => SamplingDecision::Drop, // The parent decision if sampled; otherwise the decision of delegate_sampler Sampler::ParentBased(delegate_sampler) => { parent_context.filter(|cx| cx.has_active_span()).map_or( delegate_sampler .should_sample( parent_context, trace_id, name, span_kind, attributes, links, instrumentation_library, ) .decision, |ctx| { let span = ctx.span(); let parent_span_context = span.span_context(); if parent_span_context.is_sampled() { SamplingDecision::RecordAndSample } else { SamplingDecision::Drop } }, ) } // Probabilistically sample the trace. Sampler::TraceIdRatioBased(prob) => { if *prob >= 1.0 { SamplingDecision::RecordAndSample } else { let prob_upper_bound = (prob.max(0.0) * (1u64 << 63) as f64) as u64; // TODO: update behavior when the spec definition resolves // https://github.com/open-telemetry/opentelemetry-specification/issues/1413 let bytes = trace_id.to_bytes(); let (_, low) = bytes.split_at(8); let trace_id_low = u64::from_be_bytes(low.try_into().unwrap()); let rnd_from_trace_id = trace_id_low >> 1; if rnd_from_trace_id < prob_upper_bound { SamplingDecision::RecordAndSample } else { SamplingDecision::Drop } } } }; SamplingResult { decision, // No extra attributes ever set by the SDK samplers. attributes: Vec::new(), // all sampler in SDK will not modify trace state. 
trace_state: match parent_context { Some(ctx) => ctx.span().span_context().trace_state().clone(), None => TraceState::default(), }, } } } #[cfg(all(test, feature = "testing", feature = "trace"))] mod tests { use super::*; use crate::testing::trace::TestSpan; use crate::trace::{Sampler, ShouldSample}; use opentelemetry_api::trace::{SamplingDecision, SpanContext, SpanId, TraceFlags, TraceState}; use rand::Rng; #[rustfmt::skip] fn sampler_data() -> Vec<(&'static str, Sampler, f64, bool, bool)> { vec![ // Span w/o a parent ("never_sample", Sampler::AlwaysOff, 0.0, false, false), ("always_sample", Sampler::AlwaysOn, 1.0, false, false), ("ratio_-1", Sampler::TraceIdRatioBased(-1.0), 0.0, false, false), ("ratio_.25", Sampler::TraceIdRatioBased(0.25), 0.25, false, false), ("ratio_.50", Sampler::TraceIdRatioBased(0.50), 0.5, false, false), ("ratio_.75", Sampler::TraceIdRatioBased(0.75), 0.75, false, false), ("ratio_2.0", Sampler::TraceIdRatioBased(2.0), 1.0, false, false), // Spans w/o a parent delegate ("delegate_to_always_on", Sampler::ParentBased(Box::new(Sampler::AlwaysOn)), 1.0, false, false), ("delegate_to_always_off", Sampler::ParentBased(Box::new(Sampler::AlwaysOff)), 0.0, false, false), ("delegate_to_ratio_-1", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(-1.0))), 0.0, false, false), ("delegate_to_ratio_.25", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.25))), 0.25, false, false), ("delegate_to_ratio_.50", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.50))), 0.50, false, false), ("delegate_to_ratio_.75", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.75))), 0.75, false, false), ("delegate_to_ratio_2.0", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(2.0))), 1.0, false, false), // Spans with a parent that is *not* sampled act like spans w/o a parent ("unsampled_parent_with_ratio_-1", Sampler::TraceIdRatioBased(-1.0), 0.0, true, false), ("unsampled_parent_with_ratio_.25", Sampler::TraceIdRatioBased(0.25), 0.25, true, false), ("unsampled_parent_with_ratio_.50", Sampler::TraceIdRatioBased(0.50), 0.5, true, false), ("unsampled_parent_with_ratio_.75", Sampler::TraceIdRatioBased(0.75), 0.75, true, false), ("unsampled_parent_with_ratio_2.0", Sampler::TraceIdRatioBased(2.0), 1.0, true, false), ("unsampled_parent_or_else_with_always_on", Sampler::ParentBased(Box::new(Sampler::AlwaysOn)), 0.0, true, false), ("unsampled_parent_or_else_with_always_off", Sampler::ParentBased(Box::new(Sampler::AlwaysOff)), 0.0, true, false), ("unsampled_parent_or_else_with_ratio_.25", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.25))), 0.0, true, false), // A ratio sampler with a parent that is sampled will ignore the parent ("sampled_parent_with_ratio_-1", Sampler::TraceIdRatioBased(-1.0), 0.0, true, true), ("sampled_parent_with_ratio_.25", Sampler::TraceIdRatioBased(0.25), 0.25, true, true), ("sampled_parent_with_ratio_2.0", Sampler::TraceIdRatioBased(2.0), 1.0, true, true), // Spans with a parent that is sampled, will always sample, regardless of the delegate sampler ("sampled_parent_or_else_with_always_on", Sampler::ParentBased(Box::new(Sampler::AlwaysOn)), 1.0, true, true), ("sampled_parent_or_else_with_always_off", Sampler::ParentBased(Box::new(Sampler::AlwaysOff)), 1.0, true, true), ("sampled_parent_or_else_with_ratio_.25", Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.25))), 1.0, true, true), // Spans with a sampled parent, but when using the NeverSample Sampler, aren't sampled ("sampled_parent_span_with_never_sample", 
Sampler::AlwaysOff, 0.0, true, true), ] } #[test] fn sampling() { let total = 10_000; let mut rng = rand::thread_rng(); for (name, sampler, expectation, parent, sample_parent) in sampler_data() { let mut sampled = 0; for _ in 0..total { let parent_context = if parent { let trace_flags = if sample_parent { TraceFlags::SAMPLED } else { TraceFlags::default() }; let span_context = SpanContext::new( TraceId::from_u128(1), SpanId::from_u64(1), trace_flags, false, TraceState::default(), ); Some(Context::current_with_span(TestSpan(span_context))) } else { None }; let trace_id = TraceId::from(rng.gen::<[u8; 16]>()); if sampler .should_sample( parent_context.as_ref(), trace_id, name, &SpanKind::Internal, &[], &[], &InstrumentationLibrary::default(), ) .decision == SamplingDecision::RecordAndSample { sampled += 1; } } let mut tolerance = 0.0; let got = sampled as f64 / total as f64; if expectation > 0.0 && expectation < 1.0 { // See https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval let z = 4.75342; // This should succeed 99.9999% of the time tolerance = z * (got * (1.0 - got) / total as f64).sqrt(); } let diff = (got - expectation).abs(); assert!( diff <= tolerance, "{} got {:?} (diff: {}), expected {} (w/tolerance: {})", name, got, diff, expectation, tolerance ); } } #[test] fn filter_parent_sampler_for_active_spans() { let sampler = Sampler::ParentBased(Box::new(Sampler::AlwaysOn)); let cx = Context::current_with_value("some_value"); let instrumentation_library = InstrumentationLibrary::default(); let result = sampler.should_sample( Some(&cx), TraceId::from_u128(1), "should sample", &SpanKind::Internal, &[], &[], &instrumentation_library, ); assert_eq!(result.decision, SamplingDecision::RecordAndSample); } }
/// Respects the parent span's sampling decision or delegates a delegate sampler for root spans. ParentBased(Box<Sampler>), /// Sample a given fraction of traces. Fractions >= 1 will always sample. If the parent span is /// sampled, then it's child spans will automatically be sampled. Fractions < 0 are treated as
random_line_split
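The `sampler.rs` rows above document ratio-based sampling: the `TraceIdRatioBased` arm scales the probability into a 63-bit threshold and compares it with the low half of the trace id, shifted right by one. A dependency-free sketch of just that arithmetic (not the OpenTelemetry API itself; `trace_id_low` stands in for the low 8 bytes of a trace id):

```rust
fn sample_by_ratio(prob: f64, trace_id_low: u64) -> bool {
    if prob >= 1.0 {
        return true; // fractions >= 1 always sample
    }
    // Negative fractions are clamped to zero, then scaled into [0, 2^63).
    let prob_upper_bound = (prob.max(0.0) * (1u64 << 63) as f64) as u64;
    // Shift right so the compared value also fits in 63 bits.
    let rnd_from_trace_id = trace_id_low >> 1;
    rnd_from_trace_id < prob_upper_bound
}

fn main() {
    assert!(!sample_by_ratio(0.0, u64::MAX)); // never samples
    assert!(sample_by_ratio(1.0, 0));         // always samples
    // For prob = 0.5 the threshold is 2^62.
    assert!(sample_by_ratio(0.5, ((1u64 << 62) - 1) << 1));
    assert!(!sample_by_ratio(0.5, 1u64 << 63));
}
```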
const-adt-align-mismatch.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at
// option. This file may not be copied, modified, or distributed // except according to those terms. // run-pass #![allow(dead_code)] #![allow(deprecated)] use std::mem; #[derive(PartialEq, Debug)] enum Foo { A(u32), Bar([u16; 4]), C } // NOTE(eddyb) Don't make this a const, needs to be a static // so it is always instantiated as a LLVM constant value. static FOO: Foo = Foo::C; fn main() { assert_eq!(FOO, Foo::C); assert_eq!(mem::size_of::<Foo>(), 12); assert_eq!(mem::min_align_of::<Foo>(), 4); }
// http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
random_line_split
const-adt-align-mismatch.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // run-pass #![allow(dead_code)] #![allow(deprecated)] use std::mem; #[derive(PartialEq, Debug)] enum Foo { A(u32), Bar([u16; 4]), C } // NOTE(eddyb) Don't make this a const, needs to be a static // so it is always instantiated as a LLVM constant value. static FOO: Foo = Foo::C; fn main()
{ assert_eq!(FOO, Foo::C); assert_eq!(mem::size_of::<Foo>(), 12); assert_eq!(mem::min_align_of::<Foo>(), 4); }
identifier_body
const-adt-align-mismatch.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // run-pass #![allow(dead_code)] #![allow(deprecated)] use std::mem; #[derive(PartialEq, Debug)] enum Foo { A(u32), Bar([u16; 4]), C } // NOTE(eddyb) Don't make this a const, needs to be a static // so it is always instantiated as a LLVM constant value. static FOO: Foo = Foo::C; fn
() { assert_eq!(FOO, Foo::C); assert_eq!(mem::size_of::<Foo>(), 12); assert_eq!(mem::min_align_of::<Foo>(), 4); }
main
identifier_name
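The `const-adt-align-mismatch.rs` rows pin the size and alignment of an enum mixing a `u32` variant and a `[u16; 4]` variant. A hedged re-check with current `std::mem` (`align_of` replaces the deprecated `min_align_of` used in the row); the printed numbers depend on the compiler's unspecified default layout, which the original test asserts to be 12 and 4:

```rust
use std::mem;

// Same shape as the enum in the rows above.
#[allow(dead_code)]
#[derive(PartialEq, Debug)]
enum Foo {
    A(u32),
    Bar([u16; 4]),
    C,
}

fn main() {
    // The original test expects size 12 (tag plus the 8-byte `Bar` payload,
    // padded to 4-byte alignment) and alignment 4.
    println!("size = {}", mem::size_of::<Foo>());
    println!("align = {}", mem::align_of::<Foo>());
}
```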
truncate.rs
#![allow(unstable)] use std::old_io as io; use std::old_io::process::Command; static PROGNAME: &'static str = "./truncate"; static TFILE1: &'static str = "truncate_test_1"; static TFILE2: &'static str = "truncate_test_2"; fn
(name: &str) -> io::File { match io::File::create(&Path::new(name)) { Ok(f) => f, Err(_) => panic!() } } #[test] fn test_increase_file_size() { let mut file = make_file(TFILE1); if!Command::new(PROGNAME).args(&["-s", "+5K", TFILE1]).status().unwrap().success() { panic!(); } file.seek(0, io::SeekEnd).unwrap(); if file.tell().unwrap()!= 5 * 1024 { panic!(); } io::fs::unlink(&Path::new(TFILE1)).unwrap(); } #[test] fn test_decrease_file_size() { let mut file = make_file(TFILE2); file.write_all(b"1234567890").unwrap(); if!Command::new(PROGNAME).args(&["--size=-4", TFILE2]).status().unwrap().success() { panic!(); } file.seek(0, io::SeekEnd).unwrap(); if file.tell().unwrap()!= 6 { println!("{:?}", file.tell()); panic!(); } io::fs::unlink(&Path::new(TFILE2)).unwrap(); }
make_file
identifier_name
truncate.rs
#![allow(unstable)] use std::old_io as io; use std::old_io::process::Command; static PROGNAME: &'static str = "./truncate"; static TFILE1: &'static str = "truncate_test_1"; static TFILE2: &'static str = "truncate_test_2"; fn make_file(name: &str) -> io::File { match io::File::create(&Path::new(name)) { Ok(f) => f, Err(_) => panic!() } } #[test] fn test_increase_file_size() { let mut file = make_file(TFILE1); if!Command::new(PROGNAME).args(&["-s", "+5K", TFILE1]).status().unwrap().success() { panic!(); } file.seek(0, io::SeekEnd).unwrap(); if file.tell().unwrap()!= 5 * 1024 { panic!(); } io::fs::unlink(&Path::new(TFILE1)).unwrap(); }
fn test_decrease_file_size() {
    let mut file = make_file(TFILE2);
    file.write_all(b"1234567890").unwrap();
    if !Command::new(PROGNAME).args(&["--size=-4", TFILE2]).status().unwrap().success() {
        panic!();
    }
    file.seek(0, io::SeekEnd).unwrap();
    if file.tell().unwrap() != 6 {
        println!("{:?}", file.tell());
        panic!();
    }
    io::fs::unlink(&Path::new(TFILE2)).unwrap();
}
#[test]
random_line_split
truncate.rs
#![allow(unstable)]

use std::old_io as io;
use std::old_io::process::Command;

static PROGNAME: &'static str = "./truncate";
static TFILE1: &'static str = "truncate_test_1";
static TFILE2: &'static str = "truncate_test_2";

fn make_file(name: &str) -> io::File {
    match io::File::create(&Path::new(name)) {
        Ok(f) => f,
        Err(_) => panic!()
    }
}

#[test]
fn test_increase_file_size()
#[test]
fn test_decrease_file_size() {
    let mut file = make_file(TFILE2);
    file.write_all(b"1234567890").unwrap();
    if !Command::new(PROGNAME).args(&["--size=-4", TFILE2]).status().unwrap().success() {
        panic!();
    }
    file.seek(0, io::SeekEnd).unwrap();
    if file.tell().unwrap() != 6 {
        println!("{:?}", file.tell());
        panic!();
    }
    io::fs::unlink(&Path::new(TFILE2)).unwrap();
}
{
    let mut file = make_file(TFILE1);
    if !Command::new(PROGNAME).args(&["-s", "+5K", TFILE1]).status().unwrap().success() {
        panic!();
    }
    file.seek(0, io::SeekEnd).unwrap();
    if file.tell().unwrap() != 5 * 1024 {
        panic!();
    }
    io::fs::unlink(&Path::new(TFILE1)).unwrap();
}
identifier_body
css_provider.rs
// Copyright 2013-2015, The Rust-GNOME Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>

use std::fmt::{self, Display, Formatter};
use ffi::{self, GtkCssProvider};
use glib::translate::{ToGlibPtr, from_glib_full};
use glib::{self, GlibContainer};

#[repr(C)]
pub struct CssProvider {
    pointer: *mut GtkCssProvider
}

impl ::StyleProviderTrait for CssProvider {}

impl CssProvider {
    pub fn new() -> Self {
        unsafe { CssProvider { pointer: ffi::gtk_css_provider_new() } }
    }

    pub fn get_default() -> Self {
        unsafe { CssProvider { pointer: ffi::gtk_css_provider_get_default() } }
    }

    pub fn get_named(name: &str, variant: &str) -> Self {
        unsafe {
            CssProvider {
                pointer: ffi::gtk_css_provider_get_named(name.to_glib_none().0,
                                                         variant.to_glib_none().0)
            }
        }
    }

    pub fn load_from_path(path: &str) -> Result<CssProvider, glib::Error> {
        unsafe {
            let pointer = ffi::gtk_css_provider_new();
            let mut error = ::std::ptr::null_mut();

            ffi::gtk_css_provider_load_from_path(pointer, path.to_glib_none().0, &mut error);
            if error.is_null()
else {
                Err(glib::Error::wrap(error))
            }
        }
    }
}

impl Display for CssProvider {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let tmp: String = unsafe { from_glib_full(ffi::gtk_css_provider_to_string(self.pointer)) };

        write!(f, "{}", tmp)
    }
}

impl_GObjectFunctions!(CssProvider, GtkCssProvider);
impl_TraitObject!(CssProvider, GtkCssProvider);
{ Ok(CssProvider { pointer: pointer }) }
conditional_block
css_provider.rs
// Copyright 2013-2015, The Rust-GNOME Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>

use std::fmt::{self, Display, Formatter};
use ffi::{self, GtkCssProvider};
use glib::translate::{ToGlibPtr, from_glib_full};
use glib::{self, GlibContainer};

#[repr(C)]
pub struct CssProvider {
    pointer: *mut GtkCssProvider
}

impl ::StyleProviderTrait for CssProvider {}

impl CssProvider {
    pub fn new() -> Self
pub fn get_default() -> Self {
        unsafe { CssProvider { pointer: ffi::gtk_css_provider_get_default() } }
    }

    pub fn get_named(name: &str, variant: &str) -> Self {
        unsafe {
            CssProvider {
                pointer: ffi::gtk_css_provider_get_named(name.to_glib_none().0,
                                                         variant.to_glib_none().0)
            }
        }
    }

    pub fn load_from_path(path: &str) -> Result<CssProvider, glib::Error> {
        unsafe {
            let pointer = ffi::gtk_css_provider_new();
            let mut error = ::std::ptr::null_mut();

            ffi::gtk_css_provider_load_from_path(pointer, path.to_glib_none().0, &mut error);
            if error.is_null() {
                Ok(CssProvider { pointer: pointer })
            } else {
                Err(glib::Error::wrap(error))
            }
        }
    }
}

impl Display for CssProvider {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let tmp: String = unsafe { from_glib_full(ffi::gtk_css_provider_to_string(self.pointer)) };

        write!(f, "{}", tmp)
    }
}

impl_GObjectFunctions!(CssProvider, GtkCssProvider);
impl_TraitObject!(CssProvider, GtkCssProvider);
{ unsafe { CssProvider { pointer: ffi::gtk_css_provider_new() } } }
identifier_body
css_provider.rs
// Copyright 2013-2015, The Rust-GNOME Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>

use std::fmt::{self, Display, Formatter};
use ffi::{self, GtkCssProvider};
use glib::translate::{ToGlibPtr, from_glib_full};
use glib::{self, GlibContainer};

#[repr(C)]
pub struct CssProvider {
    pointer: *mut GtkCssProvider
}

impl ::StyleProviderTrait for CssProvider {}

impl CssProvider {
    pub fn new() -> Self {
        unsafe { CssProvider { pointer: ffi::gtk_css_provider_new() } }
    }

    pub fn get_default() -> Self {
        unsafe { CssProvider { pointer: ffi::gtk_css_provider_get_default() } }
    }

    pub fn get_named(name: &str, variant: &str) -> Self {
        unsafe {
            CssProvider {
                pointer: ffi::gtk_css_provider_get_named(name.to_glib_none().0,
                                                         variant.to_glib_none().0)
            }
        }
    }

    pub fn
(path: &str) -> Result<CssProvider, glib::Error> {
        unsafe {
            let pointer = ffi::gtk_css_provider_new();
            let mut error = ::std::ptr::null_mut();

            ffi::gtk_css_provider_load_from_path(pointer, path.to_glib_none().0, &mut error);
            if error.is_null() {
                Ok(CssProvider { pointer: pointer })
            } else {
                Err(glib::Error::wrap(error))
            }
        }
    }
}

impl Display for CssProvider {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let tmp: String = unsafe { from_glib_full(ffi::gtk_css_provider_to_string(self.pointer)) };

        write!(f, "{}", tmp)
    }
}

impl_GObjectFunctions!(CssProvider, GtkCssProvider);
impl_TraitObject!(CssProvider, GtkCssProvider);
load_from_path
identifier_name
css_provider.rs
// Copyright 2013-2015, The Rust-GNOME Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>

use std::fmt::{self, Display, Formatter};
use ffi::{self, GtkCssProvider};
use glib::translate::{ToGlibPtr, from_glib_full};
use glib::{self, GlibContainer};

#[repr(C)]
pub struct CssProvider {
    pointer: *mut GtkCssProvider
}

impl ::StyleProviderTrait for CssProvider {}

impl CssProvider {
    pub fn new() -> Self {
        unsafe { CssProvider { pointer: ffi::gtk_css_provider_new() } }
    }

    pub fn get_default() -> Self {
        unsafe { CssProvider { pointer: ffi::gtk_css_provider_get_default() } }
    }

    pub fn get_named(name: &str, variant: &str) -> Self {
        unsafe {
            CssProvider {
                pointer: ffi::gtk_css_provider_get_named(name.to_glib_none().0,
                                                         variant.to_glib_none().0)
            }
        }
    }

    pub fn load_from_path(path: &str) -> Result<CssProvider, glib::Error> {
        unsafe {
            let pointer = ffi::gtk_css_provider_new();
            let mut error = ::std::ptr::null_mut();

            ffi::gtk_css_provider_load_from_path(pointer, path.to_glib_none().0, &mut error);
            if error.is_null() {
                Ok(CssProvider { pointer: pointer })
            } else {
                Err(glib::Error::wrap(error))
            }
        }
    }
}

impl Display for CssProvider {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let tmp: String = unsafe { from_glib_full(ffi::gtk_css_provider_to_string(self.pointer)) };

        write!(f, "{}", tmp)
    }
}

impl_GObjectFunctions!(CssProvider, GtkCssProvider);
impl_TraitObject!(CssProvider, GtkCssProvider);
random_line_split
lib.rs
extern crate hyper;
extern crate hyper_native_tls;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;

static URL: &'static str = "http://api.openaq.org";

mod entities;
pub use entities::*;

pub mod json {
    use hyper::Client;
    use hyper::net::HttpsConnector;
    use hyper_native_tls::NativeTlsClient;
    use std::io::Read;
    use super::*;

    pub mod requests {
        pub const CITIES: &str = "/v1/cities";
        pub const COUNTRIES: &str = "/v1/countries";
        pub const PARAMETERS: &str = "/v1/parameters";
        pub const FETCHES: &str = "/v1/fetches";
    }

    pub struct GetOpts {
        pub query: Option<String>,
    }

    pub fn get_cities_query(opts: GetCitiesQueryOpts) -> String {
        return String::from("page=") + &opts.page.to_string() + "&limit=" + &opts.limit.to_string() + "&country=" + opts.country;
    }

    pub fn get_countries_query(opts: GetCountriesQueryOpts) -> String {
        return String::from("page=") + &opts.page.to_string() + "&limit=" + &opts.limit.to_string();
    }

    pub fn get_fetches_query(opts: GetFetchesQueryOpts) -> String {
        return String::from("page=") + &opts.page.to_string() + "&limit=" + &opts.limit.to_string();
    }

    pub fn get(req: &str, opts: Option<GetOpts>) -> String {
        let ssl = NativeTlsClient::new().unwrap();
        let connector = HttpsConnector::new(ssl);
        let client = Client::with_connector(connector);

        let full_url = match &opts {
            &None => String::from(URL) + req,
            &Some(ref o) => {
                match &o.query {
                    &None => String::from(URL) + req,
                    &Some(ref q) => String::from(URL) + req + "?" + q,
                }
            }
        };

        let mut res = client.get(&full_url).send().unwrap();
        assert_eq!(res.status, hyper::Ok);

        let mut s = String::new();
        res.read_to_string(&mut s).unwrap();
        return s;
    }
}

pub struct GetCitiesQueryOpts<'a> {
    pub page: u32,
    pub limit: u32,
    pub country: &'a str,
}

pub struct GetCountriesQueryOpts {
    pub page: u32,
    pub limit: u32,
}

pub struct GetFetchesQueryOpts {
    pub page: u32,
    pub limit: u32,
}

macro_rules! extract_results {
    ($T: ty, $endpoint: expr, $opts: expr) => {
        #[derive(Debug, Serialize, Deserialize)]
        struct RequestResult {
            results: Vec<$T>,
        }

        let result = json::get($endpoint, $opts);
        let v: RequestResult = serde_json::from_str(&result).unwrap();
        return v.results;
    };
}

macro_rules! extract_results_with_opts {
    ($get_query: expr, $T: ty, $endpoint: expr, $opts: expr) => {
        let get_opts = match $opts {
            None => None,
            Some(o) => {
                let query = $get_query(o);
                let opts = json::GetOpts { query: Some(query) };
                Some(opts)
            }
        };

        extract_results!($T, $endpoint, get_opts);
    };
}

pub fn get_cities(cities_query_opts: Option<GetCitiesQueryOpts>) -> Vec<City>
pub fn get_fetches(fetches_query_opts: Option<GetFetchesQueryOpts>) -> Vec<Fetch> {
    extract_results_with_opts!(json::get_fetches_query, Fetch, json::requests::FETCHES, fetches_query_opts);
}

pub fn get_countries(countries_query_opts: Option<GetCountriesQueryOpts>) -> Vec<Country> {
    let get_opts = match countries_query_opts {
        None => None,
        Some(o) => {
            let countries_query = json::get_countries_query(o);
            let opts = json::GetOpts { query: Some(countries_query) };
            Some(opts)
        }
    };

    extract_results!(Country, json::requests::COUNTRIES, get_opts);
}

pub fn get_parameters() -> Vec<Parameter> {
    extract_results!(Parameter, json::requests::PARAMETERS, None);
}
{ extract_results_with_opts!(json::get_cities_query, City, json::requests::CITIES, cities_query_opts); }
identifier_body
lib.rs
extern crate hyper;
extern crate hyper_native_tls;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;

static URL: &'static str = "http://api.openaq.org";

mod entities;
pub use entities::*;

pub mod json {
    use hyper::Client;
    use hyper::net::HttpsConnector;
    use hyper_native_tls::NativeTlsClient;
    use std::io::Read;
    use super::*;

    pub mod requests {
        pub const CITIES: &str = "/v1/cities";
        pub const COUNTRIES: &str = "/v1/countries";
        pub const PARAMETERS: &str = "/v1/parameters";
        pub const FETCHES: &str = "/v1/fetches";
    }

    pub struct GetOpts {
        pub query: Option<String>,
    }

    pub fn get_cities_query(opts: GetCitiesQueryOpts) -> String {
        return String::from("page=") + &opts.page.to_string() + "&limit=" + &opts.limit.to_string() + "&country=" + opts.country;
    }

    pub fn
(opts: GetCountriesQueryOpts) -> String {
        return String::from("page=") + &opts.page.to_string() + "&limit=" + &opts.limit.to_string();
    }

    pub fn get_fetches_query(opts: GetFetchesQueryOpts) -> String {
        return String::from("page=") + &opts.page.to_string() + "&limit=" + &opts.limit.to_string();
    }

    pub fn get(req: &str, opts: Option<GetOpts>) -> String {
        let ssl = NativeTlsClient::new().unwrap();
        let connector = HttpsConnector::new(ssl);
        let client = Client::with_connector(connector);

        let full_url = match &opts {
            &None => String::from(URL) + req,
            &Some(ref o) => {
                match &o.query {
                    &None => String::from(URL) + req,
                    &Some(ref q) => String::from(URL) + req + "?" + q,
                }
            }
        };

        let mut res = client.get(&full_url).send().unwrap();
        assert_eq!(res.status, hyper::Ok);

        let mut s = String::new();
        res.read_to_string(&mut s).unwrap();
        return s;
    }
}

pub struct GetCitiesQueryOpts<'a> {
    pub page: u32,
    pub limit: u32,
    pub country: &'a str,
}

pub struct GetCountriesQueryOpts {
    pub page: u32,
    pub limit: u32,
}

pub struct GetFetchesQueryOpts {
    pub page: u32,
    pub limit: u32,
}

macro_rules! extract_results {
    ($T: ty, $endpoint: expr, $opts: expr) => {
        #[derive(Debug, Serialize, Deserialize)]
        struct RequestResult {
            results: Vec<$T>,
        }

        let result = json::get($endpoint, $opts);
        let v: RequestResult = serde_json::from_str(&result).unwrap();
        return v.results;
    };
}

macro_rules! extract_results_with_opts {
    ($get_query: expr, $T: ty, $endpoint: expr, $opts: expr) => {
        let get_opts = match $opts {
            None => None,
            Some(o) => {
                let query = $get_query(o);
                let opts = json::GetOpts { query: Some(query) };
                Some(opts)
            }
        };

        extract_results!($T, $endpoint, get_opts);
    };
}

pub fn get_cities(cities_query_opts: Option<GetCitiesQueryOpts>) -> Vec<City> {
    extract_results_with_opts!(json::get_cities_query, City, json::requests::CITIES, cities_query_opts);
}

pub fn get_fetches(fetches_query_opts: Option<GetFetchesQueryOpts>) -> Vec<Fetch> {
    extract_results_with_opts!(json::get_fetches_query, Fetch, json::requests::FETCHES, fetches_query_opts);
}

pub fn get_countries(countries_query_opts: Option<GetCountriesQueryOpts>) -> Vec<Country> {
    let get_opts = match countries_query_opts {
        None => None,
        Some(o) => {
            let countries_query = json::get_countries_query(o);
            let opts = json::GetOpts { query: Some(countries_query) };
            Some(opts)
        }
    };

    extract_results!(Country, json::requests::COUNTRIES, get_opts);
}

pub fn get_parameters() -> Vec<Parameter> {
    extract_results!(Parameter, json::requests::PARAMETERS, None);
}
get_countries_query
identifier_name
lib.rs
extern crate hyper;
extern crate hyper_native_tls;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;

static URL: &'static str = "http://api.openaq.org";

mod entities;
pub use entities::*;

pub mod json {
    use hyper::Client;
    use hyper::net::HttpsConnector;
    use hyper_native_tls::NativeTlsClient;
    use std::io::Read;
    use super::*;

    pub mod requests {
        pub const CITIES: &str = "/v1/cities";
        pub const COUNTRIES: &str = "/v1/countries";
        pub const PARAMETERS: &str = "/v1/parameters";
        pub const FETCHES: &str = "/v1/fetches";
    }

    pub struct GetOpts {
        pub query: Option<String>,
    }

    pub fn get_cities_query(opts: GetCitiesQueryOpts) -> String {
        return String::from("page=") + &opts.page.to_string() + "&limit=" + &opts.limit.to_string() + "&country=" + opts.country;
    }

    pub fn get_countries_query(opts: GetCountriesQueryOpts) -> String {
        return String::from("page=") + &opts.page.to_string() + "&limit=" + &opts.limit.to_string();
    }

    pub fn get_fetches_query(opts: GetFetchesQueryOpts) -> String {
        return String::from("page=") + &opts.page.to_string() + "&limit=" + &opts.limit.to_string();
    }

    pub fn get(req: &str, opts: Option<GetOpts>) -> String {
        let ssl = NativeTlsClient::new().unwrap();
        let connector = HttpsConnector::new(ssl);
        let client = Client::with_connector(connector);

        let full_url = match &opts {
            &None => String::from(URL) + req,
            &Some(ref o) => {
                match &o.query {
                    &None => String::from(URL) + req,
                    &Some(ref q) => String::from(URL) + req + "?" + q,
                }
            }
        };

        let mut res = client.get(&full_url).send().unwrap();
        assert_eq!(res.status, hyper::Ok);

        let mut s = String::new();
        res.read_to_string(&mut s).unwrap();
        return s;
    }
}

pub struct GetCitiesQueryOpts<'a> {
    pub page: u32,
    pub limit: u32,
    pub country: &'a str,
}

pub struct GetCountriesQueryOpts {
    pub page: u32,
    pub limit: u32,
}

pub struct GetFetchesQueryOpts {
    pub page: u32,
    pub limit: u32,
}

macro_rules! extract_results {
    ($T: ty, $endpoint: expr, $opts: expr) => {
        #[derive(Debug, Serialize, Deserialize)]
        struct RequestResult {
            results: Vec<$T>,
        }

        let result = json::get($endpoint, $opts);
        let v: RequestResult = serde_json::from_str(&result).unwrap();
        return v.results;
    };
}

macro_rules! extract_results_with_opts {
    ($get_query: expr, $T: ty, $endpoint: expr, $opts: expr) => {
        let get_opts = match $opts {
            None => None,
            Some(o) => {
                let query = $get_query(o);
                let opts = json::GetOpts { query: Some(query) };
                Some(opts)
            }
        };

        extract_results!($T, $endpoint, get_opts);
    };
}

pub fn get_cities(cities_query_opts: Option<GetCitiesQueryOpts>) -> Vec<City> {
    extract_results_with_opts!(json::get_cities_query, City, json::requests::CITIES, cities_query_opts);
}

pub fn get_fetches(fetches_query_opts: Option<GetFetchesQueryOpts>) -> Vec<Fetch> {
    extract_results_with_opts!(json::get_fetches_query, Fetch, json::requests::FETCHES, fetches_query_opts);
}

pub fn get_countries(countries_query_opts: Option<GetCountriesQueryOpts>) -> Vec<Country> {
    let get_opts = match countries_query_opts {
let opts = json::GetOpts { query: Some(countries_query) };
            Some(opts)
        }
    };

    extract_results!(Country, json::requests::COUNTRIES, get_opts);
}

pub fn get_parameters() -> Vec<Parameter> {
    extract_results!(Parameter, json::requests::PARAMETERS, None);
}
None => None,
        Some(o) => {
            let countries_query = json::get_countries_query(o);
random_line_split
window-properties.rs
extern crate sdl2;

use sdl2::pixels::Color;

pub fn main()
running = false
                },
                _ => {}
            }
        }

        {
            // Update the window title.
            // &sdl_context is needed to safely access the Window and to ensure that the event loop
            // isn't running (which could mutate the Window).
            // Note: if you don't use renderer: window.properties(&sdl_context);
            let mut props = renderer.window_properties(&sdl_context).unwrap();

            let position = props.get_position();
            let size = props.get_size();
            let title = format!("Window - pos({}x{}), size({}x{}): {}", position.0, position.1, size.0, size.1, tick);
            props.set_title(&title);

            tick += 1;
        }

        let mut drawer = renderer.drawer();
        drawer.set_draw_color(Color::RGB(0, 0, 0));
        drawer.clear();
        drawer.present();
    }
}
{
    let mut sdl_context = sdl2::init().video().unwrap();

    let window = sdl_context.window("rust-sdl2 demo: Window", 800, 600)
        .resizable()
        .build()
        .unwrap();

    let mut renderer = window.renderer().present_vsync().build().unwrap();

    let mut running = true;
    let mut tick = 0;
    while running {
        for event in sdl_context.event_pump().poll_iter() {
            use sdl2::event::Event;
            use sdl2::keycode::KeyCode;

            match event {
                Event::Quit {..} | Event::KeyDown { keycode: KeyCode::Escape, .. } => {
identifier_body