file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
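Each row below is a fill-in-the-middle (FIM) example: a Rust source file is cut into a prefix, a held-out middle, and a suffix, and fim_type records how the middle span appears to have been chosen (random_line_split, conditional_block, identifier_body, or identifier_name). A minimal sketch of how a row recombines, assuming plain prefix + middle + suffix concatenation and purely illustrative sentinel strings that are not mandated by this dataset:

```rust
/// Sketch only: rebuild the original file and a PSM-style prompt from one row.
/// The <PRE>/<SUF>/<MID> sentinels are illustrative placeholders, not dataset tokens.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> (String, String) {
    let original = format!("{prefix}{middle}{suffix}");
    let prompt = format!("<PRE>{prefix}<SUF>{suffix}<MID>"); // the model is asked to emit `middle`
    (original, prompt)
}
```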
build.rs
|
use std::path::Path;
use std::path::PathBuf;
use std::env;
use std::process::Command;
fn main() {
let out_dir = env::var("OUT_DIR").unwrap();
let target = env::var("TARGET").unwrap();
// http://doc.crates.io/build-script.html#inputs-to-the-build-script
// "the build script’s current directory is the source directory of the build script’s package."
let current_dir = env::current_dir().unwrap();
let src_dir = current_dir.join("src");
let qt_dir = env::var("QT_DIR").map(|p| PathBuf::from(p)).unwrap_or({
println!("Environnement variable 'QT_DIR' not set!");
println!("Defaulting to ${{HOME}}/Qt/${{QT_VER}}/${{QT_COMP}} where:");
let home_dir = env::home_dir().map(|p| PathBuf::from(p)).unwrap();
let default_qt_ver = "5.7".to_string();
let default_qt_comp = if target.contains("linux") {
"gcc_64".to_string()
} else if target.contains("darwin") {
"clang_64".to_string()
} else {
panic!("Unsuported platform in gallery's build.rs!")
};
println!(" QT_VER: {}", default_qt_ver);
|
let qt_dir_default = home_dir.join("Qt")
.join(env::var("QT_VER").unwrap_or(default_qt_ver))
.join(env::var("QT_COMP").unwrap_or(default_qt_comp));
qt_dir_default
});
println!("Using Qt directory: {:?}", qt_dir);
println!("Use QT_DIR environment variable to ovewrite.");
let qmake = qt_dir.join("bin").join("qmake");
// Run qmake to create a library with the resource file
let output = Command::new(qmake).args(&[src_dir])
.current_dir(&Path::new(&out_dir))
.output()
.expect("failed to execute 'qmake' process");
println!("output.status: {}", output.status);
println!("output.stdout: {}", String::from_utf8_lossy(&output.stdout));
println!("output.stderr: {}", String::from_utf8_lossy(&output.stderr));
assert!(output.status.success(), "failed to execute qmake process");
// Call 'make'
let output = Command::new("make").current_dir(&Path::new(&out_dir))
.output()
.expect("failed to execute'make' process");
println!("output.status: {}", output.status);
println!("output.stdout: {}", String::from_utf8_lossy(&output.stdout));
println!("output.stderr: {}", String::from_utf8_lossy(&output.stderr));
assert!(output.status.success(), "failed to execute make process");
println!("cargo:rustc-link-search={}", out_dir);
println!("cargo:rustc-link-lib=static=galleryresources");
}
|
println!(" QT_COMP: {}", default_qt_comp);
|
random_line_split
|
build.rs
|
use std::path::Path;
use std::path::PathBuf;
use std::env;
use std::process::Command;
fn main() {
let out_dir = env::var("OUT_DIR").unwrap();
let target = env::var("TARGET").unwrap();
// http://doc.crates.io/build-script.html#inputs-to-the-build-script
// "the build script’s current directory is the source directory of the build script’s package."
let current_dir = env::current_dir().unwrap();
let src_dir = current_dir.join("src");
let qt_dir = env::var("QT_DIR").map(|p| PathBuf::from(p)).unwrap_or({
println!("Environnement variable 'QT_DIR' not set!");
println!("Defaulting to ${{HOME}}/Qt/${{QT_VER}}/${{QT_COMP}} where:");
let home_dir = env::home_dir().map(|p| PathBuf::from(p)).unwrap();
let default_qt_ver = "5.7".to_string();
let default_qt_comp = if target.contains("linux") {
"gcc_64".to_string()
} else if target.contains("darwin") {
|
e {
panic!("Unsuported platform in gallery's build.rs!")
};
println!(" QT_VER: {}", default_qt_ver);
println!(" QT_COMP: {}", default_qt_comp);
let qt_dir_default = home_dir.join("Qt")
.join(env::var("QT_VER").unwrap_or(default_qt_ver))
.join(env::var("QT_COMP").unwrap_or(default_qt_comp));
qt_dir_default
});
println!("Using Qt directory: {:?}", qt_dir);
println!("Use QT_DIR environment variable to ovewrite.");
let qmake = qt_dir.join("bin").join("qmake");
// Run qmake to create a library with the resource file
let output = Command::new(qmake).args(&[src_dir])
.current_dir(&Path::new(&out_dir))
.output()
.expect("failed to execute 'qmake' process");
println!("output.status: {}", output.status);
println!("output.stdout: {}", String::from_utf8_lossy(&output.stdout));
println!("output.stderr: {}", String::from_utf8_lossy(&output.stderr));
assert!(output.status.success(), "failed to execute qmake process");
// Call 'make'
let output = Command::new("make").current_dir(&Path::new(&out_dir))
.output()
.expect("failed to execute'make' process");
println!("output.status: {}", output.status);
println!("output.stdout: {}", String::from_utf8_lossy(&output.stdout));
println!("output.stderr: {}", String::from_utf8_lossy(&output.stderr));
assert!(output.status.success(), "failed to execute make process");
println!("cargo:rustc-link-search={}", out_dir);
println!("cargo:rustc-link-lib=static=galleryresources");
}
|
"clang_64".to_string()
} els
|
conditional_block
|
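One note on the build.rs rows above: `Result::unwrap_or` evaluates its argument eagerly, so the fallback block (including its "QT_DIR not set" diagnostics) runs even when QT_DIR is set. Below is a minimal sketch of the lazy form with `unwrap_or_else`, reusing the same environment variables; the `HOME` lookup replacing the deprecated `env::home_dir` is an assumption of this sketch, not part of the original script.

```rust
use std::env;
use std::path::PathBuf;

/// Sketch only: resolve the Qt directory, computing the
/// ${HOME}/Qt/${QT_VER}/${QT_COMP} fallback lazily so the
/// diagnostics run only when QT_DIR really is unset.
fn qt_dir(target: &str) -> PathBuf {
    env::var("QT_DIR").map(PathBuf::from).unwrap_or_else(|_| {
        println!("Environment variable 'QT_DIR' not set, using the default location");
        let default_comp = if target.contains("linux") { "gcc_64" } else { "clang_64" };
        let home = PathBuf::from(env::var("HOME").expect("HOME not set"));
        home.join("Qt")
            .join(env::var("QT_VER").unwrap_or_else(|_| "5.7".to_string()))
            .join(env::var("QT_COMP").unwrap_or_else(|_| default_comp.to_string()))
    })
}
```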
tokens.rs
|
use std::collections::HashMap;
pub use self::Block::*;
pub use self::Inline::*;
pub type Document = Vec<Block>;
pub type Text = Vec<Inline>;
pub type LinkMap = HashMap<String, LinkDescription>;
pub struct LinkDescription {
pub id: String,
pub link: String,
pub title: Option<String>
}
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum Block {
Heading {
level: usize,
content: Text
},
BlockQuote(Document),
BlockCode {
tag: Option<String>,
content: String
},
OrderedList {
start_index: usize,
items: Vec<Document>
},
UnorderedList {
items: Vec<Document>
},
Paragraph(Text),
HorizontalRule
}
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum Inline{
LineBreak,
Chunk(String),
Emphasis(Text),
MoreEmphasis(Text),
Code(String),
Link {
text: Option<Text>, // None for automatic links
link: Option<String>,
title: Option<String>,
id: Option<String>
},
Image {
alt: Text,
link: Option<String>,
title: Option<String>,
id: Option<String>
}
}
pub trait FixLinks {
#[inline]
fn fix_links_opt(&mut self, link_map: Option<&LinkMap>) {
match link_map {
Some(hm) => self.fix_links(hm),
None => {}
}
}
fn fix_links(&mut self, link_map: &LinkMap);
}
impl FixLinks for Block {
fn fix_links(&mut self, link_map: &LinkMap) {
match *self {
BlockQuote(ref mut content) => content.fix_links(link_map),
OrderedList { ref mut items,.. } | UnorderedList { ref mut items } =>
for item in items.iter_mut() {
item.fix_links(link_map);
},
Paragraph(ref mut content) | Heading { ref mut content,.. } =>
content.fix_links(link_map),
_ => {}
}
}
}
impl FixLinks for Document {
fn fix_links(&mut self, link_map: &LinkMap) {
for b in self.iter_mut() {
b.fix_links(link_map);
}
}
}
impl FixLinks for Text {
fn fix_links(&mut self, link_map: &LinkMap) {
for i in self.iter_mut() {
i.fix_links(link_map);
}
}
}
impl FixLinks for Inline {
fn fix_links(&mut self, link_map: &LinkMap)
|
}
}
}
|
{
match *self {
Emphasis(ref mut content) | MoreEmphasis(ref mut content) =>
content.fix_links(link_map),
Link { ref mut link, ref mut title, id: Some(ref id), .. } => {
match link_map.get(id) {
Some(ld) => {
if link.is_none() {
*link = Some(ld.link.clone());
}
if title.is_none() && ld.title.is_none() {
*title = ld.title.clone();
}
}
None => {}
}
}
_ => {}
|
identifier_body
|
tokens.rs
|
use std::collections::HashMap;
pub use self::Block::*;
pub use self::Inline::*;
pub type Document = Vec<Block>;
pub type Text = Vec<Inline>;
pub type LinkMap = HashMap<String, LinkDescription>;
pub struct LinkDescription {
pub id: String,
pub link: String,
pub title: Option<String>
}
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum Block {
Heading {
level: usize,
content: Text
},
BlockQuote(Document),
BlockCode {
tag: Option<String>,
content: String
},
OrderedList {
start_index: usize,
items: Vec<Document>
},
UnorderedList {
items: Vec<Document>
},
Paragraph(Text),
HorizontalRule
}
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum Inline{
LineBreak,
Chunk(String),
Emphasis(Text),
MoreEmphasis(Text),
Code(String),
Link {
text: Option<Text>, // None for automatic links
link: Option<String>,
title: Option<String>,
id: Option<String>
},
Image {
alt: Text,
link: Option<String>,
title: Option<String>,
id: Option<String>
}
}
pub trait FixLinks {
#[inline]
fn fix_links_opt(&mut self, link_map: Option<&LinkMap>) {
match link_map {
Some(hm) => self.fix_links(hm),
None => {}
}
}
fn fix_links(&mut self, link_map: &LinkMap);
}
|
OrderedList { ref mut items,.. } | UnorderedList { ref mut items } =>
for item in items.iter_mut() {
item.fix_links(link_map);
},
Paragraph(ref mut content) | Heading { ref mut content,.. } =>
content.fix_links(link_map),
_ => {}
}
}
}
impl FixLinks for Document {
fn fix_links(&mut self, link_map: &LinkMap) {
for b in self.iter_mut() {
b.fix_links(link_map);
}
}
}
impl FixLinks for Text {
fn fix_links(&mut self, link_map: &LinkMap) {
for i in self.iter_mut() {
i.fix_links(link_map);
}
}
}
impl FixLinks for Inline {
fn fix_links(&mut self, link_map: &LinkMap) {
match *self {
Emphasis(ref mut content) | MoreEmphasis(ref mut content) =>
content.fix_links(link_map),
Link { ref mut link, ref mut title, id: Some(ref id),.. } => {
match link_map.get(id) {
Some(ld) => {
if link.is_none() {
*link = Some(ld.link.clone());
}
if title.is_none() && ld.title.is_none() {
*title = ld.title.clone();
}
}
None => {}
}
}
_ => {}
}
}
}
|
impl FixLinks for Block {
fn fix_links(&mut self, link_map: &LinkMap) {
match *self {
BlockQuote(ref mut content) => content.fix_links(link_map),
|
random_line_split
|
tokens.rs
|
use std::collections::HashMap;
pub use self::Block::*;
pub use self::Inline::*;
pub type Document = Vec<Block>;
pub type Text = Vec<Inline>;
pub type LinkMap = HashMap<String, LinkDescription>;
pub struct LinkDescription {
pub id: String,
pub link: String,
pub title: Option<String>
}
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum Block {
Heading {
level: usize,
content: Text
},
BlockQuote(Document),
BlockCode {
tag: Option<String>,
content: String
},
OrderedList {
start_index: usize,
items: Vec<Document>
},
UnorderedList {
items: Vec<Document>
},
Paragraph(Text),
HorizontalRule
}
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum Inline{
LineBreak,
Chunk(String),
Emphasis(Text),
MoreEmphasis(Text),
Code(String),
Link {
text: Option<Text>, // None for automatic links
link: Option<String>,
title: Option<String>,
id: Option<String>
},
Image {
alt: Text,
link: Option<String>,
title: Option<String>,
id: Option<String>
}
}
pub trait FixLinks {
#[inline]
fn fix_links_opt(&mut self, link_map: Option<&LinkMap>) {
match link_map {
Some(hm) => self.fix_links(hm),
None =>
|
}
}
fn fix_links(&mut self, link_map: &LinkMap);
}
impl FixLinks for Block {
fn fix_links(&mut self, link_map: &LinkMap) {
match *self {
BlockQuote(ref mut content) => content.fix_links(link_map),
OrderedList { ref mut items,.. } | UnorderedList { ref mut items } =>
for item in items.iter_mut() {
item.fix_links(link_map);
},
Paragraph(ref mut content) | Heading { ref mut content,.. } =>
content.fix_links(link_map),
_ => {}
}
}
}
impl FixLinks for Document {
fn fix_links(&mut self, link_map: &LinkMap) {
for b in self.iter_mut() {
b.fix_links(link_map);
}
}
}
impl FixLinks for Text {
fn fix_links(&mut self, link_map: &LinkMap) {
for i in self.iter_mut() {
i.fix_links(link_map);
}
}
}
impl FixLinks for Inline {
fn fix_links(&mut self, link_map: &LinkMap) {
match *self {
Emphasis(ref mut content) | MoreEmphasis(ref mut content) =>
content.fix_links(link_map),
Link { ref mut link, ref mut title, id: Some(ref id),.. } => {
match link_map.get(id) {
Some(ld) => {
if link.is_none() {
*link = Some(ld.link.clone());
}
if title.is_none() && ld.title.is_none() {
*title = ld.title.clone();
}
}
None => {}
}
}
_ => {}
}
}
}
|
{}
|
conditional_block
|
tokens.rs
|
use std::collections::HashMap;
pub use self::Block::*;
pub use self::Inline::*;
pub type Document = Vec<Block>;
pub type Text = Vec<Inline>;
pub type LinkMap = HashMap<String, LinkDescription>;
pub struct LinkDescription {
pub id: String,
pub link: String,
pub title: Option<String>
}
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum Block {
Heading {
level: usize,
content: Text
},
BlockQuote(Document),
BlockCode {
tag: Option<String>,
content: String
},
OrderedList {
start_index: usize,
items: Vec<Document>
},
UnorderedList {
items: Vec<Document>
},
Paragraph(Text),
HorizontalRule
}
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum Inline{
LineBreak,
Chunk(String),
Emphasis(Text),
MoreEmphasis(Text),
Code(String),
Link {
text: Option<Text>, // None for automatic links
link: Option<String>,
title: Option<String>,
id: Option<String>
},
Image {
alt: Text,
link: Option<String>,
title: Option<String>,
id: Option<String>
}
}
pub trait FixLinks {
#[inline]
fn fix_links_opt(&mut self, link_map: Option<&LinkMap>) {
match link_map {
Some(hm) => self.fix_links(hm),
None => {}
}
}
fn fix_links(&mut self, link_map: &LinkMap);
}
impl FixLinks for Block {
fn fix_links(&mut self, link_map: &LinkMap) {
match *self {
BlockQuote(ref mut content) => content.fix_links(link_map),
OrderedList { ref mut items,.. } | UnorderedList { ref mut items } =>
for item in items.iter_mut() {
item.fix_links(link_map);
},
Paragraph(ref mut content) | Heading { ref mut content,.. } =>
content.fix_links(link_map),
_ => {}
}
}
}
impl FixLinks for Document {
fn fix_links(&mut self, link_map: &LinkMap) {
for b in self.iter_mut() {
b.fix_links(link_map);
}
}
}
impl FixLinks for Text {
fn fix_links(&mut self, link_map: &LinkMap) {
for i in self.iter_mut() {
i.fix_links(link_map);
}
}
}
impl FixLinks for Inline {
fn
|
(&mut self, link_map: &LinkMap) {
match *self {
Emphasis(ref mut content) | MoreEmphasis(ref mut content) =>
content.fix_links(link_map),
Link { ref mut link, ref mut title, id: Some(ref id),.. } => {
match link_map.get(id) {
Some(ld) => {
if link.is_none() {
*link = Some(ld.link.clone());
}
if title.is_none() && ld.title.is_none() {
*title = ld.title.clone();
}
}
None => {}
}
}
_ => {}
}
}
}
|
fix_links
|
identifier_name
|
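The tokens.rs rows above define a small Markdown AST together with a FixLinks pass that resolves reference-style links from a LinkMap. A minimal usage sketch, assuming it sits in the same module so the re-exported Block/Inline variants are in scope:

```rust
// Sketch only: resolve a reference-style link [Rust][rust] against a LinkMap.
fn resolve_example() -> Document {
    let mut link_map = LinkMap::new();
    link_map.insert("rust".to_string(), LinkDescription {
        id: "rust".to_string(),
        link: "https://www.rust-lang.org".to_string(),
        title: Some("Rust".to_string()),
    });

    let mut doc: Document = vec![Paragraph(vec![Link {
        text: Some(vec![Chunk("Rust".to_string())]),
        link: None, // unresolved until fix_links runs
        title: None,
        id: Some("rust".to_string()),
    }])];

    doc.fix_links(&link_map); // fills in `link` from the map entry keyed by `id`
    doc
}
```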
skip_while.rs
|
use core::iter::*;
#[test]
fn test_iterator_skip_while() {
let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
let ys = [15, 16, 17, 19];
let it = xs.iter().skip_while(|&x| *x < 15);
let mut i = 0;
for x in it {
assert_eq!(*x, ys[i]);
i += 1;
}
assert_eq!(i, ys.len());
}
#[test]
fn test_iterator_skip_while_fold() {
let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
let ys = [15, 16, 17, 19];
let it = xs.iter().skip_while(|&x| *x < 15);
let i = it.fold(0, |i, &x| {
assert_eq!(x, ys[i]);
i + 1
});
assert_eq!(i, ys.len());
let mut it = xs.iter().skip_while(|&x| *x < 15);
assert_eq!(it.next(), Some(&ys[0])); // process skips before folding
let i = it.fold(1, |i, &x| {
assert_eq!(x, ys[i]);
i + 1
});
assert_eq!(i, ys.len());
}
#[test]
fn
|
() {
let f = &|acc, x| i32::checked_add(2 * acc, x);
fn p(&x: &i32) -> bool {
(x % 10) <= 5
}
assert_eq!((1..20).skip_while(p).try_fold(7, f), (6..20).try_fold(7, f));
let mut iter = (1..20).skip_while(p);
assert_eq!(iter.nth(5), Some(11));
assert_eq!(iter.try_fold(7, f), (12..20).try_fold(7, f));
let mut iter = (0..50).skip_while(|&x| (x % 20) < 15);
assert_eq!(iter.try_fold(0, i8::checked_add), None);
assert_eq!(iter.next(), Some(23));
}
|
test_skip_while_try_fold
|
identifier_name
|
skip_while.rs
|
use core::iter::*;
#[test]
fn test_iterator_skip_while() {
let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
let ys = [15, 16, 17, 19];
let it = xs.iter().skip_while(|&x| *x < 15);
let mut i = 0;
for x in it {
assert_eq!(*x, ys[i]);
i += 1;
}
assert_eq!(i, ys.len());
}
#[test]
fn test_iterator_skip_while_fold() {
let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
let ys = [15, 16, 17, 19];
let it = xs.iter().skip_while(|&x| *x < 15);
let i = it.fold(0, |i, &x| {
assert_eq!(x, ys[i]);
i + 1
});
assert_eq!(i, ys.len());
let mut it = xs.iter().skip_while(|&x| *x < 15);
assert_eq!(it.next(), Some(&ys[0])); // process skips before folding
let i = it.fold(1, |i, &x| {
assert_eq!(x, ys[i]);
i + 1
|
#[test]
fn test_skip_while_try_fold() {
let f = &|acc, x| i32::checked_add(2 * acc, x);
fn p(&x: &i32) -> bool {
(x % 10) <= 5
}
assert_eq!((1..20).skip_while(p).try_fold(7, f), (6..20).try_fold(7, f));
let mut iter = (1..20).skip_while(p);
assert_eq!(iter.nth(5), Some(11));
assert_eq!(iter.try_fold(7, f), (12..20).try_fold(7, f));
let mut iter = (0..50).skip_while(|&x| (x % 20) < 15);
assert_eq!(iter.try_fold(0, i8::checked_add), None);
assert_eq!(iter.next(), Some(23));
}
|
});
assert_eq!(i, ys.len());
}
|
random_line_split
|
skip_while.rs
|
use core::iter::*;
#[test]
fn test_iterator_skip_while() {
let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
let ys = [15, 16, 17, 19];
let it = xs.iter().skip_while(|&x| *x < 15);
let mut i = 0;
for x in it {
assert_eq!(*x, ys[i]);
i += 1;
}
assert_eq!(i, ys.len());
}
#[test]
fn test_iterator_skip_while_fold() {
let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
let ys = [15, 16, 17, 19];
let it = xs.iter().skip_while(|&x| *x < 15);
let i = it.fold(0, |i, &x| {
assert_eq!(x, ys[i]);
i + 1
});
assert_eq!(i, ys.len());
let mut it = xs.iter().skip_while(|&x| *x < 15);
assert_eq!(it.next(), Some(&ys[0])); // process skips before folding
let i = it.fold(1, |i, &x| {
assert_eq!(x, ys[i]);
i + 1
});
assert_eq!(i, ys.len());
}
#[test]
fn test_skip_while_try_fold() {
let f = &|acc, x| i32::checked_add(2 * acc, x);
fn p(&x: &i32) -> bool
|
assert_eq!((1..20).skip_while(p).try_fold(7, f), (6..20).try_fold(7, f));
let mut iter = (1..20).skip_while(p);
assert_eq!(iter.nth(5), Some(11));
assert_eq!(iter.try_fold(7, f), (12..20).try_fold(7, f));
let mut iter = (0..50).skip_while(|&x| (x % 20) < 15);
assert_eq!(iter.try_fold(0, i8::checked_add), None);
assert_eq!(iter.next(), Some(23));
}
|
{
(x % 10) <= 5
}
|
identifier_body
|
simpleapi.rs
|
// Simple API example, ported from http://lua-users.org/wiki/SimpleLuaApiExample
// This is a simple introductory example of how to interface to Lua from Rust.
// The Rust program loads a Lua script file, sets some Lua variables, runs the
// Lua script, and reads back the return value.
#![allow(uppercase_variables)]
extern crate lua;
use std::{io, os};
use std::iter::range_inclusive;
fn main()
|
* want the script to receive it, then ask Lua to run it.
*/
L.newtable(); // We will pass a table
/*
* To put values into the table, we first push the index, then the
* value, and then call rawset() with the index of the table in the
* stack. Let's see why it's -3: In Lua, the value -1 always refers to
* the top of the stack. When you create the table with newtable(),
* the table gets pushed into the top of the stack. When you push the
* index and then the cell value, the stack looks like:
*
* - [stack bottom] -- table, index, value [top]
*
* So the -1 will refer to the cell value, thus -3 is used to refer to
* the table itself. Note that rawset() pops the last two elements
* of the stack, so that after it has been called, the table is at the
* top of the stack.
*/
for i in range_inclusive(1, 5) {
L.pushinteger(i); // Push the table index
L.pushinteger(i*2); // Push the cell value
L.rawset(-3); // Stores the pair in the table
}
// By what name is the script going to reference our table?
L.setglobal("foo");
// Ask Lua to run our little script
match L.pcall(0, lua::MULTRET, 0) {
Ok(()) => (),
Err(_) => {
let _ = writeln!(&mut io::stderr(),
"Failed to run script: {}", L.describe(-1));
os::set_exit_status(1);
return;
}
}
// Get the returned value at the top of the stack (index -1)
let sum = L.tonumber(-1);
println!("Script returned: {}", sum);
L.pop(1); // Take the returned value out of the stack
// L's destructor will close the state for us
}
|
{
let mut L = lua::State::new();
L.openlibs(); // Load Lua libraries
// Load the file containing the script we are going to run
let path = Path::new("simpleapi.lua");
match L.loadfile(Some(&path)) {
Ok(_) => (),
Err(_) => {
// If something went wrong, error message is at the top of the stack
let _ = writeln!(&mut io::stderr(),
"Couldn't load file: {}", L.describe(-1));
os::set_exit_status(1);
return;
}
}
/*
* Ok, now here we go: We pass data to the lua script on the stack.
* That is, we first have to prepare Lua's virtual stack the way we
|
identifier_body
|
simpleapi.rs
|
// Simple API example, ported from http://lua-users.org/wiki/SimpleLuaApiExample
// This is a simple introductory example of how to interface to Lua from Rust.
// The Rust program loads a Lua script file, sets some Lua variables, runs the
// Lua script, and reads back the return value.
#![allow(uppercase_variables)]
extern crate lua;
use std::{io, os};
use std::iter::range_inclusive;
fn main() {
let mut L = lua::State::new();
L.openlibs(); // Load Lua libraries
// Load the file containing the script we are going to run
let path = Path::new("simpleapi.lua");
match L.loadfile(Some(&path)) {
Ok(_) => (),
Err(_) => {
// If something went wrong, error message is at the top of the stack
let _ = writeln!(&mut io::stderr(),
"Couldn't load file: {}", L.describe(-1));
os::set_exit_status(1);
return;
}
}
/*
* Ok, now here we go: We pass data to the lua script on the stack.
* That is, we first have to prepare Lua's virtual stack the way we
* want the script to receive it, then ask Lua to run it.
*/
L.newtable(); // We will pass a table
/*
* To put values into the table, we first push the index, then the
* value, and then call rawset() with the index of the table in the
* stack. Let's see why it's -3: In Lua, the value -1 always refers to
* the top of the stack. When you create the table with newtable(),
* the table gets pushed into the top of the stack. When you push the
* index and then the cell value, the stack looks like:
*
* - [stack bottom] -- table, index, value [top]
*
* So the -1 will refer to the cell value, thus -3 is used to refer to
* the table itself. Note that rawset() pops the last two elements
* of the stack, so that after it has been called, the table is at the
* top of the stack.
*/
for i in range_inclusive(1, 5) {
L.pushinteger(i); // Push the table index
L.pushinteger(i*2); // Push the cell value
L.rawset(-3); // Stores the pair in the table
}
// By what name is the script going to reference our table?
L.setglobal("foo");
// Ask Lua to run our little script
match L.pcall(0, lua::MULTRET, 0) {
Ok(()) => (),
Err(_) => {
let _ = writeln!(&mut io::stderr(),
|
// Get the returned value at the top of the stack (index -1)
let sum = L.tonumber(-1);
println!("Script returned: {}", sum);
L.pop(1); // Take the returned value out of the stack
// L's destructor will close the state for us
}
|
"Failed to run script: {}", L.describe(-1));
os::set_exit_status(1);
return;
}
}
|
random_line_split
|
simpleapi.rs
|
// Simple API example, ported from http://lua-users.org/wiki/SimpleLuaApiExample
// This is a simple introductory example of how to interface to Lua from Rust.
// The Rust program loads a Lua script file, sets some Lua variables, runs the
// Lua script, and reads back the return value.
#![allow(uppercase_variables)]
extern crate lua;
use std::{io, os};
use std::iter::range_inclusive;
fn
|
() {
let mut L = lua::State::new();
L.openlibs(); // Load Lua libraries
// Load the file containing the script we are going to run
let path = Path::new("simpleapi.lua");
match L.loadfile(Some(&path)) {
Ok(_) => (),
Err(_) => {
// If something went wrong, error message is at the top of the stack
let _ = writeln!(&mut io::stderr(),
"Couldn't load file: {}", L.describe(-1));
os::set_exit_status(1);
return;
}
}
/*
* Ok, now here we go: We pass data to the lua script on the stack.
* That is, we first have to prepare Lua's virtual stack the way we
* want the script to receive it, then ask Lua to run it.
*/
L.newtable(); // We will pass a table
/*
* To put values into the table, we first push the index, then the
* value, and then call rawset() with the index of the table in the
* stack. Let's see why it's -3: In Lua, the value -1 always refers to
* the top of the stack. When you create the table with newtable(),
* the table gets pushed into the top of the stack. When you push the
* index and then the cell value, the stack looks like:
*
* - [stack bottom] -- table, index, value [top]
*
* So the -1 will refer to the cell value, thus -3 is used to refer to
* the table itself. Note that rawset() pops the last two elements
* of the stack, so that after it has been called, the table is at the
* top of the stack.
*/
for i in range_inclusive(1, 5) {
L.pushinteger(i); // Push the table index
L.pushinteger(i*2); // Push the cell value
L.rawset(-3); // Stores the pair in the table
}
// By what name is the script going to reference our table?
L.setglobal("foo");
// Ask Lua to run our little script
match L.pcall(0, lua::MULTRET, 0) {
Ok(()) => (),
Err(_) => {
let _ = writeln!(&mut io::stderr(),
"Failed to run script: {}", L.describe(-1));
os::set_exit_status(1);
return;
}
}
// Get the returned value at the top of the stack (index -1)
let sum = L.tonumber(-1);
println!("Script returned: {}", sum);
L.pop(1); // Take the returned value out of the stack
// L's destructor will close the state for us
}
|
main
|
identifier_name
|
extend_corpora.rs
|
// Copyright 2015-2018 Deyan Ginev. See the LICENSE
// file at the top-level directory of this distribution.
//
// Licensed under the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>.
// This file may not be copied, modified, or distributed
// except according to those terms.
use cortex::backend::Backend;
use cortex::importer::Importer;
use cortex::models::Corpus;
use std::env;
/// Extends all corpora registered with the `CorTeX` backend, with any new available sources
/// (example usage: arXiv.org releases new source bundles every month, which warrant an update at
/// the same frequency.)
fn
|
() {
// Note that we realize the initial import via a real cortex worker, but use a simple utility
// script for extensions. This is the case since the goal here is to do a simple sysadmin
// "maintenance update", rather than a full-blown "semantic" union operation
let backend = Backend::default();
// If input is provided, only extend the corpus of the given name/path.
let mut input_args = env::args();
let _ = input_args.next();
let corpora = if let Some(path) = input_args.next() {
if let Ok(corpus) = Corpus::find_by_path(&path, &backend.connection) {
vec![corpus]
} else {
panic!(
"No corpus could be found at path {:?}. Make sure path matches DB registration.",
path
);
}
} else {
backend.corpora()
};
for corpus in corpora {
// First, build an importer, which will perform the extension
let importer = Importer {
corpus: corpus.clone(),
backend: Backend::default(),
cwd: Importer::cwd(),
};
// Extend the already imported corpus. I prefer that method name to "update", as we won't yet
// implement downsizing on deletion.
let extend_start = time::get_time();
println!("-- Extending: {:?}", corpus.name);
match importer.extend_corpus() {
Ok(_) => {},
Err(e) => println!("Corpus extension panicked: {:?}", e),
};
let extend_end = time::get_time();
let extend_duration = (extend_end - extend_start).num_milliseconds();
println!(
"-- Extending corpus {:?} took {:?}ms",
corpus.name, extend_duration
);
// Then re-register all services, so that they pick up on the tasks
let register_start = time::get_time();
match corpus.select_services(&backend.connection) {
Ok(services) => {
for service in services {
let service_id = service.id;
if service_id > 2 {
println!(
" Extending service {:?} on corpus {:?}",
service.name, corpus.name
);
backend.extend_service(&service, &corpus.path).unwrap();
}
}
},
Err(e) => println!("Services could not be fetched: {:?}", e),
};
let register_end = time::get_time();
let register_duration = (register_end - register_start).num_milliseconds();
println!(
"-- Service registration on corpus {:?} took {:?}ms",
corpus.name, register_duration
);
}
}
|
main
|
identifier_name
|
extend_corpora.rs
|
// Copyright 2015-2018 Deyan Ginev. See the LICENSE
// file at the top-level directory of this distribution.
//
// Licensed under the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>.
// This file may not be copied, modified, or distributed
// except according to those terms.
use cortex::backend::Backend;
use cortex::importer::Importer;
use cortex::models::Corpus;
use std::env;
/// Extends all corpora registered with the `CorTeX` backend, with any new available sources
/// (example usage: arXiv.org releases new source bundles every month, which warrant an update at
/// the same frequency.)
fn main() {
// Note that we realize the initial import via a real cortex worker, but use a simple utility
// script for extensions. This is the case since the goal here is to do a simple sysadmin
// "maintenance update", rather than a full-blown "semantic" union operation
let backend = Backend::default();
// If input is provided, only extend the corpus of the given name/path.
let mut input_args = env::args();
let _ = input_args.next();
let corpora = if let Some(path) = input_args.next() {
if let Ok(corpus) = Corpus::find_by_path(&path, &backend.connection) {
vec![corpus]
} else {
panic!(
"No corpus could be found at path {:?}. Make sure path matches DB registration.",
path
);
}
} else {
backend.corpora()
};
for corpus in corpora {
// First, build an importer, which will perform the extension
let importer = Importer {
corpus: corpus.clone(),
backend: Backend::default(),
cwd: Importer::cwd(),
};
// Extend the already imported corpus. I prefer that method name to "update", as we won't yet
// implement downsizing on deletion.
let extend_start = time::get_time();
println!("-- Extending: {:?}", corpus.name);
match importer.extend_corpus() {
Ok(_) => {},
Err(e) => println!("Corpus extension panicked: {:?}", e),
};
let extend_end = time::get_time();
|
// Then re-register all services, so that they pick up on the tasks
let register_start = time::get_time();
match corpus.select_services(&backend.connection) {
Ok(services) => {
for service in services {
let service_id = service.id;
if service_id > 2 {
println!(
" Extending service {:?} on corpus {:?}",
service.name, corpus.name
);
backend.extend_service(&service, &corpus.path).unwrap();
}
}
},
Err(e) => println!("Services could not be fetched: {:?}", e),
};
let register_end = time::get_time();
let register_duration = (register_end - register_start).num_milliseconds();
println!(
"-- Service registration on corpus {:?} took {:?}ms",
corpus.name, register_duration
);
}
}
|
let extend_duration = (extend_end - extend_start).num_milliseconds();
println!(
"-- Extending corpus {:?} took {:?}ms",
corpus.name, extend_duration
);
|
random_line_split
|
extend_corpora.rs
|
// Copyright 2015-2018 Deyan Ginev. See the LICENSE
// file at the top-level directory of this distribution.
//
// Licensed under the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>.
// This file may not be copied, modified, or distributed
// except according to those terms.
use cortex::backend::Backend;
use cortex::importer::Importer;
use cortex::models::Corpus;
use std::env;
/// Extends all corpora registered with the `CorTeX` backend, with any new available sources
/// (example usage: arXiv.org releases new source bundles every month, which warrant an update at
/// the same frequency.)
fn main()
|
};
for corpus in corpora {
// First, build an importer, which will perform the extension
let importer = Importer {
corpus: corpus.clone(),
backend: Backend::default(),
cwd: Importer::cwd(),
};
// Extend the already imported corpus. I prefer that method name to "update", as we won't yet
// implement downsizing on deletion.
let extend_start = time::get_time();
println!("-- Extending: {:?}", corpus.name);
match importer.extend_corpus() {
Ok(_) => {},
Err(e) => println!("Corpus extension panicked: {:?}", e),
};
let extend_end = time::get_time();
let extend_duration = (extend_end - extend_start).num_milliseconds();
println!(
"-- Extending corpus {:?} took {:?}ms",
corpus.name, extend_duration
);
// Then re-register all services, so that they pick up on the tasks
let register_start = time::get_time();
match corpus.select_services(&backend.connection) {
Ok(services) => {
for service in services {
let service_id = service.id;
if service_id > 2 {
println!(
" Extending service {:?} on corpus {:?}",
service.name, corpus.name
);
backend.extend_service(&service, &corpus.path).unwrap();
}
}
},
Err(e) => println!("Services could not be fetched: {:?}", e),
};
let register_end = time::get_time();
let register_duration = (register_end - register_start).num_milliseconds();
println!(
"-- Service registration on corpus {:?} took {:?}ms",
corpus.name, register_duration
);
}
}
|
{
// Note that we realize the initial import via a real cortex worker, but use a simple utility
// script for extensions. This is the case since the goal here is to do a simple sysadmin
// "maintenance update", rather than a full-blown "semantic" union operation
let backend = Backend::default();
// If input is provided, only extend the corpus of the given name/path.
let mut input_args = env::args();
let _ = input_args.next();
let corpora = if let Some(path) = input_args.next() {
if let Ok(corpus) = Corpus::find_by_path(&path, &backend.connection) {
vec![corpus]
} else {
panic!(
"No corpus could be found at path {:?}. Make sure path matches DB registration.",
path
);
}
} else {
backend.corpora()
|
identifier_body
|
mod.rs
|
//! cache/mod.rs - Provides the ModuleCache struct which the ante compiler
//! pervasively uses to cache and store results from various compiler phases.
//! The most important things the cache stores are additional information about
//! the parse tree. For example, for each variable definition, there is a corresponding
//! `DefinitionInfo` struct and a `DefinitionInfoId` key that can be used on the
//! cache to access this struct. The `DefinitionInfo` struct stores additional information
//! about a definition like the `ast::Definition` node it was defined in, its name,
//! whether it is mutable, and how many times it is referenced in the program.
//! This XXXInfo and XXXInfoId pattern is also used for TraitDefinitions, TraitImpls, and Types.
//! See the corresponding structs further down in this file for more information.
//!
//! The ModuleCache itself is kept for the entirety of compilation - its contents may be
//! used in any phase and thus nothing is freed until the program is fully linked.
//! Any pass-specific information that isn't needed for later phases shouldn't be
//! kept in the ModuleCache and instead should be in a special data structure for
//! the relevant phase. An example is the `llvm::Generator` in the llvm codegen phase.
use crate::nameresolution::NameResolver;
use crate::types::{ TypeVariableId, TypeInfoId, TypeInfo, Type, TypeInfoBody };
use crate::types::{ TypeBinding, LetBindingLevel, Kind };
use crate::types::traits::{ RequiredImpl, RequiredTrait };
use crate::error::location::{ Location, Locatable };
use crate::parser::ast::{ Ast, Definition, TraitDefinition, TraitImpl, TypeAnnotation };
use crate::cache::unsafecache::UnsafeCache;
use std::path::{ Path, PathBuf };
use std::collections::HashMap;
mod unsafecache;
/// The ModuleCache is for information needed until compilation is completely finished
/// (ie. not just for one phase). Accessing each `Vec` inside the `ModuleCache` is done
/// only through the XXXInfoId keys returned as a result of each of the `push_xxx` methods
/// on the `ModuleCache`. These keys are also often stored in the AST itself as a result
/// of various passes. For example, name resolution will fill in the `definition: Option<DefinitionInfoId>`
/// field of each `ast::Variable` node, which has the effect of pointing each variable to
/// where it was defined.
pub struct ModuleCache<'a> {
/// All the 'root' directories for imports. In practice this will contain
/// the directory of the driver module as well as all directories containing
/// any libraries used by the program, including the standard library.
pub relative_roots: Vec<PathBuf>,
/// Maps ModuleId -> Ast
/// Contains all the parse trees parsed by the program.
pub parse_trees: UnsafeCache<'a, Ast<'a>>,
/// Used to map paths to parse trees or name resolvers
pub modules: HashMap<PathBuf, ModuleId>,
/// Maps ModuleId -> NameResolver
pub name_resolvers: UnsafeCache<'a, NameResolver>,
/// Holds all the previously seen filenames referenced by Locations
/// Used to lengthen the lifetime of Locations and the parse tree past
/// the lifetime of the file that was read from.
pub filepaths: Vec<PathBuf>,
/// Maps DefinitionInfoId -> DefinitionInfo
/// Filled out during name resolution.
pub definition_infos: Vec<DefinitionInfo<'a>>,
/// Maps VariableInfoId -> VariableInfo
/// Each ast::Variable node stores the required impls for use while
/// codegening the variable's definition. These impls are filled out
/// during type inference (see typechecker::find_impl). Unlike
/// DefinitionInfos, VariableInfos are per usage of the variable.
pub trait_bindings: Vec<TraitBinding<'a>>,
/// Maps TypeVariableId -> Type
/// Unique TypeVariableIds are generated during name
/// resolution and are unified during type inference
pub type_bindings: Vec<TypeBinding>,
/// Maps TypeInfoId -> TypeInfo
/// Filled out during name resolution
pub type_infos: Vec<TypeInfo<'a>>,
/// Maps TraitInfoId -> TraitInfo
/// Filled out during name resolution
pub trait_infos: Vec<TraitInfo<'a>>,
/// Maps ImplInfoId -> ImplInfo
/// Filled out during name resolution, though
/// definitions within impls aren't publicly exposed.
pub impl_infos: Vec<ImplInfo<'a>>,
/// Maps ImplScopeId -> Vec<ImplInfo>
/// Name resolution needs to store the impls visible to
/// each variable so when any UnknownTraitImpls are resolved
/// during type inference the inferencer can quickly get the
/// impls that should be in scope and select an instance.
pub impl_scopes: Vec<Vec<ImplInfoId>>,
/// Ante represents each member access (foo.bar) as a trait (.foo)
/// that is generated for each new field name used globally.
pub member_access_traits: HashMap<String, TraitInfoId>,
/// The builtin `Int a` trait that arises when using polymorphic
/// integer literals.
pub int_trait: TraitInfoId,
/// Used to give a unique ID to each node so they can later be
/// used during static trait dispatch.
pub variable_nodes: Vec</* name: */String>,
/// The filepath to ante's stdlib/prelude.an file to be automatically
/// included when defining a new ante module.
pub prelude_path: PathBuf,
}
/// The key for accessing parse trees or `NameResolver`s
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct ModuleId(pub usize);
#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
pub struct DefinitionInfoId(pub usize);
impl std::fmt::Debug for DefinitionInfoId {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
write!(f, "${}", self.0)
}
}
#[derive(Debug)]
pub enum DefinitionKind<'a> {
/// A variable/function definition in the form `a = b`
Definition(&'a mut Definition<'a>),
/// A trait definition in the form `trait A a with...`
TraitDefinition(&'a mut TraitDefinition<'a>),
/// An extern FFI definition with no body
Extern(&'a mut TypeAnnotation<'a>),
/// A TypeConstructor function to construct a type.
/// If the constructed type is a tagged union, tag will
/// be Some, otherwise if it is a struct, tag is None.
TypeConstructor { name: String, tag: Option<u8> },
Parameter,
/// Any variable declared in a match pattern. E.g. 'a' in
/// match None with
/// | a -> ()
MatchPattern,
}
/// Carries additional information about a variable's definition.
|
/// The corresponding DefinitionInfoId is attached to each
/// `ast::Variable` during name resolution.
#[derive(Debug)]
pub struct DefinitionInfo<'a> {
pub name: String,
pub location: Location<'a>,
/// Where this name was defined. It is expected that type checking
/// this Definition kind should result in self.typ being filled out.
pub definition: Option<DefinitionKind<'a>>,
/// True if this definition can be reassigned to.
pub mutable: bool,
/// Some(trait_id) if this is a definition from a trait. Note that
/// this is still None for definitions from trait impls.
pub trait_info: Option<TraitInfoId>,
/// For a given definition like:
/// foo (a: a) -> a
/// given Add a, Print a =...
/// required_traits is the "given..." part of the signature
pub required_traits: Vec<RequiredTrait>,
/// The type of this definition. Filled out during type inference,
/// and is guaranteed to be Some afterward.
pub typ: Option<Type>,
/// A count of how many times was this variable referenced in the program.
/// Used primarily for issuing unused warnings.
pub uses: u32,
}
impl<'a> Locatable<'a> for DefinitionInfo<'a> {
fn locate(&self) -> Location<'a> {
self.location
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TraitBindingId(pub usize);
/// TraitBindingIds are stored on ast::Variables and detail any
/// required_impls needed to compile the definitions
/// of these variables. These are filled out during type inference.
pub struct TraitBinding<'a> {
pub required_impls: Vec<RequiredImpl>,
pub location: Location<'a>,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
pub struct TraitInfoId(pub usize);
/// Additional information on the definition of a trait.
/// The corresponding TraitInfoId is attached to each
/// `ast::TraitDefinition` during name resolution.
///
/// Note that the builtin `Int a` trait as well as the builtin
/// member access family of traits also have their own TraitInfo.
#[derive(Debug)]
pub struct TraitInfo<'a> {
pub name: String,
/// The type arguments of this trait. These are the
/// `a b c` in `trait Foo a b c -> d e f with...`
/// Note that all traits must have at least 1 type
/// argument, otherwise there is no type to implement
/// the trait for.
pub typeargs: Vec<TypeVariableId>,
/// The possibly-empty functional dependencies of this trait.
/// These are the `d e f` in `trait Foo a b c -> d e f with...`
pub fundeps: Vec<TypeVariableId>,
pub location: Location<'a>,
/// The definitions included in this trait definition.
/// The term `definition` is used somewhat loosely here
/// since none of these functions/variables have bodies.
/// They're merely declarations that impl definitions will
/// later have to conform to.
pub definitions: Vec<DefinitionInfoId>,
/// The Ast node that defines this trait.
/// A value of None means this trait was builtin to the compiler
pub trait_node: Option<&'a mut TraitDefinition<'a>>,
pub uses: u32,
}
impl<'a> TraitInfo<'a> {
/// Member access traits are special in that they're automatically
/// defined and implemented by the compiler.
pub fn is_member_access(&self) -> bool {
self.name.starts_with(".")
}
/// The `name` of a member access trait is `.field`
/// where `field` is the name of the described field.
/// E.g. `.name Person string` is a trait constraining
/// the `Person` type to have a `name` field of type `string`.
pub fn get_field_name(&self) -> &str {
assert!(self.is_member_access());
&self.name[1..]
}
}
impl<'a> Locatable<'a> for TraitInfo<'a> {
fn locate(&self) -> Location<'a> {
self.location
}
}
/// An ImplScopeId is attached to an ast::Variable to remember
/// the impls that were in scope when it was used since scopes are
/// thrown away after name resolution but the impls in scope are still
/// needed during type inference.
/// TODO: The concept of an ImplScope is somewhat of a wart in the trait inference
/// algorithm. Getting rid of them would likely make it both cleaner and faster.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct ImplScopeId(pub usize);
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct ImplInfoId(pub usize);
/// Corresponds to a `ast::TraitImpl` node, carrying extra information
/// on it. These are filled out during name resolution.
#[derive(Debug)]
pub struct ImplInfo<'a> {
pub trait_id: TraitInfoId,
pub typeargs: Vec<Type>,
pub location: Location<'a>,
pub definitions: Vec<DefinitionInfoId>,
pub given: Vec<RequiredTrait>,
pub trait_impl: &'a mut TraitImpl<'a>,
}
/// Each `ast::Variable` node corresponds to a VariableId that identifies it,
/// filled out during name resolution. These are currently used to identify the
/// origin/callsites of traits for trait dispatch.
/// `TraitConstraints` are passed around during type inference carrying these so
/// that once they're finally resolved, the correct variable can be linked to the
/// correct impl definition.
#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
pub struct VariableId(pub usize);
impl<'a> ModuleCache<'a> {
pub fn new(project_directory: &'a Path) -> ModuleCache<'a> {
let mut cache = ModuleCache {
relative_roots: vec![project_directory.to_owned(), dirs::config_dir().unwrap().join("ante/stdlib")],
int_trait: TraitInfoId(0), // Dummy value since we must have the cache to push a trait
prelude_path: dirs::config_dir().unwrap().join("stdlib/prelude"),
// Really wish you could do..Default::default() for the remaining fields
modules: HashMap::default(),
parse_trees: UnsafeCache::default(),
name_resolvers: UnsafeCache::default(),
filepaths: Vec::default(),
definition_infos: Vec::default(),
trait_bindings: Vec::default(),
type_bindings: Vec::default(),
type_infos: Vec::default(),
trait_infos: Vec::default(),
impl_infos: Vec::default(),
impl_scopes: Vec::default(),
member_access_traits: HashMap::default(),
variable_nodes: Vec::default(),
};
let new_typevar = cache.next_type_variable_id(LetBindingLevel(std::usize::MAX));
cache.push_trait_definition("Int".to_string(), vec![new_typevar], vec![], None, Location::builtin());
cache
}
pub fn push_filepath(&mut self, path: PathBuf) -> &'a Path {
let index = self.filepaths.len();
self.filepaths.push(path);
let path: &Path = &self.filepaths[index];
unsafe { std::mem::transmute(path) }
}
pub fn push_definition(&mut self, name: &str, mutable: bool, location: Location<'a>) -> DefinitionInfoId {
let id = self.definition_infos.len();
self.definition_infos.push(DefinitionInfo {
name: name.to_string(),
definition: None,
trait_info: None,
required_traits: vec![],
mutable,
location,
typ: None,
uses: 0,
});
DefinitionInfoId(id)
}
pub fn push_ast(&mut self, ast: Ast<'a>) -> ModuleId {
ModuleId(self.parse_trees.push(ast))
}
pub fn push_type_info(&mut self, name: String, args: Vec<TypeVariableId>, location: Location<'a>) -> TypeInfoId {
let id = self.type_infos.len();
let type_info = TypeInfo { name, args, location, uses: 0, body: TypeInfoBody::Unknown };
self.type_infos.push(type_info);
TypeInfoId(id)
}
pub fn get_name_resolver_by_path(&self, path: &Path) -> Option<&mut NameResolver> {
let id = self.modules.get(path)?;
self.name_resolvers.get_mut(id.0)
}
pub fn next_type_variable_id(&mut self, level: LetBindingLevel) -> TypeVariableId {
let id = self.type_bindings.len();
self.type_bindings.push(TypeBinding::Unbound(level, Kind::Normal(0)));
TypeVariableId(id)
}
pub fn next_type_variable(&mut self, level: LetBindingLevel) -> Type {
let id = self.next_type_variable_id(level);
Type::TypeVariable(id)
}
pub fn push_trait_definition(&mut self, name: String, typeargs: Vec<TypeVariableId>,
fundeps: Vec<TypeVariableId>, trait_node: Option<&'a mut TraitDefinition<'a>>,
location: Location<'a>) -> TraitInfoId
{
let id = self.trait_infos.len();
self.trait_infos.push(TraitInfo {
name,
typeargs,
fundeps,
definitions: vec![],
trait_node,
location,
uses: 0,
});
TraitInfoId(id)
}
pub fn push_trait_impl(&mut self, trait_id: TraitInfoId, typeargs: Vec<Type>,
definitions: Vec<DefinitionInfoId>, trait_impl: &'a mut TraitImpl<'a>,
given: Vec<RequiredTrait>, location: Location<'a>) -> ImplInfoId {
let id = self.impl_infos.len();
self.impl_infos.push(ImplInfo {
trait_id,
typeargs,
definitions,
location,
given,
trait_impl,
});
ImplInfoId(id)
}
pub fn push_impl_scope(&mut self) -> ImplScopeId {
let id = self.impl_scopes.len();
self.impl_scopes.push(vec![]);
ImplScopeId(id)
}
pub fn push_trait_binding(&mut self, location: Location<'a>) -> TraitBindingId {
let id = self.trait_bindings.len();
self.trait_bindings.push(TraitBinding {
required_impls: vec![],
location,
});
TraitBindingId(id)
}
/// Get or create an instance of the '.' trait family for the given field name
pub fn get_member_access_trait(&mut self, field_name: &str, level: LetBindingLevel) -> TraitInfoId {
match self.member_access_traits.get(field_name) {
Some(id) => *id,
None => {
let trait_name = ".".to_string() + field_name;
let collection_type = self.next_type_variable_id(level);
let field_type = self.next_type_variable_id(level);
let id = self.push_trait_definition(trait_name, vec![collection_type], vec![field_type], None, Location::builtin());
self.member_access_traits.insert(field_name.to_string(), id);
id
},
}
}
pub fn push_variable_node(&mut self, name: &str) -> VariableId {
let id = VariableId(self.variable_nodes.len());
self.variable_nodes.push(name.to_string());
id
}
}
|
/// Note that two variables defined in the same pattern, e.g: `(a, b) = c`
/// will have their own unique `DefinitionInfo`s, but each DefinitionInfo
/// will refer to the same `ast::Definition` in its definition field.
///
|
random_line_split
|
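The mod.rs doc comments above describe the XXXInfo / XXXInfoId pattern: each push_* method appends to a Vec owned by the cache and hands back a typed index that later phases store (for example on AST nodes) and use to reach the shared record. A minimal, self-contained sketch of that pattern follows; the Cache/Info/InfoId names are illustrative stand-ins, not ante's actual types:

```rust
/// Illustrative stand-in for the DefinitionInfo / DefinitionInfoId pairs above.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct InfoId(usize);

#[derive(Debug)]
struct Info { name: String, uses: u32 }

#[derive(Default)]
struct Cache { infos: Vec<Info> }

impl Cache {
    /// push_* appends and returns the key; callers keep the key, not a reference.
    fn push_info(&mut self, name: &str) -> InfoId {
        let id = InfoId(self.infos.len());
        self.infos.push(Info { name: name.to_string(), uses: 0 });
        id
    }

    fn info_mut(&mut self, id: InfoId) -> &mut Info {
        &mut self.infos[id.0]
    }
}

fn main() {
    let mut cache = Cache::default();
    let id = cache.push_info("x");   // e.g. created during name resolution
    cache.info_mut(id).uses += 1;    // e.g. a later pass bumps the use count via the key
    assert_eq!(cache.infos[id.0].uses, 1);
}
```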
mod.rs
|
//! cache/mod.rs - Provides the ModuleCache struct which the ante compiler
//! pervasively uses to cache and store results from various compiler phases.
//! The most important things the cache stores are additional information about
//! the parse tree. For example, for each variable definition, there is a corresponding
//! `DefinitionInfo` struct and a `DefinitionInfoId` key that can be used on the
//! cache to access this struct. The `DefinitionInfo` struct stores additional information
//! about a definition like the `ast::Definition` node it was defined in, its name,
//! whether it is mutable, and how many times it is referenced in the program.
//! This XXXInfo and XXXInfoId pattern is also used for TraitDefinitions, TraitImpls, and Types.
//! See the corresponding structs further down in this file for more information.
//!
//! The ModuleCache itself is kept for the entirety of compilation - its contents may be
//! used in any phase and thus nothing is freed until the program is fully linked.
//! Any pass-specific information that isn't needed for later phases shouldn't be
//! kept in the ModuleCache and instead should be in a special data structure for
//! the relevant phase. An example is the `llvm::Generator` in the llvm codegen phase.
use crate::nameresolution::NameResolver;
use crate::types::{ TypeVariableId, TypeInfoId, TypeInfo, Type, TypeInfoBody };
use crate::types::{ TypeBinding, LetBindingLevel, Kind };
use crate::types::traits::{ RequiredImpl, RequiredTrait };
use crate::error::location::{ Location, Locatable };
use crate::parser::ast::{ Ast, Definition, TraitDefinition, TraitImpl, TypeAnnotation };
use crate::cache::unsafecache::UnsafeCache;
use std::path::{ Path, PathBuf };
use std::collections::HashMap;
mod unsafecache;
/// The ModuleCache is for information needed until compilation is completely finished
/// (ie. not just for one phase). Accessing each `Vec` inside the `ModuleCache` is done
/// only through the XXXInfoId keys returned as a result of each of the `push_xxx` methods
/// on the `ModuleCache`. These keys are also often stored in the AST itself as a result
/// of various passes. For example, name resolution will fill in the `definition: Option<DefinitionInfoId>`
/// field of each `ast::Variable` node, which has the effect of pointing each variable to
/// where it was defined.
pub struct ModuleCache<'a> {
/// All the 'root' directories for imports. In practice this will contain
/// the directory of the driver module as well as all directories containing
/// any libraries used by the program, including the standard library.
pub relative_roots: Vec<PathBuf>,
/// Maps ModuleId -> Ast
/// Contains all the parse trees parsed by the program.
pub parse_trees: UnsafeCache<'a, Ast<'a>>,
/// Used to map paths to parse trees or name resolvers
pub modules: HashMap<PathBuf, ModuleId>,
/// Maps ModuleId -> NameResolver
pub name_resolvers: UnsafeCache<'a, NameResolver>,
/// Holds all the previously seen filenames referenced by Locations
/// Used to lengthen the lifetime of Locations and the parse tree past
/// the lifetime of the file that was read from.
pub filepaths: Vec<PathBuf>,
/// Maps DefinitionInfoId -> DefinitionInfo
/// Filled out during name resolution.
pub definition_infos: Vec<DefinitionInfo<'a>>,
/// Maps VariableInfoId -> VariableInfo
/// Each ast::Variable node stores the required impls for use while
/// codegening the variable's definition. These impls are filled out
/// during type inference (see typechecker::find_impl). Unlike
/// DefinitionInfos, VariableInfos are per usage of the variable.
pub trait_bindings: Vec<TraitBinding<'a>>,
/// Maps TypeVariableId -> Type
/// Unique TypeVariableIds are generated during name
/// resolution and are unified during type inference
pub type_bindings: Vec<TypeBinding>,
/// Maps TypeInfoId -> TypeInfo
/// Filled out during name resolution
pub type_infos: Vec<TypeInfo<'a>>,
/// Maps TraitInfoId -> TraitInfo
/// Filled out during name resolution
pub trait_infos: Vec<TraitInfo<'a>>,
/// Maps ImplInfoId -> ImplInfo
/// Filled out during name resolution, though
/// definitions within impls aren't publicly exposed.
pub impl_infos: Vec<ImplInfo<'a>>,
/// Maps ImplScopeId -> Vec<ImplInfo>
/// Name resolution needs to store the impls visible to
/// each variable so when any UnknownTraitImpls are resolved
/// during type inference the inferencer can quickly get the
/// impls that should be in scope and select an instance.
pub impl_scopes: Vec<Vec<ImplInfoId>>,
/// Ante represents each member access (foo.bar) as a trait (.foo)
/// that is generated for each new field name used globally.
pub member_access_traits: HashMap<String, TraitInfoId>,
/// The builtin `Int a` trait that arises when using polymorphic
/// integer literals.
pub int_trait: TraitInfoId,
/// Used to give a unique ID to each node so they can later be
/// used during static trait dispatch.
pub variable_nodes: Vec</* name: */String>,
/// The filepath to ante's stdlib/prelude.an file to be automatically
/// included when defining a new ante module.
pub prelude_path: PathBuf,
}
/// The key for accessing parse trees or `NameResolver`s
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct ModuleId(pub usize);
#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
pub struct DefinitionInfoId(pub usize);
impl std::fmt::Debug for DefinitionInfoId {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
write!(f, "${}", self.0)
}
}
#[derive(Debug)]
pub enum DefinitionKind<'a> {
/// A variable/function definition in the form `a = b`
Definition(&'a mut Definition<'a>),
/// A trait definition in the form `trait A a with...`
TraitDefinition(&'a mut TraitDefinition<'a>),
/// An extern FFI definition with no body
Extern(&'a mut TypeAnnotation<'a>),
/// A TypeConstructor function to construct a type.
/// If the constructed type is a tagged union, tag will
/// be Some, otherwise if it is a struct, tag is None.
TypeConstructor { name: String, tag: Option<u8> },
Parameter,
/// Any variable declared in a match pattern. E.g. 'a' in
/// match None with
/// | a -> ()
MatchPattern,
}
/// Carries additional information about a variable's definition.
/// Note that two variables defined in the same pattern, e.g: `(a, b) = c`
/// will have their own unique `DefinitionInfo`s, but each DefinitionInfo
/// will refer to the same `ast::Definition` in its definition field.
///
/// The corresponding DefinitionInfoId is attached to each
/// `ast::Variable` during name resolution.
#[derive(Debug)]
pub struct DefinitionInfo<'a> {
pub name: String,
pub location: Location<'a>,
/// Where this name was defined. It is expected that type checking
/// this Definition kind should result in self.typ being filled out.
pub definition: Option<DefinitionKind<'a>>,
/// True if this definition can be reassigned to.
pub mutable: bool,
/// Some(trait_id) if this is a definition from a trait. Note that
/// this is still None for definitions from trait impls.
pub trait_info: Option<TraitInfoId>,
/// For a given definition like:
/// foo (a: a) -> a
/// given Add a, Print a =...
/// required_traits is the "given..." part of the signature
pub required_traits: Vec<RequiredTrait>,
/// The type of this definition. Filled out during type inference,
/// and is guaranteed to be Some afterward.
pub typ: Option<Type>,
/// A count of how many times was this variable referenced in the program.
/// Used primarily for issuing unused warnings.
pub uses: u32,
}
impl<'a> Locatable<'a> for DefinitionInfo<'a> {
fn locate(&self) -> Location<'a> {
self.location
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TraitBindingId(pub usize);
/// TraitBindingIds are stored on ast::Variables and detail any
/// required_impls needed to compile the definitions
/// of these variables. These are filled out during type inference.
pub struct TraitBinding<'a> {
pub required_impls: Vec<RequiredImpl>,
pub location: Location<'a>,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
pub struct TraitInfoId(pub usize);
/// Additional information on the definition of a trait.
/// The corresponding TraitInfoId is attached to each
/// `ast::TraitDefinition` during name resolution.
///
/// Note that the builtin `Int a` trait as well as the builtin
/// member access family of traits also have their own TraitInfo.
#[derive(Debug)]
pub struct TraitInfo<'a> {
pub name: String,
/// The type arguments of this trait. These are the
/// `a b c` in `trait Foo a b c -> d e f with...`
/// Note that all traits must have at least 1 type
/// argument, otherwise there is no type to implement
/// the trait for.
pub typeargs: Vec<TypeVariableId>,
/// The possibly-empty functional dependencies of this trait.
/// These are the `d e f` in `trait Foo a b c -> d e f with...`
pub fundeps: Vec<TypeVariableId>,
pub location: Location<'a>,
/// The definitions included in this trait definition.
/// The term `definition` is used somewhat loosely here
/// since none of these functions/variables have bodies.
/// They're merely declarations that impl definitions will
/// later have to conform to.
pub definitions: Vec<DefinitionInfoId>,
/// The Ast node that defines this trait.
/// A value of None means this trait was builtin to the compiler
pub trait_node: Option<&'a mut TraitDefinition<'a>>,
pub uses: u32,
}
impl<'a> TraitInfo<'a> {
/// Member access traits are special in that they're automatically
/// defined and implemented by the compiler.
pub fn is_member_access(&self) -> bool {
self.name.starts_with(".")
}
/// The `name` of a member access trait is `.field`
/// where `field` is the name of the described field.
/// E.g. `.name Person string` is a trait constraining
/// the `Person` type to have a `name` field of type `string`.
pub fn get_field_name(&self) -> &str {
assert!(self.is_member_access());
&self.name[1..]
}
}
impl<'a> Locatable<'a> for TraitInfo<'a> {
fn
|
(&self) -> Location<'a> {
self.location
}
}
/// An ImplScopeId is attached to an ast::Variable to remember
/// the impls that were in scope when it was used since scopes are
/// thrown away after name resolution but the impls in scope are still
/// needed during type inference.
/// TODO: The concept of an ImplScope is somewhat of a wart in the trait inference
/// algorithm. Getting rid of them would likely make it both cleaner and faster.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct ImplScopeId(pub usize);
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct ImplInfoId(pub usize);
/// Corresponds to a `ast::TraitImpl` node, carrying extra information
/// on it. These are filled out during name resolution.
#[derive(Debug)]
pub struct ImplInfo<'a> {
pub trait_id: TraitInfoId,
pub typeargs: Vec<Type>,
pub location: Location<'a>,
pub definitions: Vec<DefinitionInfoId>,
pub given: Vec<RequiredTrait>,
pub trait_impl: &'a mut TraitImpl<'a>,
}
/// Each `ast::Variable` node corresponds to a VariableId that identifies it,
/// filled out during name resolution. These are currently used to identify the
/// origin/callsites of traits for trait dispatch.
/// `TraitConstraints` are passed around during type inference carrying these so
/// that once they're finally resolved, the correct variable can be linked to the
/// correct impl definition.
#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
pub struct VariableId(pub usize);
impl<'a> ModuleCache<'a> {
pub fn new(project_directory: &'a Path) -> ModuleCache<'a> {
let mut cache = ModuleCache {
relative_roots: vec![project_directory.to_owned(), dirs::config_dir().unwrap().join("ante/stdlib")],
int_trait: TraitInfoId(0), // Dummy value since we must have the cache to push a trait
prelude_path: dirs::config_dir().unwrap().join("stdlib/prelude"),
// Really wish you could do ..Default::default() for the remaining fields
modules: HashMap::default(),
parse_trees: UnsafeCache::default(),
name_resolvers: UnsafeCache::default(),
filepaths: Vec::default(),
definition_infos: Vec::default(),
trait_bindings: Vec::default(),
type_bindings: Vec::default(),
type_infos: Vec::default(),
trait_infos: Vec::default(),
impl_infos: Vec::default(),
impl_scopes: Vec::default(),
member_access_traits: HashMap::default(),
variable_nodes: Vec::default(),
};
let new_typevar = cache.next_type_variable_id(LetBindingLevel(std::usize::MAX));
cache.push_trait_definition("Int".to_string(), vec![new_typevar], vec![], None, Location::builtin());
cache
}
pub fn push_filepath(&mut self, path: PathBuf) -> &'a Path {
let index = self.filepaths.len();
self.filepaths.push(path);
let path: &Path = &self.filepaths[index];
unsafe { std::mem::transmute(path) }
}
pub fn push_definition(&mut self, name: &str, mutable: bool, location: Location<'a>) -> DefinitionInfoId {
let id = self.definition_infos.len();
self.definition_infos.push(DefinitionInfo {
name: name.to_string(),
definition: None,
trait_info: None,
required_traits: vec![],
mutable,
location,
typ: None,
uses: 0,
});
DefinitionInfoId(id)
}
pub fn push_ast(&mut self, ast: Ast<'a>) -> ModuleId {
ModuleId(self.parse_trees.push(ast))
}
pub fn push_type_info(&mut self, name: String, args: Vec<TypeVariableId>, location: Location<'a>) -> TypeInfoId {
let id = self.type_infos.len();
let type_info = TypeInfo { name, args, location, uses: 0, body: TypeInfoBody::Unknown };
self.type_infos.push(type_info);
TypeInfoId(id)
}
pub fn get_name_resolver_by_path(&self, path: &Path) -> Option<&mut NameResolver> {
let id = self.modules.get(path)?;
self.name_resolvers.get_mut(id.0)
}
pub fn next_type_variable_id(&mut self, level: LetBindingLevel) -> TypeVariableId {
let id = self.type_bindings.len();
self.type_bindings.push(TypeBinding::Unbound(level, Kind::Normal(0)));
TypeVariableId(id)
}
pub fn next_type_variable(&mut self, level: LetBindingLevel) -> Type {
let id = self.next_type_variable_id(level);
Type::TypeVariable(id)
}
pub fn push_trait_definition(&mut self, name: String, typeargs: Vec<TypeVariableId>,
fundeps: Vec<TypeVariableId>, trait_node: Option<&'a mut TraitDefinition<'a>>,
location: Location<'a>) -> TraitInfoId
{
let id = self.trait_infos.len();
self.trait_infos.push(TraitInfo {
name,
typeargs,
fundeps,
definitions: vec![],
trait_node,
location,
uses: 0,
});
TraitInfoId(id)
}
pub fn push_trait_impl(&mut self, trait_id: TraitInfoId, typeargs: Vec<Type>,
definitions: Vec<DefinitionInfoId>, trait_impl: &'a mut TraitImpl<'a>,
given: Vec<RequiredTrait>, location: Location<'a>) -> ImplInfoId {
let id = self.impl_infos.len();
self.impl_infos.push(ImplInfo {
trait_id,
typeargs,
definitions,
location,
given,
trait_impl,
});
ImplInfoId(id)
}
pub fn push_impl_scope(&mut self) -> ImplScopeId {
let id = self.impl_scopes.len();
self.impl_scopes.push(vec![]);
ImplScopeId(id)
}
pub fn push_trait_binding(&mut self, location: Location<'a>) -> TraitBindingId {
let id = self.trait_bindings.len();
self.trait_bindings.push(TraitBinding {
required_impls: vec![],
location,
});
TraitBindingId(id)
}
/// Get or create an instance of the '.' trait family for the given field name
pub fn get_member_access_trait(&mut self, field_name: &str, level: LetBindingLevel) -> TraitInfoId {
match self.member_access_traits.get(field_name) {
Some(id) => *id,
None => {
let trait_name = ".".to_string() + field_name;
let collection_type = self.next_type_variable_id(level);
let field_type = self.next_type_variable_id(level);
let id = self.push_trait_definition(trait_name, vec![collection_type], vec![field_type], None, Location::builtin());
self.member_access_traits.insert(field_name.to_string(), id);
id
},
}
}
pub fn push_variable_node(&mut self, name: &str) -> VariableId {
let id = VariableId(self.variable_nodes.len());
self.variable_nodes.push(name.to_string());
id
}
}
|
locate
|
identifier_name
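As a side note on the member-access trait family in the record above: the compiler encodes each field access `foo.bar` as a trait whose name is the field name prefixed with a dot, so recovering the field is just stripping that prefix. The following is a minimal standalone sketch of that naming convention; the `MemberAccessName` helper is hypothetical and not part of the ante codebase.

/// Minimal sketch of the ".field" naming convention used by the
/// member-access trait family; `MemberAccessName` is a hypothetical helper.
struct MemberAccessName(String);

impl MemberAccessName {
    /// Build the trait name for a field access, e.g. "bar" -> ".bar".
    fn from_field(field: &str) -> Self {
        MemberAccessName(format!(".{}", field))
    }

    /// A member-access trait name always starts with a dot.
    fn is_member_access(&self) -> bool {
        self.0.starts_with('.')
    }

    /// Recover the field name by stripping the leading dot.
    fn field_name(&self) -> &str {
        assert!(self.is_member_access());
        &self.0[1..]
    }
}

fn main() {
    let name = MemberAccessName::from_field("bar");
    assert!(name.is_member_access());
    assert_eq!(name.field_name(), "bar");
}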
|
ide.rs
|
use machine::{outb, inb, inl};
use malloc::must_allocate;
use core::slice;
use collections::Vec;
use paging::PAGE_SIZE;
const SECTOR_BYTES: usize = 512;
const PORTS: [u16; 2] = [0x1f0, 0x170];
#[inline]
fn controller(drive: u8) -> u8 { (drive >> 1) & 1 }
#[inline]
fn channel(drive: u8) -> u8 { drive & 1 }
#[inline]
fn port(drive: u8) -> u16 {
PORTS[controller(drive) as usize]
}
#[inline]
fn get_status(drive: u8) -> u8 {
inb(port(drive) + 7)
}
const BSY: u8 = 0x80;
const RDY: u8 = 0x40;
const DRQ: u8 = 0x8;
#[inline]
fn wait_for_drive(drive: u8) {
while (get_status(drive) & BSY) != 0 {}
while (get_status(drive) & RDY) == 0 {}
}
fn wait_for_data(drive: u8) {
wait_for_drive(drive);
while (get_status(drive) & DRQ) == 0 {}
}
// Returns the number of LBA32 sectors on the drive, if there is one
pub fn sector_count(drive: u8) -> Option<u32> {
let base = port(drive);
let channel = channel(drive);
outb(base + 6, 0xA0 | (channel << 4));
// Exclusive range
for i in 2..6 {
outb(base + i, 0);
}
outb(base + 7, 0xEC);
if get_status(drive) == 0 {
return None;
}
wait_for_data(drive);
let mut sector_count = 0;
for i in 0..128 { // 128 32-bit integers on port
let result = inl(base);
if i == 30
|
}
Some(sector_count)
}
pub fn read_sectors(drive: u8, start_sector: u32, buffer: &mut [u32]) {
let base = port(drive);
let channel = channel(drive);
let longs = buffer.len() as u32;
let num_sectors = longs >> 7;
let mut sector = start_sector;
let end_sector = start_sector + num_sectors;
let mut buffer_idx = 0;
while sector < end_sector {
// Read as many sectors as you can
let sector_count = (end_sector - sector) as u8;
outb(base + 2, sector_count);
outb(base + 3, sector as u8);
outb(base + 4, (sector >> 8) as u8);
outb(base + 5, (sector >> 16) as u8);
outb(base + 6, 0xE0 | (channel << 4) | (((sector >> 24) & 0xf) as u8));
outb(base + 7, 0x20);
for _ in 0..sector_count {
wait_for_data(drive);
for _ in 0..(SECTOR_BYTES / ::core::mem::size_of::<u32>()) {
buffer[buffer_idx] = inl(base);
buffer_idx += 1;
}
}
sector += sector_count as u32;
}
}
// You have to manually free the pointer when you stop caring
pub fn slurp_drive(drive: u8) -> Option<Vec<u32>> {
sector_count(drive).map(|count| {
let bytes = (count as usize) * SECTOR_BYTES;
let longs = bytes >> 2;
unsafe {
let pointer = must_allocate(bytes, PAGE_SIZE) as *mut u32; // Code should be page-aligned
read_sectors(drive, 0, slice::from_raw_parts_mut(pointer, longs));
Vec::from_raw_parts(pointer, longs, longs)
}
})
}
|
{
sector_count = result;
}
|
conditional_block
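For reference, the read path in the ide.rs record above programs the ATA task-file registers with an LBA28 address split across four ports. The sketch below only computes those register values for a given sector and channel, without any port I/O, so the byte layout can be checked in isolation; the `TaskFile` struct is a hypothetical helper, not part of this driver.

/// Hypothetical helper mirroring the register values written in `read_sectors`
/// for an LBA28 read (command 0x20).
#[derive(Debug, PartialEq)]
struct TaskFile {
    sector_count: u8, // base + 2
    lba_low: u8,      // base + 3: bits 0..7 of the sector number
    lba_mid: u8,      // base + 4: bits 8..15
    lba_high: u8,     // base + 5: bits 16..23
    drive_head: u8,   // base + 6: 0xE0 | channel << 4 | bits 24..27
    command: u8,      // base + 7: 0x20 = READ SECTORS
}

fn lba28_taskfile(sector: u32, sector_count: u8, channel: u8) -> TaskFile {
    TaskFile {
        sector_count,
        lba_low: sector as u8,
        lba_mid: (sector >> 8) as u8,
        lba_high: (sector >> 16) as u8,
        drive_head: 0xE0 | (channel << 4) | (((sector >> 24) & 0xf) as u8),
        command: 0x20,
    }
}

fn main() {
    let tf = lba28_taskfile(0x0123_4567, 8, 1);
    assert_eq!(tf.lba_low, 0x67);
    assert_eq!(tf.lba_mid, 0x45);
    assert_eq!(tf.lba_high, 0x23);
    assert_eq!(tf.drive_head, 0xE0 | 0x10 | 0x01);
}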
|
ide.rs
|
use machine::{outb, inb, inl};
use malloc::must_allocate;
use core::slice;
use collections::Vec;
use paging::PAGE_SIZE;
const SECTOR_BYTES: usize = 512;
const PORTS: [u16; 2] = [0x1f0, 0x170];
#[inline]
fn controller(drive: u8) -> u8 { (drive >> 1) & 1 }
#[inline]
fn channel(drive: u8) -> u8 { drive & 1 }
#[inline]
fn port(drive: u8) -> u16 {
PORTS[controller(drive) as usize]
}
#[inline]
fn get_status(drive: u8) -> u8 {
inb(port(drive) + 7)
}
const BSY: u8 = 0x80;
const RDY: u8 = 0x40;
const DRQ: u8 = 0x8;
#[inline]
fn wait_for_drive(drive: u8) {
while (get_status(drive) & BSY) != 0 {}
while (get_status(drive) & RDY) == 0 {}
}
fn wait_for_data(drive: u8) {
wait_for_drive(drive);
while (get_status(drive) & DRQ) == 0 {}
}
// Returns the number of LBA32 sectors on the drive, if there is one
pub fn sector_count(drive: u8) -> Option<u32> {
let base = port(drive);
let channel = channel(drive);
outb(base + 6, 0xA0 | (channel << 4));
// Exclusive range
for i in 2..6 {
outb(base + i, 0);
}
outb(base + 7, 0xEC);
if get_status(drive) == 0 {
return None;
}
wait_for_data(drive);
let mut sector_count = 0;
for i in 0..128 { // 128 32-bit integers on port
let result = inl(base);
if i == 30 {
sector_count = result;
}
}
Some(sector_count)
}
pub fn read_sectors(drive: u8, start_sector: u32, buffer: &mut [u32])
|
wait_for_data(drive);
for _ in 0..(SECTOR_BYTES / ::core::mem::size_of::<u32>()) {
buffer[buffer_idx] = inl(base);
buffer_idx += 1;
}
}
sector += sector_count as u32;
}
}
// You have to manually free the pointer when you stop caring
pub fn slurp_drive(drive: u8) -> Option<Vec<u32>> {
sector_count(drive).map(|count| {
let bytes = (count as usize) * SECTOR_BYTES;
let longs = bytes >> 2;
unsafe {
let pointer = must_allocate(bytes, PAGE_SIZE) as *mut u32; // Code should be page-aligned
read_sectors(drive, 0, slice::from_raw_parts_mut(pointer, longs));
Vec::from_raw_parts(pointer, longs, longs)
}
})
}
|
{
let base = port(drive);
let channel = channel(drive);
let longs = buffer.len() as u32;
let num_sectors = longs >> 7;
let mut sector = start_sector;
let end_sector = start_sector + num_sectors;
let mut buffer_idx = 0;
while sector < end_sector {
// Read as many sectors as you can
let sector_count = (end_sector - sector) as u8;
outb(base + 2, sector_count);
outb(base + 3, sector as u8);
outb(base + 4, (sector >> 8) as u8);
outb(base + 5, (sector >> 16) as u8);
outb(base + 6, 0xE0 | (channel << 4) | (((sector >> 24) & 0xf) as u8));
outb(base + 7, 0x20);
for _ in 0..sector_count {
|
identifier_body
|
ide.rs
|
use machine::{outb, inb, inl};
use malloc::must_allocate;
use core::slice;
use collections::Vec;
use paging::PAGE_SIZE;
const SECTOR_BYTES: usize = 512;
const PORTS: [u16; 2] = [0x1f0, 0x170];
#[inline]
fn controller(drive: u8) -> u8 { (drive >> 1) & 1 }
#[inline]
fn channel(drive: u8) -> u8 { drive & 1 }
#[inline]
fn port(drive: u8) -> u16 {
PORTS[controller(drive) as usize]
}
#[inline]
fn get_status(drive: u8) -> u8 {
inb(port(drive) + 7)
}
const BSY: u8 = 0x80;
const RDY: u8 = 0x40;
const DRQ: u8 = 0x8;
#[inline]
fn wait_for_drive(drive: u8) {
while (get_status(drive) & BSY) != 0 {}
while (get_status(drive) & RDY) == 0 {}
}
fn wait_for_data(drive: u8) {
wait_for_drive(drive);
while (get_status(drive) & DRQ) == 0 {}
}
// Returns the number of LBA32 sectors on the drive, if there is one
pub fn sector_count(drive: u8) -> Option<u32> {
let base = port(drive);
let channel = channel(drive);
outb(base + 6, 0xA0 | (channel << 4));
// Exclusive range
for i in 2..6 {
outb(base + i, 0);
}
outb(base + 7, 0xEC);
if get_status(drive) == 0 {
return None;
}
wait_for_data(drive);
let mut sector_count = 0;
for i in 0..128 { // 128 32-bit integers on port
let result = inl(base);
if i == 30 {
sector_count = result;
}
}
Some(sector_count)
}
pub fn read_sectors(drive: u8, start_sector: u32, buffer: &mut [u32]) {
let base = port(drive);
let channel = channel(drive);
let longs = buffer.len() as u32;
let num_sectors = longs >> 7;
let mut sector = start_sector;
let end_sector = start_sector + num_sectors;
let mut buffer_idx = 0;
while sector < end_sector {
// Read as many sectors as you can
let sector_count = (end_sector - sector) as u8;
|
outb(base + 7, 0x20);
for _ in 0..sector_count {
wait_for_data(drive);
for _ in 0..(SECTOR_BYTES / ::core::mem::size_of::<u32>()) {
buffer[buffer_idx] = inl(base);
buffer_idx += 1;
}
}
sector += sector_count as u32;
}
}
// You have to manually free the pointer when you stop caring
pub fn slurp_drive(drive: u8) -> Option<Vec<u32>> {
sector_count(drive).map(|count| {
let bytes = (count as usize) * SECTOR_BYTES;
let longs = bytes >> 2;
unsafe {
let pointer = must_allocate(bytes, PAGE_SIZE) as *mut u32; // Code should be page-aligned
read_sectors(drive, 0, slice::from_raw_parts_mut(pointer, longs));
Vec::from_raw_parts(pointer, longs, longs)
}
})
}
|
outb(base + 2, sector_count);
outb(base + 3, sector as u8);
outb(base + 4, (sector >> 8) as u8);
outb(base + 5, (sector >> 16) as u8);
outb(base + 6, 0xE0 | (channel << 4) | (((sector >> 24) & 0xf) as u8));
|
random_line_split
|
ide.rs
|
use machine::{outb, inb, inl};
use malloc::must_allocate;
use core::slice;
use collections::Vec;
use paging::PAGE_SIZE;
const SECTOR_BYTES: usize = 512;
const PORTS: [u16; 2] = [0x1f0, 0x170];
#[inline]
fn controller(drive: u8) -> u8 { (drive >> 1) & 1 }
#[inline]
fn channel(drive: u8) -> u8 { drive & 1 }
#[inline]
fn port(drive: u8) -> u16 {
PORTS[controller(drive) as usize]
}
#[inline]
fn get_status(drive: u8) -> u8 {
inb(port(drive) + 7)
}
const BSY: u8 = 0x80;
const RDY: u8 = 0x40;
const DRQ: u8 = 0x8;
#[inline]
fn wait_for_drive(drive: u8) {
while (get_status(drive) & BSY) != 0 {}
while (get_status(drive) & RDY) == 0 {}
}
fn wait_for_data(drive: u8) {
wait_for_drive(drive);
while (get_status(drive) & DRQ) == 0 {}
}
// Returns the number of LBA32 sectors on the drive, if there is one
pub fn sector_count(drive: u8) -> Option<u32> {
let base = port(drive);
let channel = channel(drive);
outb(base + 6, 0xA0 | (channel << 4));
// Exclusive range
for i in 2..6 {
outb(base + i, 0);
}
outb(base + 7, 0xEC);
if get_status(drive) == 0 {
return None;
}
wait_for_data(drive);
let mut sector_count = 0;
for i in 0..128 { // 128 32-bit integers on port
let result = inl(base);
if i == 30 {
sector_count = result;
}
}
Some(sector_count)
}
pub fn read_sectors(drive: u8, start_sector: u32, buffer: &mut [u32]) {
let base = port(drive);
let channel = channel(drive);
let longs = buffer.len() as u32;
let num_sectors = longs >> 7;
let mut sector = start_sector;
let end_sector = start_sector + num_sectors;
let mut buffer_idx = 0;
while sector < end_sector {
// Read as many sectors as you can
let sector_count = (end_sector - sector) as u8;
outb(base + 2, sector_count);
outb(base + 3, sector as u8);
outb(base + 4, (sector >> 8) as u8);
outb(base + 5, (sector >> 16) as u8);
outb(base + 6, 0xE0 | (channel << 4) | (((sector >> 24) & 0xf) as u8));
outb(base + 7, 0x20);
for _ in 0..sector_count {
wait_for_data(drive);
for _ in 0..(SECTOR_BYTES / ::core::mem::size_of::<u32>()) {
buffer[buffer_idx] = inl(base);
buffer_idx += 1;
}
}
sector += sector_count as u32;
}
}
// You have to manually free the pointer when you stop caring
pub fn
|
(drive: u8) -> Option<Vec<u32>> {
sector_count(drive).map(|count| {
let bytes = (count as usize) * SECTOR_BYTES;
let longs = bytes >> 2;
unsafe {
let pointer = must_allocate(bytes, PAGE_SIZE) as *mut u32; // Code should be page-aligned
read_sectors(drive, 0, slice::from_raw_parts_mut(pointer, longs));
Vec::from_raw_parts(pointer, longs, longs)
}
})
}
|
slurp_drive
|
identifier_name
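The `slurp_drive` middle above relies on a small piece of size arithmetic: a drive of `count` LBA sectors occupies count * 512 bytes, which is read as 32-bit words (bytes / 4). The sketch below restates just that arithmetic as a plain function, assuming the same 512-byte sector size; `buffer_lengths` is an illustrative helper, not part of the driver.

const SECTOR_BYTES: usize = 512;

/// Sketch of the buffer sizing in `slurp_drive`: total bytes and the number
/// of u32 words needed to hold the whole drive.
fn buffer_lengths(sector_count: u32) -> (usize, usize) {
    let bytes = (sector_count as usize) * SECTOR_BYTES;
    let longs = bytes >> 2; // number of u32 words
    (bytes, longs)
}

fn main() {
    // e.g. a 2048-sector drive is 1 MiB, i.e. 262144 u32 words.
    assert_eq!(buffer_lengths(2048), (1 << 20, 1 << 18));
}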
|
where-clauses-not-parameter.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn equal<T>(_: &T, _: &T) -> bool where isize : Eq {
true //~^ ERROR cannot bound type `isize`, where clause bounds may only be attached
}
// This should be fine; it involves a type parameter.
fn test<T: Eq>() -> bool where Option<T> : Eq {}
// This should be rejected as well.
fn
|
() -> bool where Option<isize> : Eq {}
//~^ ERROR cannot bound type `core::option::Option<isize>`, where clause bounds may
#[derive(PartialEq)]
//~^ ERROR cannot bound type `isize`, where clause bounds
enum Foo<T> where isize : Eq { MkFoo }
//~^ ERROR cannot bound type `isize`, where clause bounds
fn test3<T: Eq>() -> bool where Option<Foo<T>> : Eq {}
fn test4() -> bool where Option<Foo<isize>> : Eq {}
//~^ ERROR cannot bound type `core::option::Option<Foo<isize>>`, where clause bounds
trait Baz<T> where isize : Eq {
//~^ ERROR cannot bound type `isize`, where clause bounds may only
fn baz() where String : Eq; //~ ERROR cannot bound type `collections::string::String`
//~^ ERROR cannot bound type `isize`, where clause
}
impl Baz<isize> for isize where isize : Eq {
//~^ ERROR cannot bound type `isize`, where clause bounds
fn baz() where String : Eq {}
}
fn main() {
equal(&0is, &0is);
}
|
test2
|
identifier_name
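The test file above exercises the rule that where-clause bounds must mention a type parameter. For contrast, here is a small compiling sketch of the accepted form; the `same` helper is made up purely for illustration.

// A where-clause bound that involves the parameter T is allowed, unlike the
// rejected `where isize: Eq` cases in the test above.
fn same<T>(a: Option<T>, b: Option<T>) -> bool
where
    Option<T>: Eq,
{
    a == b
}

fn main() {
    assert!(same(Some(1), Some(1)));
    assert!(!same(Some(1), None));
}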
|
where-clauses-not-parameter.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn equal<T>(_: &T, _: &T) -> bool where isize : Eq {
true //~^ ERROR cannot bound type `isize`, where clause bounds may only be attached
}
// This should be fine; it involves a type parameter.
fn test<T: Eq>() -> bool where Option<T> : Eq {}
// This should be rejected as well.
fn test2() -> bool where Option<isize> : Eq {}
//~^ ERROR cannot bound type `core::option::Option<isize>`, where clause bounds may
#[derive(PartialEq)]
//~^ ERROR cannot bound type `isize`, where clause bounds
enum Foo<T> where isize : Eq { MkFoo }
//~^ ERROR cannot bound type `isize`, where clause bounds
fn test3<T: Eq>() -> bool where Option<Foo<T>> : Eq {}
fn test4() -> bool where Option<Foo<isize>> : Eq {}
//~^ ERROR cannot bound type `core::option::Option<Foo<isize>>`, where clause bounds
trait Baz<T> where isize : Eq {
//~^ ERROR cannot bound type `isize`, where clause bounds may only
fn baz() where String : Eq; //~ ERROR cannot bound type `collections::string::String`
//~^ ERROR cannot bound type `isize`, where clause
}
impl Baz<isize> for isize where isize : Eq {
//~^ ERROR cannot bound type `isize`, where clause bounds
fn baz() where String : Eq {}
}
fn main()
|
{
equal(&0is, &0is);
}
|
identifier_body
|
|
where-clauses-not-parameter.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
fn equal<T>(_: &T, _: &T) -> bool where isize : Eq {
true //~^ ERROR cannot bound type `isize`, where clause bounds may only be attached
}
// This should be fine; it involves a type parameter.
fn test<T: Eq>() -> bool where Option<T> : Eq {}
// This should be rejected as well.
fn test2() -> bool where Option<isize> : Eq {}
//~^ ERROR cannot bound type `core::option::Option<isize>`, where clause bounds may
#[derive(PartialEq)]
//~^ ERROR cannot bound type `isize`, where clause bounds
enum Foo<T> where isize : Eq { MkFoo }
//~^ ERROR cannot bound type `isize`, where clause bounds
fn test3<T: Eq>() -> bool where Option<Foo<T>> : Eq {}
fn test4() -> bool where Option<Foo<isize>> : Eq {}
//~^ ERROR cannot bound type `core::option::Option<Foo<isize>>`, where clause bounds
trait Baz<T> where isize : Eq {
//~^ ERROR cannot bound type `isize`, where clause bounds may only
fn baz() where String : Eq; //~ ERROR cannot bound type `collections::string::String`
//~^ ERROR cannot bound type `isize`, where clause
}
impl Baz<isize> for isize where isize : Eq {
//~^ ERROR cannot bound type `isize`, where clause bounds
fn baz() where String : Eq {}
}
fn main() {
equal(&0is, &0is);
}
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
random_line_split
|
uievent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::UIEventBinding;
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, MutNullableDom, RootedReference};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
use std::cell::Cell;
use std::default::Default;
// https://w3c.github.io/uievents/#interface-uievent
#[dom_struct]
pub struct UIEvent {
event: Event,
view: MutNullableDom<Window>,
detail: Cell<i32>,
}
impl UIEvent {
pub fn new_inherited() -> UIEvent {
UIEvent {
event: Event::new_inherited(),
view: Default::default(),
detail: Cell::new(0),
}
}
pub fn new_uninitialized(window: &Window) -> DomRoot<UIEvent> {
reflect_dom_object(
Box::new(UIEvent::new_inherited()),
window,
UIEventBinding::Wrap,
)
}
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: EventBubbles,
cancelable: EventCancelable,
view: Option<&Window>,
detail: i32,
) -> DomRoot<UIEvent> {
let ev = UIEvent::new_uninitialized(window);
ev.InitUIEvent(
type_,
bool::from(can_bubble),
bool::from(cancelable),
view,
detail,
);
ev
}
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &UIEventBinding::UIEventInit,
) -> Fallible<DomRoot<UIEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
let event = UIEvent::new(
window,
type_,
bubbles,
cancelable,
init.view.r(),
init.detail,
);
Ok(event)
}
}
impl UIEventMethods for UIEvent {
// https://w3c.github.io/uievents/#widl-UIEvent-view
fn GetView(&self) -> Option<DomRoot<Window>> {
self.view.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-detail
fn Detail(&self) -> i32 {
self.detail.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-initUIEvent
fn InitUIEvent(
&self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
) {
let event = self.upcast::<Event>();
if event.dispatching()
|
event.init_event(Atom::from(type_), can_bubble, cancelable);
self.view.set(view);
self.detail.set(detail);
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
|
{
return;
}
|
conditional_block
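The `UIEvent::Constructor` and `UIEvent::new` pair above round-trips between boolean init fields and the `EventBubbles`/`EventCancelable` enums through `From` conversions. Below is a minimal, hypothetical sketch of that pattern with a stand-in enum; it is not Servo's actual definition, only an illustration of the `From<bool>` / `From<Enum> for bool` pairing.

// Hypothetical stand-in for the EventBubbles-style enums used above; the real
// Servo types live in crate::dom::event and may differ in detail.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Bubbles {
    Bubbles,
    DoesNotBubble,
}

impl From<bool> for Bubbles {
    fn from(b: bool) -> Self {
        if b { Bubbles::Bubbles } else { Bubbles::DoesNotBubble }
    }
}

impl From<Bubbles> for bool {
    fn from(b: Bubbles) -> bool {
        b == Bubbles::Bubbles
    }
}

fn main() {
    // Round-trip as in UIEvent::Constructor -> UIEvent::new -> InitUIEvent.
    let from_init = Bubbles::from(true);
    assert!(bool::from(from_init));
}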
|
uievent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::UIEventBinding;
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, MutNullableDom, RootedReference};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
use std::cell::Cell;
use std::default::Default;
// https://w3c.github.io/uievents/#interface-uievent
#[dom_struct]
pub struct UIEvent {
event: Event,
view: MutNullableDom<Window>,
detail: Cell<i32>,
}
impl UIEvent {
pub fn new_inherited() -> UIEvent {
UIEvent {
event: Event::new_inherited(),
view: Default::default(),
detail: Cell::new(0),
}
}
pub fn new_uninitialized(window: &Window) -> DomRoot<UIEvent> {
reflect_dom_object(
Box::new(UIEvent::new_inherited()),
window,
UIEventBinding::Wrap,
)
}
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: EventBubbles,
cancelable: EventCancelable,
view: Option<&Window>,
detail: i32,
) -> DomRoot<UIEvent> {
let ev = UIEvent::new_uninitialized(window);
ev.InitUIEvent(
type_,
bool::from(can_bubble),
bool::from(cancelable),
view,
|
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &UIEventBinding::UIEventInit,
) -> Fallible<DomRoot<UIEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
let event = UIEvent::new(
window,
type_,
bubbles,
cancelable,
init.view.r(),
init.detail,
);
Ok(event)
}
}
impl UIEventMethods for UIEvent {
// https://w3c.github.io/uievents/#widl-UIEvent-view
fn GetView(&self) -> Option<DomRoot<Window>> {
self.view.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-detail
fn Detail(&self) -> i32 {
self.detail.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-initUIEvent
fn InitUIEvent(
&self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
) {
let event = self.upcast::<Event>();
if event.dispatching() {
return;
}
event.init_event(Atom::from(type_), can_bubble, cancelable);
self.view.set(view);
self.detail.set(detail);
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
|
detail,
);
ev
}
|
random_line_split
|
uievent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::UIEventBinding;
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, MutNullableDom, RootedReference};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
use std::cell::Cell;
use std::default::Default;
// https://w3c.github.io/uievents/#interface-uievent
#[dom_struct]
pub struct UIEvent {
event: Event,
view: MutNullableDom<Window>,
detail: Cell<i32>,
}
impl UIEvent {
pub fn
|
() -> UIEvent {
UIEvent {
event: Event::new_inherited(),
view: Default::default(),
detail: Cell::new(0),
}
}
pub fn new_uninitialized(window: &Window) -> DomRoot<UIEvent> {
reflect_dom_object(
Box::new(UIEvent::new_inherited()),
window,
UIEventBinding::Wrap,
)
}
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: EventBubbles,
cancelable: EventCancelable,
view: Option<&Window>,
detail: i32,
) -> DomRoot<UIEvent> {
let ev = UIEvent::new_uninitialized(window);
ev.InitUIEvent(
type_,
bool::from(can_bubble),
bool::from(cancelable),
view,
detail,
);
ev
}
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &UIEventBinding::UIEventInit,
) -> Fallible<DomRoot<UIEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
let event = UIEvent::new(
window,
type_,
bubbles,
cancelable,
init.view.r(),
init.detail,
);
Ok(event)
}
}
impl UIEventMethods for UIEvent {
// https://w3c.github.io/uievents/#widl-UIEvent-view
fn GetView(&self) -> Option<DomRoot<Window>> {
self.view.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-detail
fn Detail(&self) -> i32 {
self.detail.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-initUIEvent
fn InitUIEvent(
&self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
) {
let event = self.upcast::<Event>();
if event.dispatching() {
return;
}
event.init_event(Atom::from(type_), can_bubble, cancelable);
self.view.set(view);
self.detail.set(detail);
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
|
new_inherited
|
identifier_name
|
trait-object-reference-without-parens-suggestion.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn
|
() {
let _: &Copy + 'static;
//~^ ERROR expected a path
//~| HELP try adding parentheses
//~| SUGGESTION let _: &(Copy + 'static);
let _: &'static Copy + 'static;
//~^ ERROR expected a path
//~| HELP try adding parentheses
//~| SUGGESTION let _: &'static (Copy + 'static);
}
|
main
|
identifier_name
|
trait-object-reference-without-parens-suggestion.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() {
let _: &Copy + 'static;
//~^ ERROR expected a path
//~| HELP try adding parentheses
//~| SUGGESTION let _: &(Copy + 'static);
let _: &'static Copy + 'static;
//~^ ERROR expected a path
//~| HELP try adding parentheses
//~| SUGGESTION let _: &'static (Copy + 'static);
}
|
random_line_split
|
|
trait-object-reference-without-parens-suggestion.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main()
|
{
let _: &Copy + 'static;
//~^ ERROR expected a path
//~| HELP try adding parentheses
//~| SUGGESTION let _: &(Copy + 'static);
let _: &'static Copy + 'static;
//~^ ERROR expected a path
//~| HELP try adding parentheses
//~| SUGGESTION let _: &'static (Copy + 'static);
}
|
identifier_body
|
|
abortable.rs
|
use crate::task::AtomicWaker;
use futures_core::future::Future;
use futures_core::task::{Context, Poll};
use pin_utils::unsafe_pinned;
use core::fmt;
use core::pin::Pin;
use core::sync::atomic::{AtomicBool, Ordering};
use alloc::sync::Arc;
/// A future which can be remotely short-circuited using an `AbortHandle`.
#[derive(Debug, Clone)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Abortable<Fut> {
future: Fut,
inner: Arc<AbortInner>,
}
impl<Fut: Unpin> Unpin for Abortable<Fut> {}
impl<Fut> Abortable<Fut> where Fut: Future {
unsafe_pinned!(future: Fut);
/// Creates a new `Abortable` future using an existing `AbortRegistration`.
/// `AbortRegistration`s can be acquired through `AbortHandle::new`.
///
/// When `abort` is called on the handle tied to `reg` or if `abort` has
/// already been called, the future will complete immediately without making
/// any further progress.
///
/// Example:
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::future::{Abortable, AbortHandle, Aborted};
///
/// let (abort_handle, abort_registration) = AbortHandle::new_pair();
/// let future = Abortable::new(async { 2 }, abort_registration);
/// abort_handle.abort();
/// assert_eq!(future.await, Err(Aborted));
/// # });
/// ```
pub fn new(future: Fut, reg: AbortRegistration) -> Self {
Abortable {
future,
inner: reg.inner,
}
}
}
/// A registration handle for a `Abortable` future.
/// Values of this type can be acquired from `AbortHandle::new` and are used
/// in calls to `Abortable::new`.
#[derive(Debug)]
pub struct AbortRegistration {
inner: Arc<AbortInner>,
}
/// A handle to a `Abortable` future.
#[derive(Debug, Clone)]
pub struct AbortHandle {
inner: Arc<AbortInner>,
}
impl AbortHandle {
/// Creates an (`AbortHandle`, `AbortRegistration`) pair which can be used
/// to abort a running future.
///
/// This function is usually paired with a call to `Abortable::new`.
///
/// Example:
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::future::{Abortable, AbortHandle, Aborted};
///
/// let (abort_handle, abort_registration) = AbortHandle::new_pair();
/// let future = Abortable::new(async { 2 }, abort_registration);
/// abort_handle.abort();
/// assert_eq!(future.await, Err(Aborted));
/// # });
/// ```
pub fn new_pair() -> (Self, AbortRegistration) {
let inner = Arc::new(AbortInner {
waker: AtomicWaker::new(),
cancel: AtomicBool::new(false),
});
(
AbortHandle {
inner: inner.clone(),
},
AbortRegistration {
inner,
},
)
}
}
// Inner type storing the waker to awaken and a bool indicating that it
// should be cancelled.
#[derive(Debug)]
struct AbortInner {
waker: AtomicWaker,
cancel: AtomicBool,
}
/// Creates a new `Abortable` future and a `AbortHandle` which can be used to stop it.
///
/// This function is a convenient (but less flexible) alternative to calling
/// `AbortHandle::new` and `Abortable::new` manually.
///
|
let (handle, reg) = AbortHandle::new_pair();
(
Abortable::new(future, reg),
handle,
)
}
/// Indicator that the `Abortable` future was aborted.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Aborted;
impl fmt::Display for Aborted {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "`Abortable` future has been aborted")
}
}
#[cfg(feature = "std")]
impl std::error::Error for Aborted {}
impl<Fut> Future for Abortable<Fut> where Fut: Future {
type Output = Result<Fut::Output, Aborted>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// Check if the future has been aborted
if self.inner.cancel.load(Ordering::Relaxed) {
return Poll::Ready(Err(Aborted))
}
// attempt to complete the future
if let Poll::Ready(x) = self.as_mut().future().poll(cx) {
return Poll::Ready(Ok(x))
}
// Register to receive a wakeup if the future is aborted in the... future
self.inner.waker.register(cx.waker());
// Check to see if the future was aborted between the first check and
// registration.
// Checking with `Relaxed` is sufficient because `register` introduces an
// `AcqRel` barrier.
if self.inner.cancel.load(Ordering::Relaxed) {
return Poll::Ready(Err(Aborted))
}
Poll::Pending
}
}
impl AbortHandle {
/// Abort the `Abortable` future associated with this handle.
///
/// Notifies the Abortable future associated with this handle that it
/// should abort. Note that if the future is currently being polled on
/// another thread, it will not immediately stop running. Instead, it will
/// continue to run until its poll method returns.
pub fn abort(&self) {
self.inner.cancel.store(true, Ordering::Relaxed);
self.inner.waker.wake();
}
}
|
/// This function is only available when the `std` or `alloc` feature of this
/// library is activated, and it is activated by default.
pub fn abortable<Fut>(future: Fut) -> (Abortable<Fut>, AbortHandle)
where Fut: Future
{
|
random_line_split
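The `poll` implementation in the abortable.rs record above depends on a check / register-waker / re-check sequence so that an `abort` racing with `poll` can never be missed. The sketch below reproduces just that control flow with plain `std` atomics and a stored callback standing in for the real `AtomicWaker` (a `Mutex` provides the synchronization that `register`'s AcqRel barrier provides in futures-rs); it illustrates the ordering argument, it is not the futures-rs implementation.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Mutex;

// Simplified stand-in for AbortInner: a cancel flag plus a callback to run on wake.
struct FakeInner {
    cancel: AtomicBool,
    waker: Mutex<Option<Box<dyn Fn() + Send>>>,
}

impl FakeInner {
    fn new() -> Self {
        FakeInner { cancel: AtomicBool::new(false), waker: Mutex::new(None) }
    }

    // Mirrors Abortable::poll: check, register the waker, then check again so
    // an abort() landing between the two checks is still observed.
    fn poll(&self, wake: Box<dyn Fn() + Send>) -> bool /* true = aborted */ {
        if self.cancel.load(Ordering::Relaxed) {
            return true;
        }
        *self.waker.lock().unwrap() = Some(wake);
        self.cancel.load(Ordering::Relaxed)
    }

    // Mirrors AbortHandle::abort: set the flag, then wake the task.
    fn abort(&self) {
        self.cancel.store(true, Ordering::Relaxed);
        if let Some(w) = self.waker.lock().unwrap().take() {
            w();
        }
    }
}

fn main() {
    let inner = FakeInner::new();
    assert!(!inner.poll(Box::new(|| println!("woken"))));
    inner.abort(); // prints "woken" and sets the flag
    assert!(inner.poll(Box::new(|| ())));
}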
|
abortable.rs
|
use crate::task::AtomicWaker;
use futures_core::future::Future;
use futures_core::task::{Context, Poll};
use pin_utils::unsafe_pinned;
use core::fmt;
use core::pin::Pin;
use core::sync::atomic::{AtomicBool, Ordering};
use alloc::sync::Arc;
/// A future which can be remotely short-circuited using an `AbortHandle`.
#[derive(Debug, Clone)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Abortable<Fut> {
future: Fut,
inner: Arc<AbortInner>,
}
impl<Fut: Unpin> Unpin for Abortable<Fut> {}
impl<Fut> Abortable<Fut> where Fut: Future {
unsafe_pinned!(future: Fut);
/// Creates a new `Abortable` future using an existing `AbortRegistration`.
/// `AbortRegistration`s can be acquired through `AbortHandle::new`.
///
/// When `abort` is called on the handle tied to `reg` or if `abort` has
/// already been called, the future will complete immediately without making
/// any further progress.
///
/// Example:
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::future::{Abortable, AbortHandle, Aborted};
///
/// let (abort_handle, abort_registration) = AbortHandle::new_pair();
/// let future = Abortable::new(async { 2 }, abort_registration);
/// abort_handle.abort();
/// assert_eq!(future.await, Err(Aborted));
/// # });
/// ```
pub fn new(future: Fut, reg: AbortRegistration) -> Self {
Abortable {
future,
inner: reg.inner,
}
}
}
/// A registration handle for a `Abortable` future.
/// Values of this type can be acquired from `AbortHandle::new` and are used
/// in calls to `Abortable::new`.
#[derive(Debug)]
pub struct AbortRegistration {
inner: Arc<AbortInner>,
}
/// A handle to a `Abortable` future.
#[derive(Debug, Clone)]
pub struct AbortHandle {
inner: Arc<AbortInner>,
}
impl AbortHandle {
/// Creates an (`AbortHandle`, `AbortRegistration`) pair which can be used
/// to abort a running future.
///
/// This function is usually paired with a call to `Abortable::new`.
///
/// Example:
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::future::{Abortable, AbortHandle, Aborted};
///
/// let (abort_handle, abort_registration) = AbortHandle::new_pair();
/// let future = Abortable::new(async { 2 }, abort_registration);
/// abort_handle.abort();
/// assert_eq!(future.await, Err(Aborted));
/// # });
/// ```
pub fn new_pair() -> (Self, AbortRegistration) {
let inner = Arc::new(AbortInner {
waker: AtomicWaker::new(),
cancel: AtomicBool::new(false),
});
(
AbortHandle {
inner: inner.clone(),
},
AbortRegistration {
inner,
},
)
}
}
// Inner type storing the waker to awaken and a bool indicating that it
// should be cancelled.
#[derive(Debug)]
struct AbortInner {
waker: AtomicWaker,
cancel: AtomicBool,
}
/// Creates a new `Abortable` future and a `AbortHandle` which can be used to stop it.
///
/// This function is a convenient (but less flexible) alternative to calling
/// `AbortHandle::new` and `Abortable::new` manually.
///
/// This function is only available when the `std` or `alloc` feature of this
/// library is activated, and it is activated by default.
pub fn abortable<Fut>(future: Fut) -> (Abortable<Fut>, AbortHandle)
where Fut: Future
{
let (handle, reg) = AbortHandle::new_pair();
(
Abortable::new(future, reg),
handle,
)
}
/// Indicator that the `Abortable` future was aborted.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Aborted;
impl fmt::Display for Aborted {
fn
|
(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "`Abortable` future has been aborted")
}
}
#[cfg(feature = "std")]
impl std::error::Error for Aborted {}
impl<Fut> Future for Abortable<Fut> where Fut: Future {
type Output = Result<Fut::Output, Aborted>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// Check if the future has been aborted
if self.inner.cancel.load(Ordering::Relaxed) {
return Poll::Ready(Err(Aborted))
}
// attempt to complete the future
if let Poll::Ready(x) = self.as_mut().future().poll(cx) {
return Poll::Ready(Ok(x))
}
// Register to receive a wakeup if the future is aborted in the... future
self.inner.waker.register(cx.waker());
// Check to see if the future was aborted between the first check and
// registration.
// Checking with `Relaxed` is sufficient because `register` introduces an
// `AcqRel` barrier.
if self.inner.cancel.load(Ordering::Relaxed) {
return Poll::Ready(Err(Aborted))
}
Poll::Pending
}
}
impl AbortHandle {
/// Abort the `Abortable` future associated with this handle.
///
/// Notifies the Abortable future associated with this handle that it
/// should abort. Note that if the future is currently being polled on
/// another thread, it will not immediately stop running. Instead, it will
/// continue to run until its poll method returns.
pub fn abort(&self) {
self.inner.cancel.store(true, Ordering::Relaxed);
self.inner.waker.wake();
}
}
|
fmt
|
identifier_name
|
abortable.rs
|
use crate::task::AtomicWaker;
use futures_core::future::Future;
use futures_core::task::{Context, Poll};
use pin_utils::unsafe_pinned;
use core::fmt;
use core::pin::Pin;
use core::sync::atomic::{AtomicBool, Ordering};
use alloc::sync::Arc;
/// A future which can be remotely short-circuited using an `AbortHandle`.
#[derive(Debug, Clone)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Abortable<Fut> {
future: Fut,
inner: Arc<AbortInner>,
}
impl<Fut: Unpin> Unpin for Abortable<Fut> {}
impl<Fut> Abortable<Fut> where Fut: Future {
unsafe_pinned!(future: Fut);
/// Creates a new `Abortable` future using an existing `AbortRegistration`.
/// `AbortRegistration`s can be acquired through `AbortHandle::new`.
///
/// When `abort` is called on the handle tied to `reg` or if `abort` has
/// already been called, the future will complete immediately without making
/// any further progress.
///
/// Example:
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::future::{Abortable, AbortHandle, Aborted};
///
/// let (abort_handle, abort_registration) = AbortHandle::new_pair();
/// let future = Abortable::new(async { 2 }, abort_registration);
/// abort_handle.abort();
/// assert_eq!(future.await, Err(Aborted));
/// # });
/// ```
pub fn new(future: Fut, reg: AbortRegistration) -> Self {
Abortable {
future,
inner: reg.inner,
}
}
}
/// A registration handle for a `Abortable` future.
/// Values of this type can be acquired from `AbortHandle::new` and are used
/// in calls to `Abortable::new`.
#[derive(Debug)]
pub struct AbortRegistration {
inner: Arc<AbortInner>,
}
/// A handle to a `Abortable` future.
#[derive(Debug, Clone)]
pub struct AbortHandle {
inner: Arc<AbortInner>,
}
impl AbortHandle {
/// Creates an (`AbortHandle`, `AbortRegistration`) pair which can be used
/// to abort a running future.
///
/// This function is usually paired with a call to `Abortable::new`.
///
/// Example:
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::future::{Abortable, AbortHandle, Aborted};
///
/// let (abort_handle, abort_registration) = AbortHandle::new_pair();
/// let future = Abortable::new(async { 2 }, abort_registration);
/// abort_handle.abort();
/// assert_eq!(future.await, Err(Aborted));
/// # });
/// ```
pub fn new_pair() -> (Self, AbortRegistration) {
let inner = Arc::new(AbortInner {
waker: AtomicWaker::new(),
cancel: AtomicBool::new(false),
});
(
AbortHandle {
inner: inner.clone(),
},
AbortRegistration {
inner,
},
)
}
}
// Inner type storing the waker to awaken and a bool indicating that it
// should be cancelled.
#[derive(Debug)]
struct AbortInner {
waker: AtomicWaker,
cancel: AtomicBool,
}
/// Creates a new `Abortable` future and a `AbortHandle` which can be used to stop it.
///
/// This function is a convenient (but less flexible) alternative to calling
/// `AbortHandle::new` and `Abortable::new` manually.
///
/// This function is only available when the `std` or `alloc` feature of this
/// library is activated, and it is activated by default.
pub fn abortable<Fut>(future: Fut) -> (Abortable<Fut>, AbortHandle)
where Fut: Future
{
let (handle, reg) = AbortHandle::new_pair();
(
Abortable::new(future, reg),
handle,
)
}
/// Indicator that the `Abortable` future was aborted.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Aborted;
impl fmt::Display for Aborted {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "`Abortable` future has been aborted")
}
}
#[cfg(feature = "std")]
impl std::error::Error for Aborted {}
impl<Fut> Future for Abortable<Fut> where Fut: Future {
type Output = Result<Fut::Output, Aborted>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// Check if the future has been aborted
if self.inner.cancel.load(Ordering::Relaxed)
|
// attempt to complete the future
if let Poll::Ready(x) = self.as_mut().future().poll(cx) {
return Poll::Ready(Ok(x))
}
// Register to receive a wakeup if the future is aborted in the... future
self.inner.waker.register(cx.waker());
// Check to see if the future was aborted between the first check and
// registration.
// Checking with `Relaxed` is sufficient because `register` introduces an
// `AcqRel` barrier.
if self.inner.cancel.load(Ordering::Relaxed) {
return Poll::Ready(Err(Aborted))
}
Poll::Pending
}
}
impl AbortHandle {
/// Abort the `Abortable` future associated with this handle.
///
/// Notifies the Abortable future associated with this handle that it
/// should abort. Note that if the future is currently being polled on
/// another thread, it will not immediately stop running. Instead, it will
/// continue to run until its poll method returns.
pub fn abort(&self) {
self.inner.cancel.store(true, Ordering::Relaxed);
self.inner.waker.wake();
}
}
|
{
return Poll::Ready(Err(Aborted))
}
|
conditional_block
|
abortable.rs
|
use crate::task::AtomicWaker;
use futures_core::future::Future;
use futures_core::task::{Context, Poll};
use pin_utils::unsafe_pinned;
use core::fmt;
use core::pin::Pin;
use core::sync::atomic::{AtomicBool, Ordering};
use alloc::sync::Arc;
/// A future which can be remotely short-circuited using an `AbortHandle`.
#[derive(Debug, Clone)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Abortable<Fut> {
future: Fut,
inner: Arc<AbortInner>,
}
impl<Fut: Unpin> Unpin for Abortable<Fut> {}
impl<Fut> Abortable<Fut> where Fut: Future {
unsafe_pinned!(future: Fut);
/// Creates a new `Abortable` future using an existing `AbortRegistration`.
/// `AbortRegistration`s can be acquired through `AbortHandle::new`.
///
/// When `abort` is called on the handle tied to `reg` or if `abort` has
/// already been called, the future will complete immediately without making
/// any further progress.
///
/// Example:
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::future::{Abortable, AbortHandle, Aborted};
///
/// let (abort_handle, abort_registration) = AbortHandle::new_pair();
/// let future = Abortable::new(async { 2 }, abort_registration);
/// abort_handle.abort();
/// assert_eq!(future.await, Err(Aborted));
/// # });
/// ```
pub fn new(future: Fut, reg: AbortRegistration) -> Self
|
}
/// A registration handle for a `Abortable` future.
/// Values of this type can be acquired from `AbortHandle::new` and are used
/// in calls to `Abortable::new`.
#[derive(Debug)]
pub struct AbortRegistration {
inner: Arc<AbortInner>,
}
/// A handle to a `Abortable` future.
#[derive(Debug, Clone)]
pub struct AbortHandle {
inner: Arc<AbortInner>,
}
impl AbortHandle {
/// Creates an (`AbortHandle`, `AbortRegistration`) pair which can be used
/// to abort a running future.
///
/// This function is usually paired with a call to `Abortable::new`.
///
/// Example:
///
/// ```
/// # futures::executor::block_on(async {
/// use futures::future::{Abortable, AbortHandle, Aborted};
///
/// let (abort_handle, abort_registration) = AbortHandle::new_pair();
/// let future = Abortable::new(async { 2 }, abort_registration);
/// abort_handle.abort();
/// assert_eq!(future.await, Err(Aborted));
/// # });
/// ```
pub fn new_pair() -> (Self, AbortRegistration) {
let inner = Arc::new(AbortInner {
waker: AtomicWaker::new(),
cancel: AtomicBool::new(false),
});
(
AbortHandle {
inner: inner.clone(),
},
AbortRegistration {
inner,
},
)
}
}
// Inner type storing the waker to awaken and a bool indicating that it
// should be cancelled.
#[derive(Debug)]
struct AbortInner {
waker: AtomicWaker,
cancel: AtomicBool,
}
/// Creates a new `Abortable` future and a `AbortHandle` which can be used to stop it.
///
/// This function is a convenient (but less flexible) alternative to calling
/// `AbortHandle::new` and `Abortable::new` manually.
///
/// This function is only available when the `std` or `alloc` feature of this
/// library is activated, and it is activated by default.
pub fn abortable<Fut>(future: Fut) -> (Abortable<Fut>, AbortHandle)
where Fut: Future
{
let (handle, reg) = AbortHandle::new_pair();
(
Abortable::new(future, reg),
handle,
)
}
/// Indicator that the `Abortable` future was aborted.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Aborted;
impl fmt::Display for Aborted {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "`Abortable` future has been aborted")
}
}
#[cfg(feature = "std")]
impl std::error::Error for Aborted {}
impl<Fut> Future for Abortable<Fut> where Fut: Future {
type Output = Result<Fut::Output, Aborted>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// Check if the future has been aborted
if self.inner.cancel.load(Ordering::Relaxed) {
return Poll::Ready(Err(Aborted))
}
// attempt to complete the future
if let Poll::Ready(x) = self.as_mut().future().poll(cx) {
return Poll::Ready(Ok(x))
}
// Register to receive a wakeup if the future is aborted in the... future
self.inner.waker.register(cx.waker());
// Check to see if the future was aborted between the first check and
// registration.
// Checking with `Relaxed` is sufficient because `register` introduces an
// `AcqRel` barrier.
if self.inner.cancel.load(Ordering::Relaxed) {
return Poll::Ready(Err(Aborted))
}
Poll::Pending
}
}
impl AbortHandle {
/// Abort the `Abortable` future associated with this handle.
///
/// Notifies the Abortable future associated with this handle that it
/// should abort. Note that if the future is currently being polled on
/// another thread, it will not immediately stop running. Instead, it will
/// continue to run until its poll method returns.
pub fn abort(&self) {
self.inner.cancel.store(true, Ordering::Relaxed);
self.inner.waker.wake();
}
}
|
{
Abortable {
future,
inner: reg.inner,
}
}
|
identifier_body
|
preprocess.rs
|
// C preprocessor
use crate::token::*;
use std::cell::RefCell;
use std::collections::HashMap;
thread_local! {
static MACROS: RefCell<HashMap<String, Macro>> = RefCell::new(HashMap::new());
static ENV: RefCell<Env> = RefCell::new(Env::new());
}
fn set_env(env: Env) {
ENV.with(|c| {
*c.borrow_mut() = env;
})
}
fn get_env() -> Option<Env> {
ENV.with(|c| {
Some(c.borrow().clone())
})
}
fn env_pop() {
ENV.with(|c| {
let prev = c.borrow().prev.clone();
if prev.is_some() {
|
}
})
}
fn env_output() -> Vec<Token> {
ENV.with(|c| {
return c.borrow().output.clone();
})
}
fn macros_get(key: &String) -> Option<Macro> {
MACROS.with(|m| {
match m.borrow().get(key) {
Some(v) => Some(v.clone()),
None => None,
}
})
}
fn macros_put(key: String, value: Macro) {
MACROS.with(|m| {
m.borrow_mut().insert(key, value);
})
}
#[derive(Clone, Debug)]
struct Env {
input: Vec<Token>,
output: Vec<Token>,
pos: usize,
prev: Option<Box<Env>>,
}
impl Env {
fn new() -> Env {
Env {
input: Vec::new(),
output: Vec::new(),
pos: 0,
prev: None,
}
}
}
fn new_env(prev: Option<Env>, input: Vec<Token>) -> Env {
let mut ctx = Env::new();
ctx.input = input;
if prev.is_none() {
ctx.prev = None;
} else {
ctx.prev = Some(Box::new(prev.unwrap()));
}
return ctx;
}
#[derive(Clone, Debug, PartialEq)]
enum MacroType {
OBJLIKE,
FUNCLIKE,
}
#[derive(Clone, Debug)]
struct Macro {
ty: MacroType,
tokens: Vec<Token>,
params: Vec<String>,
}
fn new_macro(ty: MacroType, name: String) {
let m = Macro {
ty: ty,
tokens: Vec::new(),
params: Vec::new(),
};
macros_put(name, m);
}
fn append(v: &mut Vec<Token>) {
ENV.with(|env| {
env.borrow_mut().output.append(v);
})
}
fn emit(t: Token) {
ENV.with(|env| {
env.borrow_mut().output.push(t);
})
}
fn next() -> Token {
ENV.with(|env| {
let pos = env.borrow().pos;
assert!(pos < env.borrow().input.len());
env.borrow_mut().pos += 1;
return env.borrow().input[pos].clone();
})
}
fn is_eof() -> bool {
ENV.with(|c| {
let env = c.borrow();
return env.pos == env.input.len();
})
}
fn get(ty: TokenType, msg: String) -> Token {
let t = next();
if t.ty != ty {
bad_token(&t, msg);
}
return t;
}
fn ident(msg: String) -> String {
let t = get(TokenType::IDENT, msg);
return t.name.clone();
}
fn peek() -> Token {
ENV.with(|c| {
let env = c.borrow();
return env.input[env.pos].clone();
})
}
fn consume(ty: TokenType) -> bool {
if peek().ty != ty {
return false;
}
ENV.with(|c| {
c.borrow_mut().pos += 1;
});
return true;
}
fn read_until_eol() -> Vec<Token> {
let mut v = Vec::new();
while !is_eof() {
let t = next();
if t.ty == TokenType::NEW_LINE {
break;
}
v.push(t.clone());
}
return v;
}
fn new_int(tmpl: & Token, val: i32) -> Token {
let mut t = tmpl.clone();
t.ty = TokenType::NUM;
t.val = val;
return t;
}
fn new_string(tmpl: & Token, str_cnt: String, len: usize) -> Token {
let mut t = tmpl.clone();
t.ty = TokenType::STR;
t.str_cnt = str_cnt;
t.len = len;
return t;
}
fn new_param(tmpl: & Token, val: i32) -> Token {
let mut t = tmpl.clone();
t.ty = TokenType::PARAM;
t.val = val;
return t;
}
fn is_ident(t: &Token, s: &str) -> bool {
return t.ty == TokenType::IDENT && t.name == s;
}
// Replaces macro parameter tokens with TokenType::PARAM tokens.
fn replace_macro_params(m: &mut Macro) {
let params = m.params.clone();
let mut tokens = m.tokens.clone();
let mut map = HashMap::new();
for i in 0..params.len() {
let name = params[i].clone();
map.insert(name, i as i32);
}
for i in 0..tokens.len() {
let t = &tokens[i].clone();
if t.ty != TokenType::IDENT {
continue;
}
let n = match map.get(&t.name) {
Some(i) => *i,
None => {
continue;
}
};
tokens.remove(i);
tokens.insert(i, new_param(t, n));
}
m.tokens = tokens;
}
// Replaces '#' followed by a macro parameter.
fn replace_hash_ident(m: &mut Macro) {
let tokens = m.tokens.clone();
let mut v = Vec::new();
let mut i = 0;
while i < tokens.len()-1 {
let t1 = tokens[i].clone();
let mut t2 = tokens[i+1].clone();
if t1.ty == TokenType::SHARP && t2.ty == TokenType::PARAM {
t2.stringize = true;
v.push(t2);
i += 1;
} else {
v.push(t1);
}
i += 1;
}
    if i + 1 == tokens.len() {
v.push(tokens[i].clone());
}
m.tokens = v;
}
fn read_one_arg() -> Vec<Token> {
let mut v = Vec::new();
let start = peek();
let mut level = 0;
    while !is_eof() {
let t = peek();
if level == 0 {
if t.ty == TokenType::KET || t.ty == TokenType::COMMA {
return v;
}
}
next();
if t.ty == TokenType::BRA {
level += 1;
} else if t.ty == TokenType::KET {
level -= 1;
}
v.push(t);
}
bad_token(&start, "unclosed macro argument".to_string());
panic!();
}
fn read_args() -> HashMap<usize, Vec<Token>> {
let mut v = HashMap::new();
if consume(TokenType::KET) {
return v;
}
let i = v.len();
v.insert(i, read_one_arg());
    while !consume(TokenType::KET) {
get(TokenType::COMMA, "comma expected".to_string());
let i = v.len();
v.insert(i, read_one_arg());
}
return v;
}
fn emit_special_macro(t: & Token) -> bool {
if is_ident(t, "__LINE__") {
emit(new_int(t, get_line_number(t)));
return true;
}
return false;
}
fn apply_objlike(m: &mut Macro, _start: & Token) {
for t in m.tokens.iter() {
if emit_special_macro(t) {
continue;
}
emit(t.clone());
}
}
fn apply_functionlike(m: &mut Macro, start: & Token) {
    get(TokenType::BRA, "'(' expected".to_string());
let args = read_args();
    if m.params.len() != args.len() {
        bad_token(&start, format!("number of parameters does not match ({} != {})", m.params.len(), args.len()));
panic!();
}
for i in 0..m.tokens.len() {
let t = &m.tokens[i];
if emit_special_macro(t) {
continue;
}
if t.ty == TokenType::PARAM {
if t.stringize {
let j = t.val as usize;
let s = stringize(args.get(&j).unwrap().clone());
emit(new_string(t, s.clone(), s.len()));
} else {
let j = t.val as usize;
append(&mut args.get(&j).unwrap().clone());
}
continue;
}
emit(t.clone());
}
}
fn apply(m: &mut Macro, start: & Token) {
if m.ty == MacroType::OBJLIKE {
apply_objlike(m, start);
} else {
apply_functionlike(m, start);
}
}
fn define_funclike(name: String) {
new_macro(MacroType::FUNCLIKE, name.clone());
let mut m = macros_get(&name).unwrap();
    while !consume(TokenType::KET) {
if m.params.len() > 0 {
get(TokenType::COMMA, ", expected".to_string());
}
m.params.push(ident("parameter name expected".to_string()));
}
m.tokens = read_until_eol();
replace_macro_params(&mut m);
replace_hash_ident(&mut m);
macros_put(name, m);
}
fn define_objlike(name: String) {
new_macro(MacroType::OBJLIKE, name.clone());
let mut m = macros_get(&name).unwrap();
m.tokens = read_until_eol();
macros_put(name, m);
}
fn define() {
let name = ident("macro name expected".to_string());
if consume(TokenType::BRA) {
return define_funclike(name);
}
return define_objlike(name);
}
fn include() {
let t = get(TokenType::STR, "string expected".to_string());
let path = t.str_cnt;
get(TokenType::NEW_LINE, "newline expected".to_string());
return append(&mut tokenize(path, false));
}
pub fn preprocess(tokens: Vec<Token>) -> Vec<Token> {
set_env(new_env(get_env(), tokens.clone()));
    while !is_eof() {
let mut t = next();
if t.ty == TokenType::IDENT {
match macros_get(&t.name) {
Some(macro_token) => {
apply(&mut macro_token.clone(), &t);
}
None => {
emit(t.clone());
}
}
continue;
}
        if t.ty != TokenType::SHARP {
emit(t.clone());
continue;
}
t = get(TokenType::IDENT, "identifier expected".to_string());
if t.name == "define" {
define();
} else if t.name == "include" {
include();
} else {
bad_token(&t, "unknown directive".to_string());
}
}
let v = env_output();
env_pop();
return v;
}
|
*c.borrow_mut() = *prev.unwrap();
|
random_line_split
|
preprocess.rs
|
// C preprocessor
use crate::token::*;
use std::cell::RefCell;
use std::collections::HashMap;
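// All preprocessor state lives in thread-local storage: MACROS maps a macro
// name to its definition, and ENV holds the token stream currently being
// expanded (plus a link to the enclosing stream for nested runs).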
thread_local! {
static MACROS: RefCell<HashMap<String, Macro>> = RefCell::new(HashMap::new());
static ENV: RefCell<Env> = RefCell::new(Env::new());
}
fn set_env(env: Env) {
ENV.with(|c| {
*c.borrow_mut() = env;
})
}
fn get_env() -> Option<Env> {
ENV.with(|c| {
Some(c.borrow().clone())
})
}
fn env_pop() {
ENV.with(|c| {
let prev = c.borrow().prev.clone();
if prev.is_some() {
*c.borrow_mut() = *prev.unwrap();
}
})
}
fn env_output() -> Vec<Token> {
ENV.with(|c| {
return c.borrow().output.clone();
})
}
fn
|
(key: &String) -> Option<Macro> {
MACROS.with(|m| {
match m.borrow().get(key) {
Some(v) => Some(v.clone()),
None => None,
}
})
}
fn macros_put(key: String, value: Macro) {
MACROS.with(|m| {
m.borrow_mut().insert(key, value);
})
}
#[derive(Clone, Debug)]
struct Env {
input: Vec<Token>,
output: Vec<Token>,
pos: usize,
prev: Option<Box<Env>>,
}
impl Env {
fn new() -> Env {
Env {
input: Vec::new(),
output: Vec::new(),
pos: 0,
prev: None,
}
}
}
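// Builds the Env for a fresh preprocessing pass. The previous Env (if any) is
// boxed into `prev` so that a nested call to preprocess() can restore the
// caller's state via env_pop() when it finishes.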
fn new_env(prev: Option<Env>, input: Vec<Token>) -> Env {
let mut ctx = Env::new();
ctx.input = input;
if prev.is_none() {
ctx.prev = None;
} else {
ctx.prev = Some(Box::new(prev.unwrap()));
}
return ctx;
}
#[derive(Clone, Debug, PartialEq)]
enum MacroType {
OBJLIKE,
FUNCLIKE,
}
#[derive(Clone, Debug)]
struct Macro {
ty: MacroType,
tokens: Vec<Token>,
params: Vec<String>,
}
fn new_macro(ty: MacroType, name: String) {
let m = Macro {
ty: ty,
tokens: Vec::new(),
params: Vec::new(),
};
macros_put(name, m);
}
fn append(v: &mut Vec<Token>) {
ENV.with(|env| {
env.borrow_mut().output.append(v);
})
}
fn emit(t: Token) {
ENV.with(|env| {
env.borrow_mut().output.push(t);
})
}
fn next() -> Token {
ENV.with(|env| {
let pos = env.borrow().pos;
assert!(pos < env.borrow().input.len());
env.borrow_mut().pos += 1;
return env.borrow().input[pos].clone();
})
}
fn is_eof() -> bool {
ENV.with(|c| {
let env = c.borrow();
return env.pos == env.input.len();
})
}
fn get(ty: TokenType, msg: String) -> Token {
let t = next();
    if t.ty != ty {
bad_token(&t, msg);
}
return t;
}
fn ident(msg: String) -> String {
let t = get(TokenType::IDENT, msg);
return t.name.clone();
}
fn peek() -> Token {
ENV.with(|c| {
let env = c.borrow();
return env.input[env.pos].clone();
})
}
fn consume(ty: TokenType) -> bool {
    if peek().ty != ty {
return false;
}
ENV.with(|c| {
c.borrow_mut().pos += 1;
});
return true;
}
fn read_until_eol() -> Vec<Token> {
let mut v = Vec::new();
    while !is_eof() {
let t = next();
if t.ty == TokenType::NEW_LINE {
break;
}
v.push(t.clone());
}
return v;
}
fn new_int(tmpl: & Token, val: i32) -> Token {
let mut t = tmpl.clone();
t.ty = TokenType::NUM;
t.val = val;
return t;
}
fn new_string(tmpl: & Token, str_cnt: String, len: usize) -> Token {
let mut t = tmpl.clone();
t.ty = TokenType::STR;
t.str_cnt = str_cnt;
t.len = len;
return t;
}
fn new_param(tmpl: & Token, val: i32) -> Token {
let mut t = tmpl.clone();
t.ty = TokenType::PARAM;
t.val = val;
return t;
}
fn is_ident(t: &Token, s: &str) -> bool {
return t.ty == TokenType::IDENT && t.name == s;
}
// Replaces macro parameter tokens with TokenType::PARAM tokens.
fn replace_macro_params(m: &mut Macro) {
let params = m.params.clone();
let mut tokens = m.tokens.clone();
let mut map = HashMap::new();
for i in 0..params.len() {
let name = params[i].clone();
map.insert(name, i as i32);
}
for i in 0..tokens.len() {
let t = &tokens[i].clone();
        if t.ty != TokenType::IDENT {
continue;
}
let n = match map.get(&t.name) {
Some(i) => *i,
None => {
continue;
}
};
tokens.remove(i);
tokens.insert(i, new_param(t, n));
}
m.tokens = tokens;
}
// Replaces '#' followed by a macro parameter.
fn replace_hash_ident(m: &mut Macro) {
let tokens = m.tokens.clone();
let mut v = Vec::new();
let mut i = 0;
    while i + 1 < tokens.len() {
let t1 = tokens[i].clone();
let mut t2 = tokens[i+1].clone();
if t1.ty == TokenType::SHARP && t2.ty == TokenType::PARAM {
t2.stringize = true;
v.push(t2);
i += 1;
} else {
v.push(t1);
}
i += 1;
}
    if i + 1 == tokens.len() {
v.push(tokens[i].clone());
}
m.tokens = v;
}
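// Reads the tokens of a single macro argument, tracking parenthesis depth so
// that commas inside nested parentheses do not end the argument early.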
fn read_one_arg() -> Vec<Token> {
let mut v = Vec::new();
let start = peek();
let mut level = 0;
    while !is_eof() {
let t = peek();
if level == 0 {
if t.ty == TokenType::KET || t.ty == TokenType::COMMA {
return v;
}
}
next();
if t.ty == TokenType::BRA {
level += 1;
} else if t.ty == TokenType::KET {
level -= 1;
}
v.push(t);
}
bad_token(&start, "unclosed macro argument".to_string());
panic!();
}
fn read_args() -> HashMap<usize, Vec<Token>> {
let mut v = HashMap::new();
if consume(TokenType::KET) {
return v;
}
let i = v.len();
v.insert(i, read_one_arg());
    while !consume(TokenType::KET) {
get(TokenType::COMMA, "comma expected".to_string());
let i = v.len();
v.insert(i, read_one_arg());
}
return v;
}
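// Expands built-in macros. Only __LINE__ is handled here; it is replaced by a
// numeric token carrying the line number of the originating token.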
fn emit_special_macro(t: & Token) -> bool {
if is_ident(t, "__LINE__") {
emit(new_int(t, get_line_number(t)));
return true;
}
return false;
}
fn apply_objlike(m: &mut Macro, _start: & Token) {
for t in m.tokens.iter() {
if emit_special_macro(t) {
continue;
}
emit(t.clone());
}
}
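// Expands a function-like macro: reads the '(' argument-list ')' that follows
// the macro name, then walks the stored body, splicing in the collected
// arguments for PARAM tokens and stringizing those marked by a leading '#'.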
fn apply_functionlike(m: &mut Macro, start: & Token) {
    get(TokenType::BRA, "'(' expected".to_string());
let args = read_args();
    if m.params.len() != args.len() {
        bad_token(&start, format!("number of parameters does not match ({} != {})", m.params.len(), args.len()));
panic!();
}
for i in 0..m.tokens.len() {
let t = &m.tokens[i];
if emit_special_macro(t) {
continue;
}
if t.ty == TokenType::PARAM {
if t.stringize {
let j = t.val as usize;
let s = stringize(args.get(&j).unwrap().clone());
emit(new_string(t, s.clone(), s.len()));
} else {
let j = t.val as usize;
append(&mut args.get(&j).unwrap().clone());
}
continue;
}
emit(t.clone());
}
}
fn apply(m: &mut Macro, start: & Token) {
if m.ty == MacroType::OBJLIKE {
apply_objlike(m, start);
} else {
apply_functionlike(m, start);
}
}
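// Parses `#define NAME(p1, p2, ...) body`: collects the parameter names up to
// ')', reads the body to the end of the line, and rewrites parameter (and
// `#parameter`) occurrences into PARAM tokens for later substitution.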
fn define_funclike(name: String) {
new_macro(MacroType::FUNCLIKE, name.clone());
let mut m = macros_get(&name).unwrap();
    while !consume(TokenType::KET) {
if m.params.len() > 0 {
get(TokenType::COMMA, ", expected".to_string());
}
m.params.push(ident("parameter name expected".to_string()));
}
m.tokens = read_until_eol();
replace_macro_params(&mut m);
replace_hash_ident(&mut m);
macros_put(name, m);
}
fn define_objlike(name: String) {
new_macro(MacroType::OBJLIKE, name.clone());
let mut m = macros_get(&name).unwrap();
m.tokens = read_until_eol();
macros_put(name, m);
}
fn define() {
let name = ident("macro name expected".to_string());
if consume(TokenType::BRA) {
return define_funclike(name);
}
return define_objlike(name);
}
fn include() {
let t = get(TokenType::STR, "string expected".to_string());
let path = t.str_cnt;
get(TokenType::NEW_LINE, "newline expected".to_string());
return append(&mut tokenize(path, false));
}
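// Drives one preprocessing pass over `tokens`. A small worked example of what
// the loop below produces (a sketch; the exact token spellings depend on the
// tokenizer in token.rs):
//
//   #define SQUARE(x) ((x) * (x))
//   #define MSG(x) #x
//   SQUARE(2)   ->  ( ( 2 ) * ( 2 ) )
//   MSG(hello)  ->  "hello"        (via the stringize path above)
//
// The input is expected to be the token stream produced by tokenize() in
// token.rs, as used by include() above.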
pub fn preprocess(tokens: Vec<Token>) -> Vec<Token> {
set_env(new_env(get_env(), tokens.clone()));
    while !is_eof() {
let mut t = next();
if t.ty == TokenType::IDENT {
match macros_get(&t.name) {
Some(macro_token) => {
apply(&mut macro_token.clone(), &t);
}
None => {
emit(t.clone());
}
}
continue;
}
        if t.ty != TokenType::SHARP {
emit(t.clone());
continue;
}
t = get(TokenType::IDENT, "identifier expected".to_string());
if t.name == "define" {
define();
} else if t.name == "include" {
include();
} else {
bad_token(&t, "unknown directive".to_string());
}
}
let v = env_output();
env_pop();
return v;
}
|
macros_get
|
identifier_name
|
preprocess.rs
|
// C preprocessor
use crate::token::*;
use std::cell::RefCell;
use std::collections::HashMap;
thread_local! {
static MACROS: RefCell<HashMap<String, Macro>> = RefCell::new(HashMap::new());
static ENV: RefCell<Env> = RefCell::new(Env::new());
}
fn set_env(env: Env) {
ENV.with(|c| {
*c.borrow_mut() = env;
})
}
fn get_env() -> Option<Env> {
ENV.with(|c| {
Some(c.borrow().clone())
})
}
fn env_pop() {
ENV.with(|c| {
let prev = c.borrow().prev.clone();
if prev.is_some() {
*c.borrow_mut() = *prev.unwrap();
}
})
}
fn env_output() -> Vec<Token> {
ENV.with(|c| {
return c.borrow().output.clone();
})
}
fn macros_get(key: &String) -> Option<Macro> {
MACROS.with(|m| {
match m.borrow().get(key) {
Some(v) => Some(v.clone()),
None => None,
}
})
}
fn macros_put(key: String, value: Macro) {
MACROS.with(|m| {
m.borrow_mut().insert(key, value);
})
}
#[derive(Clone, Debug)]
struct Env {
input: Vec<Token>,
output: Vec<Token>,
pos: usize,
prev: Option<Box<Env>>,
}
impl Env {
fn new() -> Env {
Env {
input: Vec::new(),
output: Vec::new(),
pos: 0,
prev: None,
}
}
}
fn new_env(prev: Option<Env>, input: Vec<Token>) -> Env {
let mut ctx = Env::new();
ctx.input = input;
if prev.is_none() {
ctx.prev = None;
} else {
ctx.prev = Some(Box::new(prev.unwrap()));
}
return ctx;
}
#[derive(Clone, Debug, PartialEq)]
enum MacroType {
OBJLIKE,
FUNCLIKE,
}
#[derive(Clone, Debug)]
struct Macro {
ty: MacroType,
tokens: Vec<Token>,
params: Vec<String>,
}
fn new_macro(ty: MacroType, name: String) {
let m = Macro {
ty: ty,
tokens: Vec::new(),
params: Vec::new(),
};
macros_put(name, m);
}
fn append(v: &mut Vec<Token>) {
ENV.with(|env| {
env.borrow_mut().output.append(v);
})
}
fn emit(t: Token) {
ENV.with(|env| {
env.borrow_mut().output.push(t);
})
}
fn next() -> Token {
ENV.with(|env| {
let pos = env.borrow().pos;
assert!(pos < env.borrow().input.len());
env.borrow_mut().pos += 1;
return env.borrow().input[pos].clone();
})
}
fn is_eof() -> bool {
ENV.with(|c| {
let env = c.borrow();
return env.pos == env.input.len();
})
}
fn get(ty: TokenType, msg: String) -> Token {
let t = next();
    if t.ty != ty {
bad_token(&t, msg);
}
return t;
}
fn ident(msg: String) -> String {
let t = get(TokenType::IDENT, msg);
return t.name.clone();
}
fn peek() -> Token {
ENV.with(|c| {
let env = c.borrow();
return env.input[env.pos].clone();
})
}
fn consume(ty: TokenType) -> bool {
    if peek().ty != ty {
return false;
}
ENV.with(|c| {
c.borrow_mut().pos += 1;
});
return true;
}
fn read_until_eol() -> Vec<Token> {
let mut v = Vec::new();
    while !is_eof() {
let t = next();
if t.ty == TokenType::NEW_LINE {
break;
}
v.push(t.clone());
}
return v;
}
fn new_int(tmpl: & Token, val: i32) -> Token {
let mut t = tmpl.clone();
t.ty = TokenType::NUM;
t.val = val;
return t;
}
fn new_string(tmpl: & Token, str_cnt: String, len: usize) -> Token {
let mut t = tmpl.clone();
t.ty = TokenType::STR;
t.str_cnt = str_cnt;
t.len = len;
return t;
}
fn new_param(tmpl: & Token, val: i32) -> Token {
let mut t = tmpl.clone();
t.ty = TokenType::PARAM;
t.val = val;
return t;
}
fn is_ident(t: &Token, s: &str) -> bool {
return t.ty == TokenType::IDENT && t.name == s;
}
// Replaces macro parameter tokens with TokenType::PARAM tokens.
fn replace_macro_params(m: &mut Macro) {
let params = m.params.clone();
let mut tokens = m.tokens.clone();
let mut map = HashMap::new();
for i in 0..params.len() {
let name = params[i].clone();
map.insert(name, i as i32);
}
for i in 0..tokens.len() {
let t = &tokens[i].clone();
        if t.ty != TokenType::IDENT {
continue;
}
let n = match map.get(&t.name) {
Some(i) => *i,
None => {
continue;
}
};
tokens.remove(i);
tokens.insert(i, new_param(t, n));
}
m.tokens = tokens;
}
// Replaces '#' followed by a macro parameter.
fn replace_hash_ident(m: &mut Macro) {
let tokens = m.tokens.clone();
let mut v = Vec::new();
let mut i = 0;
    while i + 1 < tokens.len() {
let t1 = tokens[i].clone();
let mut t2 = tokens[i+1].clone();
if t1.ty == TokenType::SHARP && t2.ty == TokenType::PARAM {
t2.stringize = true;
v.push(t2);
i += 1;
} else {
v.push(t1);
}
i += 1;
}
    if i + 1 == tokens.len() {
v.push(tokens[i].clone());
}
m.tokens = v;
}
fn read_one_arg() -> Vec<Token> {
let mut v = Vec::new();
let start = peek();
let mut level = 0;
    while !is_eof() {
let t = peek();
if level == 0 {
if t.ty == TokenType::KET || t.ty == TokenType::COMMA {
return v;
}
}
next();
if t.ty == TokenType::BRA {
level += 1;
} else if t.ty == TokenType::KET {
level -= 1;
}
v.push(t);
}
bad_token(&start, "unclosed macro argument".to_string());
panic!();
}
fn read_args() -> HashMap<usize, Vec<Token>> {
let mut v = HashMap::new();
if consume(TokenType::KET) {
return v;
}
let i = v.len();
v.insert(i, read_one_arg());
    while !consume(TokenType::KET) {
get(TokenType::COMMA, "comma expected".to_string());
let i = v.len();
v.insert(i, read_one_arg());
}
return v;
}
fn emit_special_macro(t: & Token) -> bool {
if is_ident(t, "__LINE__") {
emit(new_int(t, get_line_number(t)));
return true;
}
return false;
}
fn apply_objlike(m: &mut Macro, _start: & Token) {
for t in m.tokens.iter() {
if emit_special_macro(t) {
continue;
}
emit(t.clone());
}
}
fn apply_functionlike(m: &mut Macro, start: & Token) {
    get(TokenType::BRA, "'(' expected".to_string());
let args = read_args();
    if m.params.len() != args.len() {
        bad_token(&start, format!("number of parameters does not match ({} != {})", m.params.len(), args.len()));
panic!();
}
for i in 0..m.tokens.len() {
let t = &m.tokens[i];
if emit_special_macro(t) {
continue;
}
if t.ty == TokenType::PARAM {
if t.stringize {
let j = t.val as usize;
let s = stringize(args.get(&j).unwrap().clone());
emit(new_string(t, s.clone(), s.len()));
} else {
let j = t.val as usize;
append(&mut args.get(&j).unwrap().clone());
}
continue;
}
emit(t.clone());
}
}
fn apply(m: &mut Macro, start: & Token) {
if m.ty == MacroType::OBJLIKE {
apply_objlike(m, start);
} else {
apply_functionlike(m, start);
}
}
fn define_funclike(name: String) {
new_macro(MacroType::FUNCLIKE, name.clone());
let mut m = macros_get(&name).unwrap();
    while !consume(TokenType::KET) {
if m.params.len() > 0 {
get(TokenType::COMMA, ", expected".to_string());
}
m.params.push(ident("parameter name expected".to_string()));
}
m.tokens = read_until_eol();
replace_macro_params(&mut m);
replace_hash_ident(&mut m);
macros_put(name, m);
}
fn define_objlike(name: String) {
new_macro(MacroType::OBJLIKE, name.clone());
let mut m = macros_get(&name).unwrap();
m.tokens = read_until_eol();
macros_put(name, m);
}
fn define() {
let name = ident("macro name expected".to_string());
if consume(TokenType::BRA) {
return define_funclike(name);
}
return define_objlike(name);
}
fn include() {
let t = get(TokenType::STR, "string expected".to_string());
let path = t.str_cnt;
get(TokenType::NEW_LINE, "newline expected".to_string());
return append(&mut tokenize(path, false));
}
pub fn preprocess(tokens: Vec<Token>) -> Vec<Token> {
set_env(new_env(get_env(), tokens.clone()));
    while !is_eof() {
let mut t = next();
if t.ty == TokenType::IDENT {
match macros_get(&t.name) {
Some(macro_token) => {
apply(&mut macro_token.clone(), &t);
}
None => {
emit(t.clone());
}
}
continue;
}
        if t.ty != TokenType::SHARP {
emit(t.clone());
continue;
}
t = get(TokenType::IDENT, "identifier expected".to_string());
if t.name == "define" {
define();
} else if t.name == "include"
|
else {
bad_token(&t, "unknown directive".to_string());
}
}
let v = env_output();
env_pop();
return v;
}
|
{
include();
}
|
conditional_block
|
preprocess.rs
|
// C preprocessor
use crate::token::*;
use std::cell::RefCell;
use std::collections::HashMap;
thread_local! {
static MACROS: RefCell<HashMap<String, Macro>> = RefCell::new(HashMap::new());
static ENV: RefCell<Env> = RefCell::new(Env::new());
}
fn set_env(env: Env) {
ENV.with(|c| {
*c.borrow_mut() = env;
})
}
fn get_env() -> Option<Env> {
ENV.with(|c| {
Some(c.borrow().clone())
})
}
fn env_pop() {
ENV.with(|c| {
let prev = c.borrow().prev.clone();
if prev.is_some() {
*c.borrow_mut() = *prev.unwrap();
}
})
}
fn env_output() -> Vec<Token> {
ENV.with(|c| {
return c.borrow().output.clone();
})
}
fn macros_get(key: &String) -> Option<Macro> {
MACROS.with(|m| {
match m.borrow().get(key) {
Some(v) => Some(v.clone()),
None => None,
}
})
}
fn macros_put(key: String, value: Macro) {
MACROS.with(|m| {
m.borrow_mut().insert(key, value);
})
}
#[derive(Clone, Debug)]
struct Env {
input: Vec<Token>,
output: Vec<Token>,
pos: usize,
prev: Option<Box<Env>>,
}
impl Env {
fn new() -> Env
|
}
fn new_env(prev: Option<Env>, input: Vec<Token>) -> Env {
let mut ctx = Env::new();
ctx.input = input;
if prev.is_none() {
ctx.prev = None;
} else {
ctx.prev = Some(Box::new(prev.unwrap()));
}
return ctx;
}
#[derive(Clone, Debug, PartialEq)]
enum MacroType {
OBJLIKE,
FUNCLIKE,
}
#[derive(Clone, Debug)]
struct Macro {
ty: MacroType,
tokens: Vec<Token>,
params: Vec<String>,
}
fn new_macro(ty: MacroType, name: String) {
let m = Macro {
ty: ty,
tokens: Vec::new(),
params: Vec::new(),
};
macros_put(name, m);
}
fn append(v: &mut Vec<Token>) {
ENV.with(|env| {
env.borrow_mut().output.append(v);
})
}
fn emit(t: Token) {
ENV.with(|env| {
env.borrow_mut().output.push(t);
})
}
fn next() -> Token {
ENV.with(|env| {
let pos = env.borrow().pos;
assert!(pos < env.borrow().input.len());
env.borrow_mut().pos += 1;
return env.borrow().input[pos].clone();
})
}
fn is_eof() -> bool {
ENV.with(|c| {
let env = c.borrow();
return env.pos == env.input.len();
})
}
fn get(ty: TokenType, msg: String) -> Token {
let t = next();
    if t.ty != ty {
bad_token(&t, msg);
}
return t;
}
fn ident(msg: String) -> String {
let t = get(TokenType::IDENT, msg);
return t.name.clone();
}
fn peek() -> Token {
ENV.with(|c| {
let env = c.borrow();
return env.input[env.pos].clone();
})
}
fn consume(ty: TokenType) -> bool {
    if peek().ty != ty {
return false;
}
ENV.with(|c| {
c.borrow_mut().pos += 1;
});
return true;
}
fn read_until_eol() -> Vec<Token> {
let mut v = Vec::new();
    while !is_eof() {
let t = next();
if t.ty == TokenType::NEW_LINE {
break;
}
v.push(t.clone());
}
return v;
}
fn new_int(tmpl: & Token, val: i32) -> Token {
let mut t = tmpl.clone();
t.ty = TokenType::NUM;
t.val = val;
return t;
}
fn new_string(tmpl: & Token, str_cnt: String, len: usize) -> Token {
let mut t = tmpl.clone();
t.ty = TokenType::STR;
t.str_cnt = str_cnt;
t.len = len;
return t;
}
fn new_param(tmpl: & Token, val: i32) -> Token {
let mut t = tmpl.clone();
t.ty = TokenType::PARAM;
t.val = val;
return t;
}
fn is_ident(t: &Token, s: &str) -> bool {
return t.ty == TokenType::IDENT && t.name == s;
}
// Replaces macro parameter tokens with TokenType::PARAM tokens.
fn replace_macro_params(m: &mut Macro) {
let params = m.params.clone();
let mut tokens = m.tokens.clone();
let mut map = HashMap::new();
for i in 0..params.len() {
let name = params[i].clone();
map.insert(name, i as i32);
}
for i in 0..tokens.len() {
let t = &tokens[i].clone();
        if t.ty != TokenType::IDENT {
continue;
}
let n = match map.get(&t.name) {
Some(i) => *i,
None => {
continue;
}
};
tokens.remove(i);
tokens.insert(i, new_param(t, n));
}
m.tokens = tokens;
}
// Replaces '#' followed by a macro parameter.
fn replace_hash_ident(m: &mut Macro) {
let tokens = m.tokens.clone();
let mut v = Vec::new();
let mut i = 0;
    while i + 1 < tokens.len() {
let t1 = tokens[i].clone();
let mut t2 = tokens[i+1].clone();
if t1.ty == TokenType::SHARP && t2.ty == TokenType::PARAM {
t2.stringize = true;
v.push(t2);
i += 1;
} else {
v.push(t1);
}
i += 1;
}
    if i + 1 == tokens.len() {
v.push(tokens[i].clone());
}
m.tokens = v;
}
fn read_one_arg() -> Vec<Token> {
let mut v = Vec::new();
let start = peek();
let mut level = 0;
    while !is_eof() {
let t = peek();
if level == 0 {
if t.ty == TokenType::KET || t.ty == TokenType::COMMA {
return v;
}
}
next();
if t.ty == TokenType::BRA {
level += 1;
} else if t.ty == TokenType::KET {
level -= 1;
}
v.push(t);
}
bad_token(&start, "unclosed macro argument".to_string());
panic!();
}
fn read_args() -> HashMap<usize, Vec<Token>> {
let mut v = HashMap::new();
if consume(TokenType::KET) {
return v;
}
let i = v.len();
v.insert(i, read_one_arg());
    while !consume(TokenType::KET) {
get(TokenType::COMMA, "comma expected".to_string());
let i = v.len();
v.insert(i, read_one_arg());
}
return v;
}
fn emit_special_macro(t: & Token) -> bool {
if is_ident(t, "__LINE__") {
emit(new_int(t, get_line_number(t)));
return true;
}
return false;
}
fn apply_objlike(m: &mut Macro, _start: & Token) {
for t in m.tokens.iter() {
if emit_special_macro(t) {
continue;
}
emit(t.clone());
}
}
fn apply_functionlike(m: &mut Macro, start: & Token) {
    get(TokenType::BRA, "'(' expected".to_string());
let args = read_args();
    if m.params.len() != args.len() {
        bad_token(&start, format!("number of parameters does not match ({} != {})", m.params.len(), args.len()));
panic!();
}
for i in 0..m.tokens.len() {
let t = &m.tokens[i];
if emit_special_macro(t) {
continue;
}
if t.ty == TokenType::PARAM {
if t.stringize {
let j = t.val as usize;
let s = stringize(args.get(&j).unwrap().clone());
emit(new_string(t, s.clone(), s.len()));
} else {
let j = t.val as usize;
append(&mut args.get(&j).unwrap().clone());
}
continue;
}
emit(t.clone());
}
}
fn apply(m: &mut Macro, start: & Token) {
if m.ty == MacroType::OBJLIKE {
apply_objlike(m, start);
} else {
apply_functionlike(m, start);
}
}
fn define_funclike(name: String) {
new_macro(MacroType::FUNCLIKE, name.clone());
let mut m = macros_get(&name).unwrap();
    while !consume(TokenType::KET) {
if m.params.len() > 0 {
get(TokenType::COMMA, ", expected".to_string());
}
m.params.push(ident("parameter name expected".to_string()));
}
m.tokens = read_until_eol();
replace_macro_params(&mut m);
replace_hash_ident(&mut m);
macros_put(name, m);
}
fn define_objlike(name: String) {
new_macro(MacroType::OBJLIKE, name.clone());
let mut m = macros_get(&name).unwrap();
m.tokens = read_until_eol();
macros_put(name, m);
}
fn define() {
let name = ident("macro name expected".to_string());
if consume(TokenType::BRA) {
return define_funclike(name);
}
return define_objlike(name);
}
fn include() {
let t = get(TokenType::STR, "string expected".to_string());
let path = t.str_cnt;
get(TokenType::NEW_LINE, "newline expected".to_string());
return append(&mut tokenize(path, false));
}
pub fn preprocess(tokens: Vec<Token>) -> Vec<Token> {
set_env(new_env(get_env(), tokens.clone()));
    while !is_eof() {
let mut t = next();
if t.ty == TokenType::IDENT {
match macros_get(&t.name) {
Some(macro_token) => {
apply(&mut macro_token.clone(), &t);
}
None => {
emit(t.clone());
}
}
continue;
}
        if t.ty != TokenType::SHARP {
emit(t.clone());
continue;
}
t = get(TokenType::IDENT, "identifier expected".to_string());
if t.name == "define" {
define();
} else if t.name == "include" {
include();
} else {
bad_token(&t, "unknown directive".to_string());
}
}
let v = env_output();
env_pop();
return v;
}
|
{
Env {
input: Vec::new(),
output: Vec::new(),
pos: 0,
prev: None,
}
}
|
identifier_body
|
mod.rs
|
#![cfg(any(target_os = "linux", target_os = "dragonfly", target_os = "freebsd"))]
use Api;
use BuilderAttribs;
use ContextError;
use CreationError;
use GlContext;
use PixelFormat;
use libc;
use api::osmesa::{self, OsMesaContext};
#[cfg(feature = "window")]
pub use self::api_dispatch::{Window, WindowProxy, MonitorID, get_available_monitors, get_primary_monitor};
#[cfg(feature = "window")]
pub use self::api_dispatch::{WaitEventsIterator, PollEventsIterator};
#[cfg(feature = "window")]
mod api_dispatch;
#[cfg(not(feature = "window"))]
pub type Window = (); // TODO: hack to make things work
#[cfg(not(feature = "window"))]
pub type MonitorID = (); // TODO: hack to make things work
pub struct
|
(OsMesaContext);
impl HeadlessContext {
pub fn new(builder: BuilderAttribs) -> Result<HeadlessContext, CreationError> {
match OsMesaContext::new(builder) {
Ok(c) => return Ok(HeadlessContext(c)),
Err(osmesa::OsMesaCreationError::NotSupported) => (),
Err(osmesa::OsMesaCreationError::CreationError(e)) => return Err(e),
};
Err(CreationError::NotSupported)
}
}
impl GlContext for HeadlessContext {
#[inline]
unsafe fn make_current(&self) -> Result<(), ContextError> {
self.0.make_current()
}
#[inline]
fn is_current(&self) -> bool {
self.0.is_current()
}
#[inline]
fn get_proc_address(&self, addr: &str) -> *const libc::c_void {
self.0.get_proc_address(addr)
}
#[inline]
fn swap_buffers(&self) -> Result<(), ContextError> {
self.0.swap_buffers()
}
#[inline]
fn get_api(&self) -> Api {
self.0.get_api()
}
#[inline]
fn get_pixel_format(&self) -> PixelFormat {
self.0.get_pixel_format()
}
}
|
HeadlessContext
|
identifier_name
|
mod.rs
|
#![cfg(any(target_os = "linux", target_os = "dragonfly", target_os = "freebsd"))]
use Api;
use BuilderAttribs;
use ContextError;
use CreationError;
use GlContext;
use PixelFormat;
use libc;
use api::osmesa::{self, OsMesaContext};
#[cfg(feature = "window")]
pub use self::api_dispatch::{Window, WindowProxy, MonitorID, get_available_monitors, get_primary_monitor};
#[cfg(feature = "window")]
pub use self::api_dispatch::{WaitEventsIterator, PollEventsIterator};
#[cfg(feature = "window")]
mod api_dispatch;
#[cfg(not(feature = "window"))]
pub type Window = (); // TODO: hack to make things work
#[cfg(not(feature = "window"))]
pub type MonitorID = (); // TODO: hack to make things work
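// A headless GL context backed by OsMesa; every GlContext call below is simply
// forwarded to the wrapped OsMesaContext.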
pub struct HeadlessContext(OsMesaContext);
impl HeadlessContext {
pub fn new(builder: BuilderAttribs) -> Result<HeadlessContext, CreationError> {
match OsMesaContext::new(builder) {
Ok(c) => return Ok(HeadlessContext(c)),
Err(osmesa::OsMesaCreationError::NotSupported) => (),
Err(osmesa::OsMesaCreationError::CreationError(e)) => return Err(e),
};
Err(CreationError::NotSupported)
}
}
impl GlContext for HeadlessContext {
#[inline]
unsafe fn make_current(&self) -> Result<(), ContextError> {
self.0.make_current()
}
#[inline]
fn is_current(&self) -> bool {
self.0.is_current()
}
#[inline]
fn get_proc_address(&self, addr: &str) -> *const libc::c_void {
self.0.get_proc_address(addr)
}
#[inline]
fn swap_buffers(&self) -> Result<(), ContextError> {
self.0.swap_buffers()
}
|
fn get_api(&self) -> Api {
self.0.get_api()
}
#[inline]
fn get_pixel_format(&self) -> PixelFormat {
self.0.get_pixel_format()
}
}
|
#[inline]
|
random_line_split
|
strx.rs
|
pub fn remove_to<'s>(s: &'s str, c: char) -> &'s str {
match s.rfind(c) {
Some(pos) => s.slice_from(pos + 1),
None => s
}
}
#[allow(dead_code)]
pub fn remove_from_last<'s>(s: &'s str, c: char) -> &'s str {
match s.rfind(c) {
Some(pos) => s.slice_to(pos),
None => s,
}
}
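// Removes `suffix` from the end of `s`; panics if `s` does not end with it.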
pub fn remove_suffix<'s>(s: &'s str, suffix: &str) -> &'s str {
    if !s.ends_with(suffix) {
panic!();
}
s.slice_to(s.len() - suffix.len())
}
pub fn remove_prefix<'s>(s: &'s str, prefix: &str) -> &'s str {
    if !s.starts_with(prefix) {
panic!();
}
s.slice_from(prefix.len())
}
#[cfg(test)]
mod test {
|
use super::remove_to;
use super::remove_from_last;
use super::remove_prefix;
use super::remove_suffix;
#[test]
fn test_remove_to() {
assert_eq!("aaa", remove_to("aaa", '.'));
assert_eq!("bbb", remove_to("aaa.bbb", '.'));
assert_eq!("ccc", remove_to("aaa.bbb.ccc", '.'));
}
#[test]
fn test_remove_from_last() {
assert_eq!("aaa", remove_from_last("aaa", '.'));
assert_eq!("aaa", remove_from_last("aaa.bbb", '.'));
assert_eq!("aaa.bbb", remove_from_last("aaa.bbb.ccc", '.'));
}
#[test]
fn test_remove_prefix() {
assert_eq!("aaa", remove_prefix("bbbaaa", "bbb"));
}
#[test]
#[should_fail]
fn test_remove_prefix_fail() {
remove_prefix("aaa", "bbb");
}
#[test]
fn test_remove_suffix() {
assert_eq!("bbb", remove_suffix("bbbaaa", "aaa"));
}
#[test]
#[should_fail]
fn test_remove_suffix_fail() {
remove_suffix("aaa", "bbb");
}
}
|
random_line_split
|
|
strx.rs
|
pub fn remove_to<'s>(s: &'s str, c: char) -> &'s str {
match s.rfind(c) {
Some(pos) => s.slice_from(pos + 1),
None => s
}
}
#[allow(dead_code)]
pub fn remove_from_last<'s>(s: &'s str, c: char) -> &'s str {
match s.rfind(c) {
Some(pos) => s.slice_to(pos),
None => s,
}
}
pub fn remove_suffix<'s>(s: &'s str, suffix: &str) -> &'s str {
    if !s.ends_with(suffix) {
panic!();
}
s.slice_to(s.len() - suffix.len())
}
pub fn remove_prefix<'s>(s: &'s str, prefix: &str) -> &'s str {
    if !s.starts_with(prefix) {
panic!();
}
s.slice_from(prefix.len())
}
#[cfg(test)]
mod test {
use super::remove_to;
use super::remove_from_last;
use super::remove_prefix;
use super::remove_suffix;
#[test]
fn test_remove_to() {
assert_eq!("aaa", remove_to("aaa", '.'));
assert_eq!("bbb", remove_to("aaa.bbb", '.'));
assert_eq!("ccc", remove_to("aaa.bbb.ccc", '.'));
}
#[test]
fn test_remove_from_last()
|
#[test]
fn test_remove_prefix() {
assert_eq!("aaa", remove_prefix("bbbaaa", "bbb"));
}
#[test]
#[should_fail]
fn test_remove_prefix_fail() {
remove_prefix("aaa", "bbb");
}
#[test]
fn test_remove_suffix() {
assert_eq!("bbb", remove_suffix("bbbaaa", "aaa"));
}
#[test]
#[should_fail]
fn test_remove_suffix_fail() {
remove_suffix("aaa", "bbb");
}
}
|
{
assert_eq!("aaa", remove_from_last("aaa", '.'));
assert_eq!("aaa", remove_from_last("aaa.bbb", '.'));
assert_eq!("aaa.bbb", remove_from_last("aaa.bbb.ccc", '.'));
}
|
identifier_body
|
strx.rs
|
pub fn remove_to<'s>(s: &'s str, c: char) -> &'s str {
match s.rfind(c) {
Some(pos) => s.slice_from(pos + 1),
None => s
}
}
#[allow(dead_code)]
pub fn remove_from_last<'s>(s: &'s str, c: char) -> &'s str {
match s.rfind(c) {
Some(pos) => s.slice_to(pos),
None => s,
}
}
pub fn remove_suffix<'s>(s: &'s str, suffix: &str) -> &'s str {
    if !s.ends_with(suffix) {
panic!();
}
s.slice_to(s.len() - suffix.len())
}
pub fn
|
<'s>(s: &'s str, prefix: &str) -> &'s str {
    if !s.starts_with(prefix) {
panic!();
}
s.slice_from(prefix.len())
}
#[cfg(test)]
mod test {
use super::remove_to;
use super::remove_from_last;
use super::remove_prefix;
use super::remove_suffix;
#[test]
fn test_remove_to() {
assert_eq!("aaa", remove_to("aaa", '.'));
assert_eq!("bbb", remove_to("aaa.bbb", '.'));
assert_eq!("ccc", remove_to("aaa.bbb.ccc", '.'));
}
#[test]
fn test_remove_from_last() {
assert_eq!("aaa", remove_from_last("aaa", '.'));
assert_eq!("aaa", remove_from_last("aaa.bbb", '.'));
assert_eq!("aaa.bbb", remove_from_last("aaa.bbb.ccc", '.'));
}
#[test]
fn test_remove_prefix() {
assert_eq!("aaa", remove_prefix("bbbaaa", "bbb"));
}
#[test]
#[should_fail]
fn test_remove_prefix_fail() {
remove_prefix("aaa", "bbb");
}
#[test]
fn test_remove_suffix() {
assert_eq!("bbb", remove_suffix("bbbaaa", "aaa"));
}
#[test]
#[should_fail]
fn test_remove_suffix_fail() {
remove_suffix("aaa", "bbb");
}
}
|
remove_prefix
|
identifier_name
|
strx.rs
|
pub fn remove_to<'s>(s: &'s str, c: char) -> &'s str {
match s.rfind(c) {
Some(pos) => s.slice_from(pos + 1),
None => s
}
}
#[allow(dead_code)]
pub fn remove_from_last<'s>(s: &'s str, c: char) -> &'s str {
match s.rfind(c) {
Some(pos) => s.slice_to(pos),
None => s,
}
}
pub fn remove_suffix<'s>(s: &'s str, suffix: &str) -> &'s str {
    if !s.ends_with(suffix)
|
s.slice_to(s.len() - suffix.len())
}
pub fn remove_prefix<'s>(s: &'s str, prefix: &str) -> &'s str {
    if !s.starts_with(prefix) {
panic!();
}
s.slice_from(prefix.len())
}
#[cfg(test)]
mod test {
use super::remove_to;
use super::remove_from_last;
use super::remove_prefix;
use super::remove_suffix;
#[test]
fn test_remove_to() {
assert_eq!("aaa", remove_to("aaa", '.'));
assert_eq!("bbb", remove_to("aaa.bbb", '.'));
assert_eq!("ccc", remove_to("aaa.bbb.ccc", '.'));
}
#[test]
fn test_remove_from_last() {
assert_eq!("aaa", remove_from_last("aaa", '.'));
assert_eq!("aaa", remove_from_last("aaa.bbb", '.'));
assert_eq!("aaa.bbb", remove_from_last("aaa.bbb.ccc", '.'));
}
#[test]
fn test_remove_prefix() {
assert_eq!("aaa", remove_prefix("bbbaaa", "bbb"));
}
#[test]
#[should_fail]
fn test_remove_prefix_fail() {
remove_prefix("aaa", "bbb");
}
#[test]
fn test_remove_suffix() {
assert_eq!("bbb", remove_suffix("bbbaaa", "aaa"));
}
#[test]
#[should_fail]
fn test_remove_suffix_fail() {
remove_suffix("aaa", "bbb");
}
}
|
{
panic!();
}
|
conditional_block
|
typeref.rs
|
/* automatically generated by rust-bindgen */
#![allow(non_snake_case)]
#[repr(C)]
pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
impl <T> __BindgenUnionField<T> {
#[inline]
pub fn new() -> Self { __BindgenUnionField(::std::marker::PhantomData) }
#[inline]
pub unsafe fn as_ref(&self) -> &T { ::std::mem::transmute(self) }
#[inline]
pub unsafe fn as_mut(&mut self) -> &mut T { ::std::mem::transmute(self) }
}
impl <T> ::std::default::Default for __BindgenUnionField<T> {
#[inline]
fn default() -> Self { Self::new() }
}
impl <T> ::std::clone::Clone for __BindgenUnionField<T> {
#[inline]
fn clone(&self) -> Self { Self::new() }
}
impl <T> ::std::marker::Copy for __BindgenUnionField<T> { }
impl <T> ::std::fmt::Debug for __BindgenUnionField<T> {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
fmt.write_str("__BindgenUnionField")
}
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct nsFoo {
pub mBar: StyleShapeSource<::std::os::raw::c_int>,
}
#[test]
fn bindgen_test_layout_nsFoo() {
assert_eq!(::std::mem::size_of::<nsFoo>(), 8usize);
assert_eq!(::std::mem::align_of::<nsFoo>(), 8usize);
}
impl Clone for nsFoo {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct FragmentOrURL {
pub mIsLocalRef: bool,
}
#[test]
fn bindgen_test_layout_FragmentOrURL() {
assert_eq!(::std::mem::size_of::<FragmentOrURL>(), 1usize);
assert_eq!(::std::mem::align_of::<FragmentOrURL>(), 1usize);
}
impl Clone for FragmentOrURL {
|
#[derive(Debug, Copy)]
pub struct Position {
pub _address: u8,
}
#[test]
fn bindgen_test_layout_Position() {
assert_eq!(::std::mem::size_of::<Position>(), 1usize);
assert_eq!(::std::mem::align_of::<Position>(), 1usize);
}
impl Clone for Position {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct Bar {
pub mFoo: *mut nsFoo,
}
#[test]
fn bindgen_test_layout_Bar() {
assert_eq!(::std::mem::size_of::<Bar>(), 8usize);
assert_eq!(::std::mem::align_of::<Bar>(), 8usize);
}
impl Clone for Bar {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct StyleShapeSource<ReferenceBox> {
pub __bindgen_anon_1: StyleShapeSource__bindgen_ty_1<ReferenceBox>,
pub _phantom_0: ::std::marker::PhantomData<ReferenceBox>,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct StyleShapeSource__bindgen_ty_1<ReferenceBox> {
pub mPosition: __BindgenUnionField<*mut Position>,
pub mFragmentOrURL: __BindgenUnionField<*mut FragmentOrURL>,
pub bindgen_union_field: u64,
pub _phantom_0: ::std::marker::PhantomData<ReferenceBox>,
}
|
fn clone(&self) -> Self { *self }
}
#[repr(C)]
|
random_line_split
|
typeref.rs
|
/* automatically generated by rust-bindgen */
#![allow(non_snake_case)]
#[repr(C)]
pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
impl <T> __BindgenUnionField<T> {
#[inline]
pub fn new() -> Self { __BindgenUnionField(::std::marker::PhantomData) }
#[inline]
pub unsafe fn as_ref(&self) -> &T { ::std::mem::transmute(self) }
#[inline]
pub unsafe fn as_mut(&mut self) -> &mut T { ::std::mem::transmute(self) }
}
impl <T> ::std::default::Default for __BindgenUnionField<T> {
#[inline]
fn default() -> Self { Self::new() }
}
impl <T> ::std::clone::Clone for __BindgenUnionField<T> {
#[inline]
fn clone(&self) -> Self { Self::new() }
}
impl <T> ::std::marker::Copy for __BindgenUnionField<T> { }
impl <T> ::std::fmt::Debug for __BindgenUnionField<T> {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
fmt.write_str("__BindgenUnionField")
}
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct nsFoo {
pub mBar: StyleShapeSource<::std::os::raw::c_int>,
}
#[test]
fn bindgen_test_layout_nsFoo() {
assert_eq!(::std::mem::size_of::<nsFoo>(), 8usize);
assert_eq!(::std::mem::align_of::<nsFoo>(), 8usize);
}
impl Clone for nsFoo {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct FragmentOrURL {
pub mIsLocalRef: bool,
}
#[test]
fn bindgen_test_layout_FragmentOrURL() {
assert_eq!(::std::mem::size_of::<FragmentOrURL>(), 1usize);
assert_eq!(::std::mem::align_of::<FragmentOrURL>(), 1usize);
}
impl Clone for FragmentOrURL {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct Position {
pub _address: u8,
}
#[test]
fn
|
() {
assert_eq!(::std::mem::size_of::<Position>(), 1usize);
assert_eq!(::std::mem::align_of::<Position>(), 1usize);
}
impl Clone for Position {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy)]
pub struct Bar {
pub mFoo: *mut nsFoo,
}
#[test]
fn bindgen_test_layout_Bar() {
assert_eq!(::std::mem::size_of::<Bar>(), 8usize);
assert_eq!(::std::mem::align_of::<Bar>(), 8usize);
}
impl Clone for Bar {
fn clone(&self) -> Self { *self }
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct StyleShapeSource<ReferenceBox> {
pub __bindgen_anon_1: StyleShapeSource__bindgen_ty_1<ReferenceBox>,
pub _phantom_0: ::std::marker::PhantomData<ReferenceBox>,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct StyleShapeSource__bindgen_ty_1<ReferenceBox> {
pub mPosition: __BindgenUnionField<*mut Position>,
pub mFragmentOrURL: __BindgenUnionField<*mut FragmentOrURL>,
pub bindgen_union_field: u64,
pub _phantom_0: ::std::marker::PhantomData<ReferenceBox>,
}
|
bindgen_test_layout_Position
|
identifier_name
|
script_msg.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_traits::CanvasMsg;
use euclid::point::Point2D;
use euclid::size::Size2D;
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::{AnimationState, DocumentState, IframeLoadInfo, NavigationDirection};
use msg::constellation_msg::{Failure, MozBrowserEvent, PipelineId};
use msg::constellation_msg::{LoadData, SubpageId};
use msg::constellation_msg::{MouseButton, MouseEventType};
use offscreen_gl_context::GLContextAttributes;
use style_traits::viewport::ViewportConstraints;
use url::Url;
use util::cursor::Cursor;
/// Messages from the layout to the constellation.
#[derive(Deserialize, Serialize)]
pub enum
|
{
/// Indicates whether this pipeline is currently running animations.
ChangeRunningAnimationsState(PipelineId, AnimationState),
/// Layout task failure.
Failure(Failure),
    /// Requests that the constellation inform the compositor of a cursor change.
SetCursor(Cursor),
/// Notifies the constellation that the viewport has been constrained in some manner
ViewportConstrained(PipelineId, ViewportConstraints),
}
/// Messages from the script to the constellation.
#[derive(Deserialize, Serialize)]
pub enum ScriptMsg {
/// Indicates whether this pipeline is currently running animations.
ChangeRunningAnimationsState(PipelineId, AnimationState),
/// Requests that a new 2D canvas thread be created. (This is done in the constellation because
/// 2D canvases may use the GPU and we don't want to give untrusted content access to the GPU.)
CreateCanvasPaintTask(Size2D<i32>, IpcSender<(IpcSender<CanvasMsg>, usize)>),
/// Requests that a new WebGL thread be created. (This is done in the constellation because
/// WebGL uses the GPU and we don't want to give untrusted content access to the GPU.)
CreateWebGLPaintTask(Size2D<i32>,
GLContextAttributes,
IpcSender<Result<(IpcSender<CanvasMsg>, usize), String>>),
/// Dispatched after the DOM load event has fired on a document
/// Causes a `load` event to be dispatched to any enclosing frame context element
/// for the given pipeline.
DOMLoad(PipelineId),
/// Script task failure.
Failure(Failure),
/// Notifies the constellation that this frame has received focus.
Focus(PipelineId),
/// Re-send a mouse button event that was sent to the parent window.
ForwardMouseButtonEvent(PipelineId, MouseEventType, MouseButton, Point2D<f32>),
/// Re-send a mouse move event that was sent to the parent window.
ForwardMouseMoveEvent(PipelineId, Point2D<f32>),
/// Requests that the constellation retrieve the current contents of the clipboard
GetClipboardContents(IpcSender<String>),
/// <head> tag finished parsing
HeadParsed,
/// All pending loads are complete.
LoadComplete(PipelineId),
/// A new load has been requested.
LoadUrl(PipelineId, LoadData),
/// Dispatch a mozbrowser event to a given iframe. Only available in experimental mode.
MozBrowserEvent(PipelineId, SubpageId, MozBrowserEvent),
/// HTMLIFrameElement Forward or Back navigation.
Navigate(Option<(PipelineId, SubpageId)>, NavigationDirection),
/// Favicon detected
NewFavicon(Url),
/// Status message to be displayed in the chrome, eg. a link URL on mouseover.
NodeStatus(Option<String>),
/// Notification that this iframe should be removed.
RemoveIFrame(PipelineId),
/// A load has been requested in an IFrame.
ScriptLoadedURLInIFrame(IframeLoadInfo),
/// Requests that the constellation set the contents of the clipboard
SetClipboardContents(String),
/// Mark a new document as active
ActivateDocument(PipelineId),
/// Set the document state for a pipeline (used by screenshot / reftests)
SetDocumentState(PipelineId, DocumentState),
/// Update the pipeline Url, which can change after redirections.
SetFinalUrl(PipelineId, Url),
}
|
LayoutMsg
|
identifier_name
|
script_msg.rs
|
use canvas_traits::CanvasMsg;
use euclid::point::Point2D;
use euclid::size::Size2D;
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::{AnimationState, DocumentState, IframeLoadInfo, NavigationDirection};
use msg::constellation_msg::{Failure, MozBrowserEvent, PipelineId};
use msg::constellation_msg::{LoadData, SubpageId};
use msg::constellation_msg::{MouseButton, MouseEventType};
use offscreen_gl_context::GLContextAttributes;
use style_traits::viewport::ViewportConstraints;
use url::Url;
use util::cursor::Cursor;
/// Messages from the layout to the constellation.
#[derive(Deserialize, Serialize)]
pub enum LayoutMsg {
/// Indicates whether this pipeline is currently running animations.
ChangeRunningAnimationsState(PipelineId, AnimationState),
/// Layout task failure.
Failure(Failure),
    /// Requests that the constellation inform the compositor of a cursor change.
SetCursor(Cursor),
/// Notifies the constellation that the viewport has been constrained in some manner
ViewportConstrained(PipelineId, ViewportConstraints),
}
/// Messages from the script to the constellation.
#[derive(Deserialize, Serialize)]
pub enum ScriptMsg {
/// Indicates whether this pipeline is currently running animations.
ChangeRunningAnimationsState(PipelineId, AnimationState),
/// Requests that a new 2D canvas thread be created. (This is done in the constellation because
/// 2D canvases may use the GPU and we don't want to give untrusted content access to the GPU.)
CreateCanvasPaintTask(Size2D<i32>, IpcSender<(IpcSender<CanvasMsg>, usize)>),
/// Requests that a new WebGL thread be created. (This is done in the constellation because
/// WebGL uses the GPU and we don't want to give untrusted content access to the GPU.)
CreateWebGLPaintTask(Size2D<i32>,
GLContextAttributes,
IpcSender<Result<(IpcSender<CanvasMsg>, usize), String>>),
/// Dispatched after the DOM load event has fired on a document
/// Causes a `load` event to be dispatched to any enclosing frame context element
/// for the given pipeline.
DOMLoad(PipelineId),
/// Script task failure.
Failure(Failure),
/// Notifies the constellation that this frame has received focus.
Focus(PipelineId),
/// Re-send a mouse button event that was sent to the parent window.
ForwardMouseButtonEvent(PipelineId, MouseEventType, MouseButton, Point2D<f32>),
/// Re-send a mouse move event that was sent to the parent window.
ForwardMouseMoveEvent(PipelineId, Point2D<f32>),
/// Requests that the constellation retrieve the current contents of the clipboard
GetClipboardContents(IpcSender<String>),
/// <head> tag finished parsing
HeadParsed,
/// All pending loads are complete.
LoadComplete(PipelineId),
/// A new load has been requested.
LoadUrl(PipelineId, LoadData),
/// Dispatch a mozbrowser event to a given iframe. Only available in experimental mode.
MozBrowserEvent(PipelineId, SubpageId, MozBrowserEvent),
/// HTMLIFrameElement Forward or Back navigation.
Navigate(Option<(PipelineId, SubpageId)>, NavigationDirection),
/// Favicon detected
NewFavicon(Url),
/// Status message to be displayed in the chrome, eg. a link URL on mouseover.
NodeStatus(Option<String>),
/// Notification that this iframe should be removed.
RemoveIFrame(PipelineId),
/// A load has been requested in an IFrame.
ScriptLoadedURLInIFrame(IframeLoadInfo),
/// Requests that the constellation set the contents of the clipboard
SetClipboardContents(String),
/// Mark a new document as active
ActivateDocument(PipelineId),
/// Set the document state for a pipeline (used by screenshot / reftests)
SetDocumentState(PipelineId, DocumentState),
/// Update the pipeline Url, which can change after redirections.
SetFinalUrl(PipelineId, Url),
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
random_line_split
|
|
coherence_inherent.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that methods that implement a trait cannot be invoked
|
mod Lib {
pub trait TheTrait {
fn the_fn(&self);
}
pub struct TheStruct;
impl TheTrait for TheStruct {
fn the_fn(&self) {}
}
}
mod Import {
// Trait is in scope here:
use Lib::TheStruct;
use Lib::TheTrait;
fn call_the_fn(s: &TheStruct) {
s.the_fn();
}
}
mod NoImport {
// Trait is not in scope here:
use Lib::TheStruct;
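    // Bringing the trait into scope here (`use Lib::TheTrait;`) would make the call compile.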
fn call_the_fn(s: &TheStruct) {
s.the_fn(); //~ ERROR no method named `the_fn` found
}
}
fn main() {}
|
// unless the trait is imported.
|
random_line_split
|
coherence_inherent.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that methods that implement a trait cannot be invoked
// unless the trait is imported.
mod Lib {
pub trait TheTrait {
fn the_fn(&self);
}
pub struct TheStruct;
impl TheTrait for TheStruct {
fn the_fn(&self) {}
}
}
mod Import {
// Trait is in scope here:
use Lib::TheStruct;
use Lib::TheTrait;
fn call_the_fn(s: &TheStruct) {
s.the_fn();
}
}
mod NoImport {
// Trait is not in scope here:
use Lib::TheStruct;
fn
|
(s: &TheStruct) {
s.the_fn(); //~ ERROR no method named `the_fn` found
}
}
fn main() {}
|
call_the_fn
|
identifier_name
|
coherence_inherent.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that methods that implement a trait cannot be invoked
// unless the trait is imported.
mod Lib {
pub trait TheTrait {
fn the_fn(&self);
}
pub struct TheStruct;
impl TheTrait for TheStruct {
fn the_fn(&self) {}
}
}
mod Import {
// Trait is in scope here:
use Lib::TheStruct;
use Lib::TheTrait;
fn call_the_fn(s: &TheStruct)
|
}
mod NoImport {
// Trait is not in scope here:
use Lib::TheStruct;
fn call_the_fn(s: &TheStruct) {
s.the_fn(); //~ ERROR no method named `the_fn` found
}
}
fn main() {}
|
{
s.the_fn();
}
|
identifier_body
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains shared types and messages for use by devtools/script.
//! The traits are here instead of in script so that the devtools crate can be
//! modified independently of the rest of Servo.
#![crate_name = "devtools_traits"]
#![crate_type = "rlib"]
#![allow(non_snake_case)]
#![deny(unsafe_code)]
#[macro_use]
extern crate bitflags;
extern crate hyper;
extern crate ipc_channel;
extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
extern crate msg;
#[macro_use] extern crate serde;
extern crate servo_url;
extern crate time;
use hyper::header::Headers;
use hyper::method::Method;
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::PipelineId;
use servo_url::ServoUrl;
use std::net::TcpStream;
use time::Duration;
use time::Tm;
// Information attached to NewGlobal, to be received and shown in devtools.
// Extend these fields if we need more information.
#[derive(Debug, Deserialize, Serialize)]
pub struct DevtoolsPageInfo {
pub title: String,
pub url: ServoUrl,
}
#[derive(Clone, Debug, Deserialize, MallocSizeOf, Serialize)]
pub struct CSSError {
pub filename: String,
pub line: u32,
pub column: u32,
pub msg: String
}
/// Messages to instruct the devtools server to update its known actors/state
/// according to changes in the browser.
#[derive(Debug)]
pub enum DevtoolsControlMsg {
/// Messages from threads in the chrome process (resource/constellation/devtools)
FromChrome(ChromeToDevtoolsControlMsg),
/// Messages from script threads
FromScript(ScriptToDevtoolsControlMsg),
}
/// Events that the devtools server must act upon.
#[derive(Debug)]
pub enum ChromeToDevtoolsControlMsg {
/// A new client has connected to the server.
AddClient(TcpStream),
/// The browser is shutting down.
ServerExitMsg,
/// A network event occurred (request, reply, etc.). The actor with the
/// provided name should be notified.
NetworkEvent(String, NetworkEvent),
}
#[derive(Debug, Deserialize, Serialize)]
/// Events that the devtools server must act upon.
pub enum ScriptToDevtoolsControlMsg {
/// A new global object was created, associated with a particular pipeline.
/// The means of communicating directly with it are provided.
NewGlobal((PipelineId, Option<WorkerId>),
IpcSender<DevtoolScriptControlMsg>,
DevtoolsPageInfo),
/// A particular page has invoked the console API.
ConsoleAPI(PipelineId, ConsoleMessage, Option<WorkerId>),
/// An animation frame with the given timestamp was processed in a script thread.
/// The actor with the provided name should be notified.
FramerateTick(String, f64),
/// Report a CSS parse error for the given pipeline
ReportCSSError(PipelineId, CSSError),
}
/// Serialized JS return values
/// TODO: generalize this beyond the EvaluateJS message?
#[derive(Debug, Deserialize, Serialize)]
pub enum EvaluateJSReply {
VoidValue,
NullValue,
BooleanValue(bool),
NumberValue(f64),
StringValue(String),
ActorValue { class: String, uuid: String },
}
#[derive(Debug, Deserialize, Serialize)]
pub struct AttrInfo {
pub namespace: String,
pub name: String,
pub value: String,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct NodeInfo {
pub uniqueId: String,
pub baseURI: String,
pub parent: String,
pub nodeType: u16,
pub namespaceURI: String,
pub nodeName: String,
pub numChildren: usize,
pub name: String,
pub publicId: String,
pub systemId: String,
pub attrs: Vec<AttrInfo>,
pub isDocumentElement: bool,
pub shortValue: String,
pub incompleteValue: bool,
}
pub struct StartedTimelineMarker {
name: String,
start_time: PreciseTime,
|
pub name: String,
pub start_time: PreciseTime,
pub start_stack: Option<Vec<()>>,
pub end_time: PreciseTime,
pub end_stack: Option<Vec<()>>,
}
#[derive(Clone, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize)]
pub enum TimelineMarkerType {
Reflow,
DOMEvent,
}
/// The properties of a DOM node as computed by layout.
#[derive(Debug, Deserialize, Serialize)]
pub struct ComputedNodeLayout {
pub display: String,
pub position: String,
pub zIndex: String,
pub boxSizing: String,
pub autoMargins: AutoMargins,
pub marginTop: String,
pub marginRight: String,
pub marginBottom: String,
pub marginLeft: String,
pub borderTopWidth: String,
pub borderRightWidth: String,
pub borderBottomWidth: String,
pub borderLeftWidth: String,
pub paddingTop: String,
pub paddingRight: String,
pub paddingBottom: String,
pub paddingLeft: String,
pub width: f32,
pub height: f32,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct AutoMargins {
pub top: bool,
pub right: bool,
pub bottom: bool,
pub left: bool,
}
/// Messages to process in a particular script thread, as instructed by a devtools client.
/// TODO: better error handling, e.g. if pipeline id lookup fails?
#[derive(Debug, Deserialize, Serialize)]
pub enum DevtoolScriptControlMsg {
/// Evaluate a JS snippet in the context of the global for the given pipeline.
EvaluateJS(PipelineId, String, IpcSender<EvaluateJSReply>),
/// Retrieve the details of the root node (ie. the document) for the given pipeline.
GetRootNode(PipelineId, IpcSender<Option<NodeInfo>>),
/// Retrieve the details of the document element for the given pipeline.
GetDocumentElement(PipelineId, IpcSender<Option<NodeInfo>>),
/// Retrieve the details of the child nodes of the given node in the given pipeline.
GetChildren(PipelineId, String, IpcSender<Option<Vec<NodeInfo>>>),
/// Retrieve the computed layout properties of the given node in the given pipeline.
GetLayout(PipelineId, String, IpcSender<Option<ComputedNodeLayout>>),
/// Retrieve all stored console messages for the given pipeline.
GetCachedMessages(PipelineId, CachedConsoleMessageTypes, IpcSender<Vec<CachedConsoleMessage>>),
/// Update a given node's attributes with a list of modifications.
ModifyAttribute(PipelineId, String, Vec<Modification>),
/// Request live console messages for a given pipeline (true if desired, false otherwise).
WantsLiveNotifications(PipelineId, bool),
/// Request live notifications for a given set of timeline events for a given pipeline.
SetTimelineMarkers(PipelineId, Vec<TimelineMarkerType>, IpcSender<Option<TimelineMarker>>),
/// Withdraw request for live timeline notifications for a given pipeline.
DropTimelineMarkers(PipelineId, Vec<TimelineMarkerType>),
/// Request a callback directed at the given actor name from the next animation frame
/// executed in the given pipeline.
RequestAnimationFrame(PipelineId, String),
/// Direct the given pipeline to reload the current page.
Reload(PipelineId),
}
#[derive(Debug, Deserialize, Serialize)]
pub struct Modification {
pub attributeName: String,
pub newValue: Option<String>,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum LogLevel {
Log,
Debug,
Info,
Warn,
Error,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ConsoleMessage {
pub message: String,
pub logLevel: LogLevel,
pub filename: String,
pub lineNumber: usize,
pub columnNumber: usize,
}
bitflags! {
#[derive(Deserialize, Serialize)]
pub struct CachedConsoleMessageTypes: u8 {
const PAGE_ERROR = 1 << 0;
const CONSOLE_API = 1 << 1;
}
}
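// Editorial note (not in the original source): these are bitflags, so a request for both
// kinds of cached console messages is expressed by OR-ing the constants, e.g.
// `CachedConsoleMessageTypes::PAGE_ERROR | CachedConsoleMessageTypes::CONSOLE_API`.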
#[derive(Debug, Deserialize, Serialize)]
pub struct PageError {
#[serde(rename = "_type")]
pub type_: String,
pub errorMessage: String,
pub sourceName: String,
pub lineText: String,
pub lineNumber: u32,
pub columnNumber: u32,
pub category: String,
pub timeStamp: u64,
pub error: bool,
pub warning: bool,
pub exception: bool,
pub strict: bool,
pub private: bool,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct ConsoleAPI {
#[serde(rename = "_type")]
pub type_: String,
pub level: String,
pub filename: String,
pub lineNumber: u32,
pub functionName: String,
pub timeStamp: u64,
pub private: bool,
pub arguments: Vec<String>,
}
#[derive(Debug, Deserialize, Serialize)]
pub enum CachedConsoleMessage {
PageError(PageError),
ConsoleAPI(ConsoleAPI),
}
#[derive(Debug, PartialEq)]
pub struct HttpRequest {
pub url: ServoUrl,
pub method: Method,
pub headers: Headers,
pub body: Option<Vec<u8>>,
pub pipeline_id: PipelineId,
pub startedDateTime: Tm,
pub timeStamp: i64,
pub connect_time: u64,
pub send_time: u64,
pub is_xhr: bool,
}
#[derive(Debug, PartialEq)]
pub struct HttpResponse {
pub headers: Option<Headers>,
pub status: Option<(u16, Vec<u8>)>,
pub body: Option<Vec<u8>>,
pub pipeline_id: PipelineId,
}
#[derive(Debug)]
pub enum NetworkEvent {
HttpRequest(HttpRequest),
HttpResponse(HttpResponse),
}
impl TimelineMarker {
pub fn start(name: String) -> StartedTimelineMarker {
StartedTimelineMarker {
name: name,
start_time: PreciseTime::now(),
start_stack: None,
}
}
}
impl StartedTimelineMarker {
pub fn end(self) -> TimelineMarker {
TimelineMarker {
name: self.name,
start_time: self.start_time,
start_stack: self.start_stack,
end_time: PreciseTime::now(),
end_stack: None,
}
}
}
/// A replacement for `time::PreciseTime` that isn't opaque, so we can serialize it.
///
/// The reason why this doesn't go upstream is that `time` is slated to be part of Rust's standard
/// library, which definitely can't have any dependencies on `serde`. But `serde` can't implement
/// `Deserialize` and `Serialize` itself, because `time::PreciseTime` is opaque! A Catch-22. So I'm
/// duplicating the definition here.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub struct PreciseTime(u64);
impl PreciseTime {
pub fn now() -> PreciseTime {
PreciseTime(time::precise_time_ns())
}
pub fn to(&self, later: PreciseTime) -> Duration {
Duration::nanoseconds((later.0 - self.0) as i64)
}
}
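// Editorial sketch (not in the original source): the marker types above are meant to be
// used as a start/end pair, e.g.
//     let marker = TimelineMarker::start("Reflow".to_owned());
//     /* ... timed work ... */
//     let marker = marker.end();
// after which `marker.start_time.to(marker.end_time)` yields the elapsed Duration.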
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize)]
pub struct WorkerId(pub u32);
|
start_stack: Option<Vec<()>>,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct TimelineMarker {
|
random_line_split
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains shared types and messages for use by devtools/script.
//! The traits are here instead of in script so that the devtools crate can be
//! modified independently of the rest of Servo.
#![crate_name = "devtools_traits"]
#![crate_type = "rlib"]
#![allow(non_snake_case)]
#![deny(unsafe_code)]
#[macro_use]
extern crate bitflags;
extern crate hyper;
extern crate ipc_channel;
extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
extern crate msg;
#[macro_use] extern crate serde;
extern crate servo_url;
extern crate time;
use hyper::header::Headers;
use hyper::method::Method;
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::PipelineId;
use servo_url::ServoUrl;
use std::net::TcpStream;
use time::Duration;
use time::Tm;
// Information would be attached to NewGlobal to be received and shown in devtools.
// Extend these fields if we need more information.
#[derive(Debug, Deserialize, Serialize)]
pub struct DevtoolsPageInfo {
pub title: String,
pub url: ServoUrl,
}
#[derive(Clone, Debug, Deserialize, MallocSizeOf, Serialize)]
pub struct CSSError {
pub filename: String,
pub line: u32,
pub column: u32,
pub msg: String
}
/// Messages to instruct the devtools server to update its known actors/state
/// according to changes in the browser.
#[derive(Debug)]
pub enum DevtoolsControlMsg {
/// Messages from threads in the chrome process (resource/constellation/devtools)
FromChrome(ChromeToDevtoolsControlMsg),
/// Messages from script threads
FromScript(ScriptToDevtoolsControlMsg),
}
/// Events that the devtools server must act upon.
#[derive(Debug)]
pub enum ChromeToDevtoolsControlMsg {
/// A new client has connected to the server.
AddClient(TcpStream),
/// The browser is shutting down.
ServerExitMsg,
/// A network event occurred (request, reply, etc.). The actor with the
/// provided name should be notified.
NetworkEvent(String, NetworkEvent),
}
#[derive(Debug, Deserialize, Serialize)]
/// Events that the devtools server must act upon.
pub enum ScriptToDevtoolsControlMsg {
/// A new global object was created, associated with a particular pipeline.
/// The means of communicating directly with it are provided.
NewGlobal((PipelineId, Option<WorkerId>),
IpcSender<DevtoolScriptControlMsg>,
DevtoolsPageInfo),
/// A particular page has invoked the console API.
ConsoleAPI(PipelineId, ConsoleMessage, Option<WorkerId>),
/// An animation frame with the given timestamp was processed in a script thread.
/// The actor with the provided name should be notified.
FramerateTick(String, f64),
/// Report a CSS parse error for the given pipeline
ReportCSSError(PipelineId, CSSError),
}
/// Serialized JS return values
/// TODO: generalize this beyond the EvaluateJS message?
#[derive(Debug, Deserialize, Serialize)]
pub enum EvaluateJSReply {
VoidValue,
NullValue,
BooleanValue(bool),
NumberValue(f64),
StringValue(String),
ActorValue { class: String, uuid: String },
}
#[derive(Debug, Deserialize, Serialize)]
pub struct AttrInfo {
pub namespace: String,
pub name: String,
pub value: String,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct NodeInfo {
pub uniqueId: String,
pub baseURI: String,
pub parent: String,
pub nodeType: u16,
pub namespaceURI: String,
pub nodeName: String,
pub numChildren: usize,
pub name: String,
pub publicId: String,
pub systemId: String,
pub attrs: Vec<AttrInfo>,
pub isDocumentElement: bool,
pub shortValue: String,
pub incompleteValue: bool,
}
pub struct StartedTimelineMarker {
name: String,
start_time: PreciseTime,
start_stack: Option<Vec<()>>,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct TimelineMarker {
pub name: String,
pub start_time: PreciseTime,
pub start_stack: Option<Vec<()>>,
pub end_time: PreciseTime,
pub end_stack: Option<Vec<()>>,
}
#[derive(Clone, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize)]
pub enum TimelineMarkerType {
Reflow,
DOMEvent,
}
/// The properties of a DOM node as computed by layout.
#[derive(Debug, Deserialize, Serialize)]
pub struct ComputedNodeLayout {
pub display: String,
pub position: String,
pub zIndex: String,
pub boxSizing: String,
pub autoMargins: AutoMargins,
pub marginTop: String,
pub marginRight: String,
pub marginBottom: String,
pub marginLeft: String,
pub borderTopWidth: String,
pub borderRightWidth: String,
pub borderBottomWidth: String,
pub borderLeftWidth: String,
pub paddingTop: String,
pub paddingRight: String,
pub paddingBottom: String,
pub paddingLeft: String,
pub width: f32,
pub height: f32,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct AutoMargins {
pub top: bool,
pub right: bool,
pub bottom: bool,
pub left: bool,
}
/// Messages to process in a particular script thread, as instructed by a devtools client.
/// TODO: better error handling, e.g. if pipeline id lookup fails?
#[derive(Debug, Deserialize, Serialize)]
pub enum DevtoolScriptControlMsg {
/// Evaluate a JS snippet in the context of the global for the given pipeline.
EvaluateJS(PipelineId, String, IpcSender<EvaluateJSReply>),
/// Retrieve the details of the root node (i.e. the document) for the given pipeline.
GetRootNode(PipelineId, IpcSender<Option<NodeInfo>>),
/// Retrieve the details of the document element for the given pipeline.
GetDocumentElement(PipelineId, IpcSender<Option<NodeInfo>>),
/// Retrieve the details of the child nodes of the given node in the given pipeline.
GetChildren(PipelineId, String, IpcSender<Option<Vec<NodeInfo>>>),
/// Retrieve the computed layout properties of the given node in the given pipeline.
GetLayout(PipelineId, String, IpcSender<Option<ComputedNodeLayout>>),
/// Retrieve all stored console messages for the given pipeline.
GetCachedMessages(PipelineId, CachedConsoleMessageTypes, IpcSender<Vec<CachedConsoleMessage>>),
/// Update a given node's attributes with a list of modifications.
ModifyAttribute(PipelineId, String, Vec<Modification>),
/// Request live console messages for a given pipeline (true if desired, false otherwise).
WantsLiveNotifications(PipelineId, bool),
/// Request live notifications for a given set of timeline events for a given pipeline.
SetTimelineMarkers(PipelineId, Vec<TimelineMarkerType>, IpcSender<Option<TimelineMarker>>),
/// Withdraw request for live timeline notifications for a given pipeline.
DropTimelineMarkers(PipelineId, Vec<TimelineMarkerType>),
/// Request a callback directed at the given actor name from the next animation frame
/// executed in the given pipeline.
RequestAnimationFrame(PipelineId, String),
/// Direct the given pipeline to reload the current page.
Reload(PipelineId),
}
#[derive(Debug, Deserialize, Serialize)]
pub struct Modification {
pub attributeName: String,
pub newValue: Option<String>,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum LogLevel {
Log,
Debug,
Info,
Warn,
Error,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ConsoleMessage {
pub message: String,
pub logLevel: LogLevel,
pub filename: String,
pub lineNumber: usize,
pub columnNumber: usize,
}
bitflags! {
#[derive(Deserialize, Serialize)]
pub struct CachedConsoleMessageTypes: u8 {
const PAGE_ERROR = 1 << 0;
const CONSOLE_API = 1 << 1;
}
}
#[derive(Debug, Deserialize, Serialize)]
pub struct PageError {
#[serde(rename = "_type")]
pub type_: String,
pub errorMessage: String,
pub sourceName: String,
pub lineText: String,
pub lineNumber: u32,
pub columnNumber: u32,
pub category: String,
pub timeStamp: u64,
pub error: bool,
pub warning: bool,
pub exception: bool,
pub strict: bool,
pub private: bool,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct ConsoleAPI {
#[serde(rename = "_type")]
pub type_: String,
pub level: String,
pub filename: String,
pub lineNumber: u32,
pub functionName: String,
pub timeStamp: u64,
pub private: bool,
pub arguments: Vec<String>,
}
#[derive(Debug, Deserialize, Serialize)]
pub enum CachedConsoleMessage {
PageError(PageError),
ConsoleAPI(ConsoleAPI),
}
#[derive(Debug, PartialEq)]
pub struct HttpRequest {
pub url: ServoUrl,
pub method: Method,
pub headers: Headers,
pub body: Option<Vec<u8>>,
pub pipeline_id: PipelineId,
pub startedDateTime: Tm,
pub timeStamp: i64,
pub connect_time: u64,
pub send_time: u64,
pub is_xhr: bool,
}
#[derive(Debug, PartialEq)]
pub struct HttpResponse {
pub headers: Option<Headers>,
pub status: Option<(u16, Vec<u8>)>,
pub body: Option<Vec<u8>>,
pub pipeline_id: PipelineId,
}
#[derive(Debug)]
pub enum
|
{
HttpRequest(HttpRequest),
HttpResponse(HttpResponse),
}
impl TimelineMarker {
pub fn start(name: String) -> StartedTimelineMarker {
StartedTimelineMarker {
name: name,
start_time: PreciseTime::now(),
start_stack: None,
}
}
}
impl StartedTimelineMarker {
pub fn end(self) -> TimelineMarker {
TimelineMarker {
name: self.name,
start_time: self.start_time,
start_stack: self.start_stack,
end_time: PreciseTime::now(),
end_stack: None,
}
}
}
/// A replacement for `time::PreciseTime` that isn't opaque, so we can serialize it.
///
/// The reason why this doesn't go upstream is that `time` is slated to be part of Rust's standard
/// library, which definitely can't have any dependencies on `serde`. But `serde` can't implement
/// `Deserialize` and `Serialize` itself, because `time::PreciseTime` is opaque! A Catch-22. So I'm
/// duplicating the definition here.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub struct PreciseTime(u64);
impl PreciseTime {
pub fn now() -> PreciseTime {
PreciseTime(time::precise_time_ns())
}
pub fn to(&self, later: PreciseTime) -> Duration {
Duration::nanoseconds((later.0 - self.0) as i64)
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize)]
pub struct WorkerId(pub u32);
|
NetworkEvent
|
identifier_name
|
uart.rs
|
/*
* MIT License
*
* Copyright (c) 2018 Andre Richter <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
use core::ops;
use core::result::Result;
use cortex_a::asm;
use register::{mmio::*, register_bitfields};
use super::gpio;
use super::mbox;
// PL011 UART registers.
//
// Descriptions taken from
// https://github.com/raspberrypi/documentation/files/1888662/BCM2837-ARM-Peripherals.-.Revised.-.V2-1.pdf
register_bitfields! {
u32,
/// Flag Register
FR [
/// Transmit FIFO full. The meaning of this bit depends on the
/// state of the FEN bit in the UARTLCR_H (LCRH) Register. If the
/// FIFO is disabled, this bit is set when the transmit
/// holding register is full. If the FIFO is enabled, the TXFF
/// bit is set when the transmit FIFO is full.
TXFF OFFSET(5) NUMBITS(1) [],
/// Receive FIFO empty. The meaning of this bit depends on the
/// state of the FEN bit in the UARTLCR_H Register. If the
/// FIFO is disabled, this bit is set when the receive holding
/// register is empty. If the FIFO is enabled, the RXFE bit is
/// set when the receive FIFO is empty.
RXFE OFFSET(4) NUMBITS(1) []
],
/// Integer Baud rate divisor
IBRD [
/// Integer Baud rate divisor
IBRD OFFSET(0) NUMBITS(16) []
],
/// Fractional Baud rate divisor
FBRD [
/// Fractional Baud rate divisor
FBRD OFFSET(0) NUMBITS(6) []
],
/// Line Control register
LCRH [
/// Word length. These bits indicate the number of data bits
/// transmitted or received in a frame.
WLEN OFFSET(5) NUMBITS(2) [
FiveBit = 0b00,
SixBit = 0b01,
SevenBit = 0b10,
EightBit = 0b11
]
],
/// Control Register
CR [
/// Receive enable. If this bit is set to 1, the receive
/// section of the UART is enabled. Data reception occurs for
/// UART signals. When the UART is disabled in the middle of
/// reception, it completes the current character before
/// stopping.
RXE OFFSET(9) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// Transmit enable. If this bit is set to 1, the transmit
/// section of the UART is enabled. Data transmission occurs
/// for UART signals. When the UART is disabled in the middle
/// of transmission, it completes the current character before
/// stopping.
TXE OFFSET(8) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// UART enable
UARTEN OFFSET(0) NUMBITS(1) [
/// If the UART is disabled in the middle of transmission
/// or reception, it completes the current character
/// before stopping.
Disabled = 0,
Enabled = 1
]
],
/// Interrupt Clear Register
ICR [
/// Meta field for all pending interrupts
ALL OFFSET(0) NUMBITS(11) []
]
}
#[allow(non_snake_case)]
#[repr(C)]
pub struct RegisterBlock {
DR: ReadWrite<u32>,
__reserved_0: [u32; 5],
FR: ReadOnly<u32, FR::Register>,
__reserved_1: [u32; 2],
IBRD: WriteOnly<u32, IBRD::Register>,
FBRD: WriteOnly<u32, FBRD::Register>,
LCRH: WriteOnly<u32, LCRH::Register>,
CR: WriteOnly<u32, CR::Register>,
__reserved_2: [u32; 4],
ICR: WriteOnly<u32, ICR::Register>,
}
pub struct Uart {
base_addr: usize,
}
#[allow(unused)]
impl Uart {
pub const fn new(base_addr: usize) -> Self {
Uart { base_addr }
}
/// Returns a pointer to the register block
fn ptr(&self) -> *const RegisterBlock {
self.base_addr as *const _
}
/// Set baud rate and characteristics (115200 8N1) and map to GPIO
pub fn init(&self, mbox: &mut mbox::Mbox, gpio: &gpio::GPIO) -> Result<(), &'static str> {
// turn off UART0
self.CR.set(0);
// set up clock for consistent divisor values
let mut data = [2, 4_000_000, 0];
mbox.property(mbox::Tag::SetClockRate, &mut data)
.map_err(|_| "failed to set uart clock rate")?;
// map UART0 to GPIO pins
gpio.GPFSEL1.modify(
gpio::GPFSEL1::FSEL14.val(gpio::GPFSEL::Alt0 as u32)
+ gpio::GPFSEL1::FSEL15.val(gpio::GPFSEL::Alt0 as u32),
);
gpio.GPPUD.write(gpio::GPPUD::PUD::Off);
self.wait_cycles(150);
gpio.GPPUDCLK0
.modify(gpio::GPREGSET0::P14::SET + gpio::GPREGSET0::P15::SET);
self.wait_cycles(150);
gpio.GPPUDCLK0.set(0);
self.ICR.write(ICR::ALL::CLEAR);
self.IBRD.write(IBRD::IBRD.val(2)); // Results in 115200 baud
self.FBRD.write(FBRD::FBRD.val(0xB));
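// Editorial note: with the UART clock forced to 4 MHz via the mailbox call above, the
// PL011 divisor is 4_000_000 / (16 * 115_200) ~ 2.1701, i.e. IBRD = 2 and
// FBRD = round(0.1701 * 64) = 11 (0xB); that is where the two values just written come from.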
self.LCRH.write(LCRH::WLEN::EightBit); // 8N1
self.CR
.write(CR::UARTEN::Enabled + CR::TXE::Enabled + CR::RXE::Enabled);
Ok(())
}
/// Receive a character
pub fn getc(&self) -> char {
// wait until something is in the buffer
loop {
if !self.FR.is_set(FR::RXFE) {
break;
}
asm::nop();
}
// read it and return
let mut ret = self.DR.get() as u8 as char;
// convert carriage return to newline
if ret == '\r' {
ret = '\n'
}
ret
}
/// Display a string
pub fn puts(&self, string: &str) {
for c in string.chars() {
// convert newline to carriage return + newline
if c == '\n' {
self.send('\r')
}
self.send(c);
}
}
/// Send a character
pub fn send(&self, c: char) {
// wait until we can send
loop {
if !self.FR.is_set(FR::TXFF) {
break;
}
asm::nop();
}
// write the character to the buffer
self.DR.set(c as u32);
}
fn wait_cycles(&self, cyc: u32) {
for _ in 0..cyc {
asm::nop();
}
}
}
impl ops::Deref for Uart {
type Target = RegisterBlock;
fn deref(&self) -> &Self::Target
|
}
|
{
unsafe { &*self.ptr() }
}
|
identifier_body
|
uart.rs
|
/*
* MIT License
*
* Copyright (c) 2018 Andre Richter <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
use core::ops;
use core::result::Result;
use cortex_a::asm;
use register::{mmio::*, register_bitfields};
use super::gpio;
use super::mbox;
// PL011 UART registers.
//
// Descriptions taken from
// https://github.com/raspberrypi/documentation/files/1888662/BCM2837-ARM-Peripherals.-.Revised.-.V2-1.pdf
register_bitfields! {
u32,
/// Flag Register
FR [
/// Transmit FIFO full. The meaning of this bit depends on the
/// state of the FEN bit in the UARTLCR_H (LCRH) Register. If the
/// FIFO is disabled, this bit is set when the transmit
/// holding register is full. If the FIFO is enabled, the TXFF
/// bit is set when the transmit FIFO is full.
TXFF OFFSET(5) NUMBITS(1) [],
/// Receive FIFO empty. The meaning of this bit depends on the
/// state of the FEN bit in the UARTLCR_H Register. If the
/// FIFO is disabled, this bit is set when the receive holding
/// register is empty. If the FIFO is enabled, the RXFE bit is
/// set when the receive FIFO is empty.
RXFE OFFSET(4) NUMBITS(1) []
],
/// Integer Baud rate divisor
IBRD [
/// Integer Baud rate divisor
IBRD OFFSET(0) NUMBITS(16) []
],
/// Fractional Baud rate divisor
FBRD [
/// Fractional Baud rate divisor
FBRD OFFSET(0) NUMBITS(6) []
],
/// Line Control register
LCRH [
/// Word length. These bits indicate the number of data bits
/// transmitted or received in a frame.
WLEN OFFSET(5) NUMBITS(2) [
FiveBit = 0b00,
SixBit = 0b01,
SevenBit = 0b10,
EightBit = 0b11
]
],
/// Control Register
CR [
/// Receive enable. If this bit is set to 1, the receive
/// section of the UART is enabled. Data reception occurs for
/// UART signals. When the UART is disabled in the middle of
/// reception, it completes the current character before
/// stopping.
RXE OFFSET(9) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// Transmit enable. If this bit is set to 1, the transmit
/// section of the UART is enabled. Data transmission occurs
/// for UART signals. When the UART is disabled in the middle
/// of transmission, it completes the current character before
/// stopping.
TXE OFFSET(8) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// UART enable
UARTEN OFFSET(0) NUMBITS(1) [
/// If the UART is disabled in the middle of transmission
/// or reception, it completes the current character
/// before stopping.
Disabled = 0,
Enabled = 1
]
],
/// Interrupt Clear Register
ICR [
/// Meta field for all pending interrupts
ALL OFFSET(0) NUMBITS(11) []
]
}
#[allow(non_snake_case)]
#[repr(C)]
pub struct RegisterBlock {
DR: ReadWrite<u32>,
__reserved_0: [u32; 5],
FR: ReadOnly<u32, FR::Register>,
__reserved_1: [u32; 2],
IBRD: WriteOnly<u32, IBRD::Register>,
FBRD: WriteOnly<u32, FBRD::Register>,
LCRH: WriteOnly<u32, LCRH::Register>,
CR: WriteOnly<u32, CR::Register>,
__reserved_2: [u32; 4],
ICR: WriteOnly<u32, ICR::Register>,
}
pub struct Uart {
base_addr: usize,
}
#[allow(unused)]
impl Uart {
pub const fn new(base_addr: usize) -> Self {
Uart { base_addr }
}
/// Returns a pointer to the register block
fn ptr(&self) -> *const RegisterBlock {
self.base_addr as *const _
}
/// Set baud rate and characteristics (115200 8N1) and map to GPIO
pub fn init(&self, mbox: &mut mbox::Mbox, gpio: &gpio::GPIO) -> Result<(), &'static str> {
// turn off UART0
self.CR.set(0);
// set up clock for consistent divisor values
let mut data = [2, 4_000_000, 0];
mbox.property(mbox::Tag::SetClockRate, &mut data)
.map_err(|_| "failed to set uart clock rate")?;
// map UART0 to GPIO pins
gpio.GPFSEL1.modify(
gpio::GPFSEL1::FSEL14.val(gpio::GPFSEL::Alt0 as u32)
+ gpio::GPFSEL1::FSEL15.val(gpio::GPFSEL::Alt0 as u32),
);
gpio.GPPUD.write(gpio::GPPUD::PUD::Off);
self.wait_cycles(150);
gpio.GPPUDCLK0
.modify(gpio::GPREGSET0::P14::SET + gpio::GPREGSET0::P15::SET);
self.wait_cycles(150);
gpio.GPPUDCLK0.set(0);
self.ICR.write(ICR::ALL::CLEAR);
self.IBRD.write(IBRD::IBRD.val(2)); // Results in 115200 baud
self.FBRD.write(FBRD::FBRD.val(0xB));
self.LCRH.write(LCRH::WLEN::EightBit); // 8N1
self.CR
.write(CR::UARTEN::Enabled + CR::TXE::Enabled + CR::RXE::Enabled);
Ok(())
}
/// Receive a character
pub fn getc(&self) -> char {
// wait until something is in the buffer
loop {
if !self.FR.is_set(FR::RXFE) {
break;
}
asm::nop();
}
// read it and return
let mut ret = self.DR.get() as u8 as char;
// convert carriage return to newline
if ret == '\r' {
ret = '\n'
}
ret
}
/// Display a string
pub fn puts(&self, string: &str) {
for c in string.chars() {
// convert newline to carriage return + newline
if c == '\n' {
self.send('\r')
}
self.send(c);
}
}
/// Send a character
pub fn send(&self, c: char) {
// wait until we can send
loop {
if !self.FR.is_set(FR::TXFF)
|
asm::nop();
}
// write the character to the buffer
self.DR.set(c as u32);
}
fn wait_cycles(&self, cyc: u32) {
for _ in 0..cyc {
asm::nop();
}
}
}
impl ops::Deref for Uart {
type Target = RegisterBlock;
fn deref(&self) -> &Self::Target {
unsafe { &*self.ptr() }
}
}
|
{
break;
}
|
conditional_block
|
uart.rs
|
/*
* MIT License
*
* Copyright (c) 2018 Andre Richter <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
use core::ops;
use core::result::Result;
use cortex_a::asm;
use register::{mmio::*, register_bitfields};
use super::gpio;
use super::mbox;
// PL011 UART registers.
//
// Descriptions taken from
// https://github.com/raspberrypi/documentation/files/1888662/BCM2837-ARM-Peripherals.-.Revised.-.V2-1.pdf
register_bitfields! {
u32,
/// Flag Register
FR [
/// Transmit FIFO full. The meaning of this bit depends on the
/// state of the FEN bit in the UARTLCR_H (LCRH) Register. If the
/// FIFO is disabled, this bit is set when the transmit
/// holding register is full. If the FIFO is enabled, the TXFF
/// bit is set when the transmit FIFO is full.
TXFF OFFSET(5) NUMBITS(1) [],
/// Receive FIFO empty. The meaning of this bit depends on the
/// state of the FEN bit in the UARTLCR_H Register. If the
/// FIFO is disabled, this bit is set when the receive holding
/// register is empty. If the FIFO is enabled, the RXFE bit is
/// set when the receive FIFO is empty.
RXFE OFFSET(4) NUMBITS(1) []
],
/// Integer Baud rate divisor
IBRD [
/// Integer Baud rate divisor
IBRD OFFSET(0) NUMBITS(16) []
],
/// Fractional Baud rate divisor
FBRD [
/// Fractional Baud rate divisor
FBRD OFFSET(0) NUMBITS(6) []
],
/// Line Control register
LCRH [
/// Word length. These bits indicate the number of data bits
/// transmitted or received in a frame.
WLEN OFFSET(5) NUMBITS(2) [
FiveBit = 0b00,
SixBit = 0b01,
SevenBit = 0b10,
EightBit = 0b11
]
],
/// Control Register
CR [
/// Receive enable. If this bit is set to 1, the receive
/// section of the UART is enabled. Data reception occurs for
/// UART signals. When the UART is disabled in the middle of
/// reception, it completes the current character before
/// stopping.
RXE OFFSET(9) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// Transmit enable. If this bit is set to 1, the transmit
/// section of the UART is enabled. Data transmission occurs
/// for UART signals. When the UART is disabled in the middle
/// of transmission, it completes the current character before
/// stopping.
TXE OFFSET(8) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// UART enable
UARTEN OFFSET(0) NUMBITS(1) [
/// If the UART is disabled in the middle of transmission
/// or reception, it completes the current character
/// before stopping.
Disabled = 0,
Enabled = 1
]
],
/// Interrupt Clear Register
ICR [
/// Meta field for all pending interrupts
ALL OFFSET(0) NUMBITS(11) []
]
}
#[allow(non_snake_case)]
#[repr(C)]
pub struct RegisterBlock {
DR: ReadWrite<u32>,
__reserved_0: [u32; 5],
FR: ReadOnly<u32, FR::Register>,
__reserved_1: [u32; 2],
IBRD: WriteOnly<u32, IBRD::Register>,
FBRD: WriteOnly<u32, FBRD::Register>,
LCRH: WriteOnly<u32, LCRH::Register>,
CR: WriteOnly<u32, CR::Register>,
__reserved_2: [u32; 4],
ICR: WriteOnly<u32, ICR::Register>,
}
pub struct Uart {
base_addr: usize,
}
#[allow(unused)]
impl Uart {
pub const fn new(base_addr: usize) -> Self {
Uart { base_addr }
|
/// Returns a pointer to the register block
fn ptr(&self) -> *const RegisterBlock {
self.base_addr as *const _
}
/// Set baud rate and characteristics (115200 8N1) and map to GPIO
pub fn init(&self, mbox: &mut mbox::Mbox, gpio: &gpio::GPIO) -> Result<(), &'static str> {
// turn off UART0
self.CR.set(0);
// set up clock for consistent divisor values
let mut data = [2, 4_000_000, 0];
mbox.property(mbox::Tag::SetClockRate, &mut data)
.map_err(|_| "failed to set uart clock rate")?;
// map UART0 to GPIO pins
gpio.GPFSEL1.modify(
gpio::GPFSEL1::FSEL14.val(gpio::GPFSEL::Alt0 as u32)
+ gpio::GPFSEL1::FSEL15.val(gpio::GPFSEL::Alt0 as u32),
);
gpio.GPPUD.write(gpio::GPPUD::PUD::Off);
self.wait_cycles(150);
gpio.GPPUDCLK0
.modify(gpio::GPREGSET0::P14::SET + gpio::GPREGSET0::P15::SET);
self.wait_cycles(150);
gpio.GPPUDCLK0.set(0);
self.ICR.write(ICR::ALL::CLEAR);
self.IBRD.write(IBRD::IBRD.val(2)); // Results in 115200 baud
self.FBRD.write(FBRD::FBRD.val(0xB));
self.LCRH.write(LCRH::WLEN::EightBit); // 8N1
self.CR
.write(CR::UARTEN::Enabled + CR::TXE::Enabled + CR::RXE::Enabled);
Ok(())
}
/// Receive a character
pub fn getc(&self) -> char {
// wait until something is in the buffer
loop {
if !self.FR.is_set(FR::RXFE) {
break;
}
asm::nop();
}
// read it and return
let mut ret = self.DR.get() as u8 as char;
// convert carriage return to newline
if ret == '\r' {
ret = '\n'
}
ret
}
/// Display a string
pub fn puts(&self, string: &str) {
for c in string.chars() {
// convert newline to carriage return + newline
if c == '\n' {
self.send('\r')
}
self.send(c);
}
}
/// Send a character
pub fn send(&self, c: char) {
// wait until we can send
loop {
if !self.FR.is_set(FR::TXFF) {
break;
}
asm::nop();
}
// write the character to the buffer
self.DR.set(c as u32);
}
fn wait_cycles(&self, cyc: u32) {
for _ in 0..cyc {
asm::nop();
}
}
}
impl ops::Deref for Uart {
type Target = RegisterBlock;
fn deref(&self) -> &Self::Target {
unsafe { &*self.ptr() }
}
}
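// Editorial note: implementing Deref to the MMIO RegisterBlock is what lets the methods
// above write `self.CR`, `self.FR`, `self.DR`, etc. directly instead of going through
// `(*self.ptr()).CR`; the unsafe block is confined to this single deref.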
|
}
|
random_line_split
|
uart.rs
|
/*
* MIT License
*
* Copyright (c) 2018 Andre Richter <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
use core::ops;
use core::result::Result;
use cortex_a::asm;
use register::{mmio::*, register_bitfields};
use super::gpio;
use super::mbox;
// PL011 UART registers.
//
// Descriptions taken from
// https://github.com/raspberrypi/documentation/files/1888662/BCM2837-ARM-Peripherals.-.Revised.-.V2-1.pdf
register_bitfields! {
u32,
/// Flag Register
FR [
/// Transmit FIFO full. The meaning of this bit depends on the
/// state of the FEN bit in the UARTLCR_H (LCRH) Register. If the
/// FIFO is disabled, this bit is set when the transmit
/// holding register is full. If the FIFO is enabled, the TXFF
/// bit is set when the transmit FIFO is full.
TXFF OFFSET(5) NUMBITS(1) [],
/// Receive FIFO empty. The meaning of this bit depends on the
/// state of the FEN bit in the UARTLCR_H Register. If the
/// FIFO is disabled, this bit is set when the receive holding
/// register is empty. If the FIFO is enabled, the RXFE bit is
/// set when the receive FIFO is empty.
RXFE OFFSET(4) NUMBITS(1) []
],
/// Integer Baud rate divisor
IBRD [
/// Integer Baud rate divisor
IBRD OFFSET(0) NUMBITS(16) []
],
/// Fractional Baud rate divisor
FBRD [
/// Fractional Baud rate divisor
FBRD OFFSET(0) NUMBITS(6) []
],
/// Line Control register
LCRH [
/// Word length. These bits indicate the number of data bits
/// transmitted or received in a frame.
WLEN OFFSET(5) NUMBITS(2) [
FiveBit = 0b00,
SixBit = 0b01,
SevenBit = 0b10,
EightBit = 0b11
]
],
/// Control Register
CR [
/// Receive enable. If this bit is set to 1, the receive
/// section of the UART is enabled. Data reception occurs for
/// UART signals. When the UART is disabled in the middle of
/// reception, it completes the current character before
/// stopping.
RXE OFFSET(9) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// Transmit enable. If this bit is set to 1, the transmit
/// section of the UART is enabled. Data transmission occurs
/// for UART signals. When the UART is disabled in the middle
/// of transmission, it completes the current character before
/// stopping.
TXE OFFSET(8) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// UART enable
UARTEN OFFSET(0) NUMBITS(1) [
/// If the UART is disabled in the middle of transmission
/// or reception, it completes the current character
/// before stopping.
Disabled = 0,
Enabled = 1
]
],
/// Interupt Clear Register
ICR [
/// Meta field for all pending interrupts
ALL OFFSET(0) NUMBITS(11) []
]
}
#[allow(non_snake_case)]
#[repr(C)]
pub struct
|
{
DR: ReadWrite<u32>,
__reserved_0: [u32; 5],
FR: ReadOnly<u32, FR::Register>,
__reserved_1: [u32; 2],
IBRD: WriteOnly<u32, IBRD::Register>,
FBRD: WriteOnly<u32, FBRD::Register>,
LCRH: WriteOnly<u32, LCRH::Register>,
CR: WriteOnly<u32, CR::Register>,
__reserved_2: [u32; 4],
ICR: WriteOnly<u32, ICR::Register>,
}
pub struct Uart {
base_addr: usize,
}
#[allow(unused)]
impl Uart {
pub const fn new(base_addr: usize) -> Self {
Uart { base_addr }
}
/// Returns a pointer to the register block
fn ptr(&self) -> *const RegisterBlock {
self.base_addr as *const _
}
/// Set baud rate and characteristics (115200 8N1) and map to GPIO
pub fn init(&self, mbox: &mut mbox::Mbox, gpio: &gpio::GPIO) -> Result<(), &'static str> {
// turn off UART0
self.CR.set(0);
// set up clock for consistent divisor values
let mut data = [2, 4_000_000, 0];
mbox.property(mbox::Tag::SetClockRate, &mut data)
.map_err(|_| "failed to set uart clock rate")?;
// map UART0 to GPIO pins
gpio.GPFSEL1.modify(
gpio::GPFSEL1::FSEL14.val(gpio::GPFSEL::Alt0 as u32)
+ gpio::GPFSEL1::FSEL15.val(gpio::GPFSEL::Alt0 as u32),
);
gpio.GPPUD.write(gpio::GPPUD::PUD::Off);
self.wait_cycles(150);
gpio.GPPUDCLK0
.modify(gpio::GPREGSET0::P14::SET + gpio::GPREGSET0::P15::SET);
self.wait_cycles(150);
gpio.GPPUDCLK0.set(0);
self.ICR.write(ICR::ALL::CLEAR);
self.IBRD.write(IBRD::IBRD.val(2)); // Results in 115200 baud
self.FBRD.write(FBRD::FBRD.val(0xB));
self.LCRH.write(LCRH::WLEN::EightBit); // 8N1
self.CR
.write(CR::UARTEN::Enabled + CR::TXE::Enabled + CR::RXE::Enabled);
Ok(())
}
/// Receive a character
pub fn getc(&self) -> char {
// wait until something is in the buffer
loop {
if !self.FR.is_set(FR::RXFE) {
break;
}
asm::nop();
}
// read it and return
let mut ret = self.DR.get() as u8 as char;
// convert carriage return to newline
if ret == '\r' {
ret = '\n'
}
ret
}
/// Display a string
pub fn puts(&self, string: &str) {
for c in string.chars() {
// convert newline to carriage return + newline
if c == '\n' {
self.send('\r')
}
self.send(c);
}
}
/// Send a character
pub fn send(&self, c: char) {
// wait until we can send
loop {
if !self.FR.is_set(FR::TXFF) {
break;
}
asm::nop();
}
// write the character to the buffer
self.DR.set(c as u32);
}
fn wait_cycles(&self, cyc: u32) {
for _ in 0..cyc {
asm::nop();
}
}
}
impl ops::Deref for Uart {
type Target = RegisterBlock;
fn deref(&self) -> &Self::Target {
unsafe { &*self.ptr() }
}
}
|
RegisterBlock
|
identifier_name
|
from_primitive_int.rs
|
use malachite_base::num::basic::traits::One;
use malachite_nz::natural::Natural;
use Rational;
macro_rules! impl_from_unsigned {
($t: ident) => {
impl From<$t> for Rational {
/// Converts a value to a `Rational`, where the value is of a primitive unsigned integer
/// type.
///
/// # Worst-case complexity
/// Constant time and additional memory.
///
/// # Examples
/// See the documentation of the `conversion::from_primitive_int` module.
#[inline]
fn from(u: $t) -> Rational {
Rational {
sign: true,
numerator: Natural::from(u),
denominator: Natural::ONE,
}
}
}
};
}
apply_to_unsigneds!(impl_from_unsigned);
macro_rules! impl_from_signed {
($t: ident) => {
impl From<$t> for Rational {
/// Converts a value to a `Rational`, where the value is of a primitive signed integer
/// type.
///
|
///
/// # Examples
/// See the documentation of the `conversion::from_primitive_int` module.
#[inline]
fn from(i: $t) -> Rational {
Rational {
sign: i >= 0,
numerator: Natural::from(i.unsigned_abs()),
denominator: Natural::ONE,
}
}
}
};
}
apply_to_signeds!(impl_from_signed);
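// Editorial note: with these impls, `Rational::from(7u32)` is 7/1, and `Rational::from(-7i32)`
// stores sign == false with numerator 7 and denominator 1 (matching the fields set above);
// both conversions run in constant time, as the doc comments state.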
|
/// # Worst-case complexity
/// Constant time and additional memory.
|
random_line_split
|
p069.rs
|
const N:usize = 1_000_000;
fn prime(p:&mut [usize]) {
for i in 2..N+1 {
if p[i] != 0 {
continue ;
}
for j in i..N+1 {
if j*i > N {
break;
}
p[j*i] = i;
}
}
}
fn
|
(p:&[usize], v:usize) -> f32 {
let mut ps = Vec::new();
let mut vv = v;
while p[vv] != 0 {
let pr = p[vv];
ps.push(pr);
while vv % pr == 0 {
vv/=pr;
}
}
if vv != 1 {
ps.push(vv);
}
// print!("{} :",v);
// for i in &ps {
// print!("{} ",i);
// }
// println!("");
let mut cnt = v as i32;
for i in 1..(1<<ps.len()) {
let mut mulres = 1;
let mut bitcnt = 1;
for j in 0..ps.len() {
if i & (1<<j) != 0 {
mulres *= ps[j];
bitcnt *= -1;
}
}
cnt += bitcnt*((v/mulres)as i32);
}
return v as f32 / cnt as f32;
}
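// Editorial sketch (not part of the original solution): the subset loop above is
// inclusion-exclusion over the distinct prime factors collected in `ps`, so `cnt` ends up
// equal to Euler's totient phi(v). The same ratio v/phi(v) also follows directly from the
// product formula phi(v) = v * prod over primes p of (1 - 1/p), as in this equivalent helper:
fn phi_ratio_sketch(ps: &[usize], v: usize) -> f32 {
// `ps` is assumed to already hold the distinct prime factors of `v`.
let mut phi = v as f32;
for &p in ps {
phi *= 1.0 - 1.0 / (p as f32);
}
v as f32 / phi
}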
fn main() {
let mut arr =[0 as usize;N+10];
prime(&mut arr);
let mut ans = (1,2.0);
for i in 2..N+1 {
let ret = phi_n(&arr, i);
if ret > ans.1 {
ans = (i,ret)
}
}
println!("{:?}",ans);
}
|
phi_n
|
identifier_name
|
p069.rs
|
const N:usize = 1_000_000;
fn prime(p:&mut [usize]) {
for i in 2..N+1 {
if p[i] != 0 {
continue ;
}
for j in i..N+1 {
if j*i > N {
break;
}
p[j*i] = i;
}
}
}
fn phi_n(p:&[usize], v:usize) -> f32 {
let mut ps = Vec::new();
let mut vv = v;
while p[vv] != 0 {
let pr = p[vv];
ps.push(pr);
while vv % pr == 0 {
vv/=pr;
}
}
if vv != 1
|
// print!("{} :",v);
// for i in &ps {
// print!("{} ",i);
// }
// println!("");
let mut cnt = v as i32;
for i in 1..(1<<ps.len()) {
let mut mulres = 1;
let mut bitcnt = 1;
for j in 0..ps.len() {
if i & (1<<j) != 0 {
mulres *= ps[j];
bitcnt *= -1;
}
}
cnt += bitcnt*((v/mulres)as i32);
}
return v as f32 / cnt as f32;
}
fn main() {
let mut arr =[0 as usize;N+10];
prime(&mut arr);
let mut ans = (1,2.0);
for i in 2..N+1 {
let ret = phi_n(&arr, i);
if ret > ans.1 {
ans = (i,ret)
}
}
println!("{:?}",ans);
}
|
{
ps.push(vv);
}
|
conditional_block
|
p069.rs
|
const N:usize = 1_000_000;
fn prime(p:&mut [usize]) {
for i in 2..N+1 {
if p[i] != 0 {
continue ;
}
for j in i..N+1 {
if j*i > N {
break;
}
p[j*i] = i;
}
}
}
fn phi_n(p:&[usize], v:usize) -> f32 {
let mut ps = Vec::new();
let mut vv = v;
while p[vv] != 0 {
let pr = p[vv];
ps.push(pr);
while vv % pr == 0 {
vv/=pr;
}
}
if vv != 1 {
ps.push(vv);
}
// print!("{} :",v);
// for i in &ps {
// print!("{} ",i);
// }
// println!("");
let mut cnt = v as i32;
for i in 1..(1<<ps.len()) {
let mut mulres = 1;
let mut bitcnt = 1;
for j in 0..ps.len() {
if i & (1<<j) != 0 {
mulres *= ps[j];
bitcnt *= -1;
}
}
cnt += bitcnt*((v/mulres)as i32);
}
return v as f32 / cnt as f32;
}
fn main() {
let mut arr =[0 as usize;N+10];
prime(&mut arr);
let mut ans = (1,2.0);
for i in 2..N+1 {
let ret = phi_n(&arr, i);
if ret > ans.1 {
|
ans = (i,ret)
}
}
println!("{:?}",ans);
}
|
random_line_split
|
|
p069.rs
|
const N:usize = 1_000_000;
fn prime(p:&mut [usize]) {
for i in 2..N+1 {
if p[i] != 0 {
continue ;
}
for j in i..N+1 {
if j*i > N {
break;
}
p[j*i] = i;
}
}
}
fn phi_n(p:&[usize], v:usize) -> f32 {
let mut ps = Vec::new();
let mut vv = v;
while p[vv] != 0 {
let pr = p[vv];
ps.push(pr);
while vv % pr == 0 {
vv/=pr;
}
}
if vv != 1 {
ps.push(vv);
}
// print!("{} :",v);
// for i in &ps {
// print!("{} ",i);
// }
// println!("");
let mut cnt = v as i32;
for i in 1..(1<<ps.len()) {
let mut mulres = 1;
let mut bitcnt = 1;
for j in 0..ps.len() {
if i & (1<<j) != 0 {
mulres *= ps[j];
bitcnt *= -1;
}
}
cnt += bitcnt*((v/mulres)as i32);
}
return v as f32 / cnt as f32;
}
fn main()
|
{
let mut arr =[0 as usize;N+10];
prime(&mut arr);
let mut ans = (1,2.0);
for i in 2..N+1 {
let ret = phi_n(&arr, i);
if ret > ans.1 {
ans = (i,ret)
}
}
println!("{:?}",ans);
}
|
identifier_body
|
|
log-knows-the-names-of-variants-in-std.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate debug;
#[deriving(Clone)]
enum foo {
a(uint),
b(String),
}
fn check_log<T>(exp: String, v: T) {
assert_eq!(exp, format!("{:?}", v));
}
pub fn main() {
let mut x = Some(a(22u));
let exp = "Some(a(22u))".to_string();
let act = format!("{:?}", x);
assert_eq!(act, exp);
check_log(exp, x);
|
check_log(exp, x);
}
|
x = None;
let exp = "None".to_string();
let act = format!("{:?}", x);
assert_eq!(act, exp);
|
random_line_split
|
log-knows-the-names-of-variants-in-std.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate debug;
#[deriving(Clone)]
enum foo {
a(uint),
b(String),
}
fn
|
<T>(exp: String, v: T) {
assert_eq!(exp, format!("{:?}", v));
}
pub fn main() {
let mut x = Some(a(22u));
let exp = "Some(a(22u))".to_string();
let act = format!("{:?}", x);
assert_eq!(act, exp);
check_log(exp, x);
x = None;
let exp = "None".to_string();
let act = format!("{:?}", x);
assert_eq!(act, exp);
check_log(exp, x);
}
|
check_log
|
identifier_name
|
liveness-use-after-send.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate debug;
fn send<T:Send>(ch: _chan<T>, data: T) {
println!("{:?}", ch);
println!("{:?}", data);
fail!();
}
struct _chan<T>(int);
// Tests that "log(debug, message);" is flagged as using
// message after the send deinitializes it
fn test00_start(ch: _chan<Box<int>>, message: Box<int>, _count: Box<int>) {
send(ch, message);
println!("{:?}", message); //~ ERROR use of moved value: `message`
}
fn main() { fail!(); }
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
random_line_split
|
liveness-use-after-send.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate debug;
fn send<T:Send>(ch: _chan<T>, data: T) {
println!("{:?}", ch);
println!("{:?}", data);
fail!();
}
struct _chan<T>(int);
// Tests that "log(debug, message);" is flagged as using
// message after the send deinitializes it
fn test00_start(ch: _chan<Box<int>>, message: Box<int>, _count: Box<int>)
|
fn main() { fail!(); }
|
{
send(ch, message);
println!("{:?}", message); //~ ERROR use of moved value: `message`
}
|
identifier_body
|
liveness-use-after-send.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate debug;
fn
|
<T:Send>(ch: _chan<T>, data: T) {
println!("{:?}", ch);
println!("{:?}", data);
fail!();
}
struct _chan<T>(int);
// Tests that "log(debug, message);" is flagged as using
// message after the send deinitializes it
fn test00_start(ch: _chan<Box<int>>, message: Box<int>, _count: Box<int>) {
send(ch, message);
println!("{:?}", message); //~ ERROR use of moved value: `message`
}
fn main() { fail!(); }
|
send
|
identifier_name
|
htmlframesetelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLFrameSetElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLFrameSetElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLFrameSetElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLFrameSetElement {
pub htmlelement: HTMLElement
}
impl HTMLFrameSetElementDerived for EventTarget {
fn is_htmlframesetelement(&self) -> bool {
self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLFrameSetElementTypeId))
}
}
impl HTMLFrameSetElement {
pub fn new_inherited(localName: DOMString, document: &JSRef<Document>) -> HTMLFrameSetElement {
HTMLFrameSetElement {
htmlelement: HTMLElement::new_inherited(HTMLFrameSetElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JSRef<Document>) -> Temporary<HTMLFrameSetElement> {
let element = HTMLFrameSetElement::new_inherited(localName, document);
Node::reflect_node(box element, document, HTMLFrameSetElementBinding::Wrap)
}
}
pub trait HTMLFrameSetElementMethods {
}
impl Reflectable for HTMLFrameSetElement {
fn reflector<'a>(&'a self) -> &'a Reflector
|
}
|
{
self.htmlelement.reflector()
}
|
identifier_body
|
htmlframesetelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLFrameSetElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLFrameSetElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLFrameSetElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLFrameSetElement {
pub htmlelement: HTMLElement
}
impl HTMLFrameSetElementDerived for EventTarget {
fn is_htmlframesetelement(&self) -> bool {
self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLFrameSetElementTypeId))
}
}
impl HTMLFrameSetElement {
pub fn new_inherited(localName: DOMString, document: &JSRef<Document>) -> HTMLFrameSetElement {
HTMLFrameSetElement {
htmlelement: HTMLElement::new_inherited(HTMLFrameSetElementTypeId, localName, document)
}
}
pub fn
|
(localName: DOMString, document: &JSRef<Document>) -> Temporary<HTMLFrameSetElement> {
let element = HTMLFrameSetElement::new_inherited(localName, document);
Node::reflect_node(box element, document, HTMLFrameSetElementBinding::Wrap)
}
}
pub trait HTMLFrameSetElementMethods {
}
impl Reflectable for HTMLFrameSetElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
|
new
|
identifier_name
|
htmlframesetelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLFrameSetElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLFrameSetElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLFrameSetElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
|
#[deriving(Encodable)]
pub struct HTMLFrameSetElement {
pub htmlelement: HTMLElement
}
impl HTMLFrameSetElementDerived for EventTarget {
fn is_htmlframesetelement(&self) -> bool {
self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLFrameSetElementTypeId))
}
}
impl HTMLFrameSetElement {
pub fn new_inherited(localName: DOMString, document: &JSRef<Document>) -> HTMLFrameSetElement {
HTMLFrameSetElement {
htmlelement: HTMLElement::new_inherited(HTMLFrameSetElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JSRef<Document>) -> Temporary<HTMLFrameSetElement> {
let element = HTMLFrameSetElement::new_inherited(localName, document);
Node::reflect_node(box element, document, HTMLFrameSetElementBinding::Wrap)
}
}
pub trait HTMLFrameSetElementMethods {
}
impl Reflectable for HTMLFrameSetElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
|
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
|
random_line_split
|
size_hint.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::option::Iter;
// #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
// #[stable(feature = "rust1", since = "1.0.0")]
// pub enum Option<T> {
// /// No value
// #[stable(feature = "rust1", since = "1.0.0")]
// None,
// /// Some value `T`
// #[stable(feature = "rust1", since = "1.0.0")]
// Some(T)
// }
// pub struct Iter<'a, A: 'a> { inner: Item<&'a A> }
|
// type Item = &'a A;
//
// #[inline]
// fn next(&mut self) -> Option<&'a A> { self.inner.next() }
// #[inline]
// fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
// }
type T = u32;
#[test]
fn size_hint_test1() {
let x: Option<T> = Some::<T>(7);
let iter: Iter<T> = x.iter();
let (lower, upper): (usize, Option<usize>) = iter.size_hint();
assert_eq!(lower, 1);
assert_eq!(upper, Some::<usize>(1));
}
#[test]
fn size_hint_test2() {
let x: Option<T> = None::<T>;
let iter: Iter<T> = x.iter();
let (lower, upper): (usize, Option<usize>) = iter.size_hint();
assert_eq!(lower, 0);
assert_eq!(upper, Some::<usize>(0));
}
}
|
// impl<'a, A> Iterator for Iter<'a, A> {
|
random_line_split
|
size_hint.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::option::Iter;
// #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
// #[stable(feature = "rust1", since = "1.0.0")]
// pub enum Option<T> {
// /// No value
// #[stable(feature = "rust1", since = "1.0.0")]
// None,
// /// Some value `T`
// #[stable(feature = "rust1", since = "1.0.0")]
// Some(T)
// }
// pub struct Iter<'a, A: 'a> { inner: Item<&'a A> }
// impl<'a, A> Iterator for Iter<'a, A> {
// type Item = &'a A;
//
// #[inline]
// fn next(&mut self) -> Option<&'a A> { self.inner.next() }
// #[inline]
// fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
// }
type T = u32;
#[test]
fn size_hint_test1()
|
#[test]
fn size_hint_test2() {
let x: Option<T> = None::<T>;
let iter: Iter<T> = x.iter();
let (lower, upper): (usize, Option<usize>) = iter.size_hint();
assert_eq!(lower, 0);
assert_eq!(upper, Some::<usize>(0));
}
}
|
{
let x: Option<T> = Some::<T>(7);
let iter: Iter<T> = x.iter();
let (lower, upper): (usize, Option<usize>) = iter.size_hint();
assert_eq!(lower, 1);
assert_eq!(upper, Some::<usize>(1));
}
|
identifier_body
|
size_hint.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::option::Iter;
// #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
// #[stable(feature = "rust1", since = "1.0.0")]
// pub enum Option<T> {
// /// No value
// #[stable(feature = "rust1", since = "1.0.0")]
// None,
// /// Some value `T`
// #[stable(feature = "rust1", since = "1.0.0")]
// Some(T)
// }
// pub struct Iter<'a, A: 'a> { inner: Item<&'a A> }
// impl<'a, A> Iterator for Iter<'a, A> {
// type Item = &'a A;
//
// #[inline]
// fn next(&mut self) -> Option<&'a A> { self.inner.next() }
// #[inline]
// fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
// }
type T = u32;
#[test]
fn size_hint_test1() {
let x: Option<T> = Some::<T>(7);
let iter: Iter<T> = x.iter();
let (lower, upper): (usize, Option<usize>) = iter.size_hint();
assert_eq!(lower, 1);
assert_eq!(upper, Some::<usize>(1));
}
#[test]
fn
|
() {
let x: Option<T> = None::<T>;
let iter: Iter<T> = x.iter();
let (lower, upper): (usize, Option<usize>) = iter.size_hint();
assert_eq!(lower, 0);
assert_eq!(upper, Some::<usize>(0));
}
}
|
size_hint_test2
|
identifier_name
|
common.rs
|
use core::fmt;
pub const SYS_DEBUG: usize = 0;
// Linux compatible
pub const SYS_BRK: usize = 45;
pub const SYS_CHDIR: usize = 12;
pub const SYS_CLOSE: usize = 6;
pub const SYS_CLONE: usize = 120;
pub const CLONE_VM: usize = 0x100;
pub const CLONE_FS: usize = 0x200;
pub const CLONE_FILES: usize = 0x400;
pub const CLONE_VFORK: usize = 0x4000;
pub const SYS_CLOCK_GETTIME: usize = 265;
pub const CLOCK_REALTIME: usize = 1;
pub const CLOCK_MONOTONIC: usize = 4;
pub const SYS_DUP: usize = 41;
pub const SYS_EXECVE: usize = 11;
pub const SYS_SPAWNVE: usize = 1011; // Extra to fix scheme execve
pub const SYS_EXIT: usize = 1;
pub const SYS_FPATH: usize = 3001;
pub const SYS_FSTAT: usize = 28;
pub const SYS_FSYNC: usize = 118;
pub const SYS_FTRUNCATE: usize = 93;
pub const SYS_GETPID: usize = 20;
pub const SYS_LINK: usize = 9;
pub const SYS_LSEEK: usize = 19;
pub const SEEK_SET: usize = 0;
pub const SEEK_CUR: usize = 1;
pub const SEEK_END: usize = 2;
pub const SYS_MKDIR: usize = 39;
pub const SYS_NANOSLEEP: usize = 162;
pub const SYS_OPEN: usize = 5;
pub const O_RDONLY: usize = 0;
pub const O_WRONLY: usize = 1;
pub const O_RDWR: usize = 2;
pub const O_NONBLOCK: usize = 4;
pub const O_APPEND: usize = 8;
pub const O_SHLOCK: usize = 0x10;
pub const O_EXLOCK: usize = 0x20;
pub const O_ASYNC: usize = 0x40;
pub const O_FSYNC: usize = 0x80;
pub const O_CREAT: usize = 0x200;
pub const O_TRUNC: usize = 0x400;
pub const O_EXCL: usize = 0x800;
pub const SYS_READ: usize = 3;
pub const SYS_UNLINK: usize = 10;
pub const SYS_WAITPID: usize = 7;
pub const SYS_WRITE: usize = 4;
pub const SYS_YIELD: usize = 158;
// Rust Memory
pub const SYS_ALLOC: usize = 1000;
pub const SYS_REALLOC: usize = 1001;
pub const SYS_REALLOC_INPLACE: usize = 1002;
pub const SYS_UNALLOC: usize = 1003;
// Structures
#[repr(packed)]
pub struct TimeSpec {
pub tv_sec: i64,
pub tv_nsec: i32,
}
// Errors
pub struct SysError {
errno: isize,
}
impl SysError {
pub fn new(errno: isize) -> SysError {
SysError {
errno: errno
}
}
pub fn mux(result: Result<usize, SysError>) -> usize {
match result {
Ok(value) => value,
Err(error) => -error.errno as usize
}
}
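// Inverse of mux: a failed syscall returns its errno negated and cast to usize.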
pub fn demux(value: usize) -> Result<usize, SysError> {
let errno = -(value as isize);
if errno >= 1 && errno < STR_ERROR.len() as isize {
Err(SysError::new(errno))
} else {
Ok(value)
}
}
pub fn text(&self) -> &str {
if let Some(description) = STR_ERROR.get(self.errno as usize) {
description
} else {
"Unknown Error"
}
}
}
impl fmt::Debug for SysError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.write_str(self.text())
}
}
impl fmt::Display for SysError {
fn
|
(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.write_str(self.text())
}
}
pub const EPERM: isize = 1; /* Operation not permitted */
pub const ENOENT: isize = 2; /* No such file or directory */
pub const ESRCH: isize = 3; /* No such process */
pub const EINTR: isize = 4; /* Interrupted system call */
pub const EIO: isize = 5; /* I/O error */
pub const ENXIO: isize = 6; /* No such device or address */
pub const E2BIG: isize = 7; /* Argument list too long */
pub const ENOEXEC: isize = 8; /* Exec format error */
pub const EBADF: isize = 9; /* Bad file number */
pub const ECHILD: isize = 10; /* No child processes */
pub const EAGAIN: isize = 11; /* Try again */
pub const ENOMEM: isize = 12; /* Out of memory */
pub const EACCES: isize = 13; /* Permission denied */
pub const EFAULT: isize = 14; /* Bad address */
pub const ENOTBLK: isize = 15; /* Block device required */
pub const EBUSY: isize = 16; /* Device or resource busy */
pub const EEXIST: isize = 17; /* File exists */
pub const EXDEV: isize = 18; /* Cross-device link */
pub const ENODEV: isize = 19; /* No such device */
pub const ENOTDIR: isize = 20; /* Not a directory */
pub const EISDIR: isize = 21; /* Is a directory */
pub const EINVAL: isize = 22; /* Invalid argument */
pub const ENFILE: isize = 23; /* File table overflow */
pub const EMFILE: isize = 24; /* Too many open files */
pub const ENOTTY: isize = 25; /* Not a typewriter */
pub const ETXTBSY: isize = 26; /* Text file busy */
pub const EFBIG: isize = 27; /* File too large */
pub const ENOSPC: isize = 28; /* No space left on device */
pub const ESPIPE: isize = 29; /* Illegal seek */
pub const EROFS: isize = 30; /* Read-only file system */
pub const EMLINK: isize = 31; /* Too many links */
pub const EPIPE: isize = 32; /* Broken pipe */
pub const EDOM: isize = 33; /* Math argument out of domain of func */
pub const ERANGE: isize = 34; /* Math result not representable */
pub const EDEADLK: isize = 35; /* Resource deadlock would occur */
pub const ENAMETOOLONG: isize = 36; /* File name too long */
pub const ENOLCK: isize = 37; /* No record locks available */
pub const ENOSYS: isize = 38; /* Function not implemented */
pub const ENOTEMPTY: isize = 39; /* Directory not empty */
pub const ELOOP: isize = 40; /* Too many symbolic links encountered */
pub const EWOULDBLOCK: isize = 41; /* Operation would block */
pub const ENOMSG: isize = 42; /* No message of desired type */
pub const EIDRM: isize = 43; /* Identifier removed */
pub const ECHRNG: isize = 44; /* Channel number out of range */
pub const EL2NSYNC: isize = 45; /* Level 2 not synchronized */
pub const EL3HLT: isize = 46; /* Level 3 halted */
pub const EL3RST: isize = 47; /* Level 3 reset */
pub const ELNRNG: isize = 48; /* Link number out of range */
pub const EUNATCH: isize = 49; /* Protocol driver not attached */
pub const ENOCSI: isize = 50; /* No CSI structure available */
pub const EL2HLT: isize = 51; /* Level 2 halted */
pub const EBADE: isize = 52; /* Invalid exchange */
pub const EBADR: isize = 53; /* Invalid request descriptor */
pub const EXFULL: isize = 54; /* Exchange full */
pub const ENOANO: isize = 55; /* No anode */
pub const EBADRQC: isize = 56; /* Invalid request code */
pub const EBADSLT: isize = 57; /* Invalid slot */
pub const EDEADLOCK: isize = 58; /* Resource deadlock would occur */
pub const EBFONT: isize = 59; /* Bad font file format */
pub const ENOSTR: isize = 60; /* Device not a stream */
pub const ENODATA: isize = 61; /* No data available */
pub const ETIME: isize = 62; /* Timer expired */
pub const ENOSR: isize = 63; /* Out of streams resources */
pub const ENONET: isize = 64; /* Machine is not on the network */
pub const ENOPKG: isize = 65; /* Package not installed */
pub const EREMOTE: isize = 66; /* Object is remote */
pub const ENOLINK: isize = 67; /* Link has been severed */
pub const EADV: isize = 68; /* Advertise error */
pub const ESRMNT: isize = 69; /* Srmount error */
pub const ECOMM: isize = 70; /* Communication error on send */
pub const EPROTO: isize = 71; /* Protocol error */
pub const EMULTIHOP: isize = 72; /* Multihop attempted */
pub const EDOTDOT: isize = 73; /* RFS specific error */
pub const EBADMSG: isize = 74; /* Not a data message */
pub const EOVERFLOW: isize = 75; /* Value too large for defined data type */
pub const ENOTUNIQ: isize = 76; /* Name not unique on network */
pub const EBADFD: isize = 77; /* File descriptor in bad state */
pub const EREMCHG: isize = 78; /* Remote address changed */
pub const ELIBACC: isize = 79; /* Can not access a needed shared library */
pub const ELIBBAD: isize = 80; /* Accessing a corrupted shared library */
pub const ELIBSCN: isize = 81; /* .lib section in a.out corrupted */
pub const ELIBMAX: isize = 82; /* Attempting to link in too many shared libraries */
pub const ELIBEXEC: isize = 83; /* Cannot exec a shared library directly */
pub const EILSEQ: isize = 84; /* Illegal byte sequence */
pub const ERESTART: isize = 85; /* Interrupted system call should be restarted */
pub const ESTRPIPE: isize = 86; /* Streams pipe error */
pub const EUSERS: isize = 87; /* Too many users */
pub const ENOTSOCK: isize = 88; /* Socket operation on non-socket */
pub const EDESTADDRREQ: isize = 89; /* Destination address required */
pub const EMSGSIZE: isize = 90; /* Message too long */
pub const EPROTOTYPE: isize = 91; /* Protocol wrong type for socket */
pub const ENOPROTOOPT: isize = 92; /* Protocol not available */
pub const EPROTONOSUPPORT: isize = 93; /* Protocol not supported */
pub const ESOCKTNOSUPPORT: isize = 94; /* Socket type not supported */
pub const EOPNOTSUPP: isize = 95; /* Operation not supported on transport endpoint */
pub const EPFNOSUPPORT: isize = 96; /* Protocol family not supported */
pub const EAFNOSUPPORT: isize = 97; /* Address family not supported by protocol */
pub const EADDRINUSE: isize = 98; /* Address already in use */
pub const EADDRNOTAVAIL: isize = 99; /* Cannot assign requested address */
pub const ENETDOWN: isize = 100; /* Network is down */
pub const ENETUNREACH: isize = 101; /* Network is unreachable */
pub const ENETRESET: isize = 102; /* Network dropped connection because of reset */
pub const ECONNABORTED: isize = 103; /* Software caused connection abort */
pub const ECONNRESET: isize = 104; /* Connection reset by peer */
pub const ENOBUFS: isize = 105; /* No buffer space available */
pub const EISCONN: isize = 106; /* Transport endpoint is already connected */
pub const ENOTCONN: isize = 107; /* Transport endpoint is not connected */
pub const ESHUTDOWN: isize = 108; /* Cannot send after transport endpoint shutdown */
pub const ETOOMANYREFS: isize = 109; /* Too many references: cannot splice */
pub const ETIMEDOUT: isize = 110; /* Connection timed out */
pub const ECONNREFUSED: isize = 111; /* Connection refused */
pub const EHOSTDOWN: isize = 112; /* Host is down */
pub const EHOSTUNREACH: isize = 113; /* No route to host */
pub const EALREADY: isize = 114; /* Operation already in progress */
pub const EINPROGRESS: isize = 115; /* Operation now in progress */
pub const ESTALE: isize = 116; /* Stale NFS file handle */
pub const EUCLEAN: isize = 117; /* Structure needs cleaning */
pub const ENOTNAM: isize = 118; /* Not a XENIX named type file */
pub const ENAVAIL: isize = 119; /* No XENIX semaphores available */
pub const EISNAM: isize = 120; /* Is a named type file */
pub const EREMOTEIO: isize = 121; /* Remote I/O error */
pub const EDQUOT: isize = 122; /* Quota exceeded */
pub const ENOMEDIUM: isize = 123; /* No medium found */
pub const EMEDIUMTYPE: isize = 124; /* Wrong medium type */
pub const ECANCELED: isize = 125; /* Operation Canceled */
pub const ENOKEY: isize = 126; /* Required key not available */
pub const EKEYEXPIRED: isize = 127; /* Key has expired */
pub const EKEYREVOKED: isize = 128; /* Key has been revoked */
pub const EKEYREJECTED: isize = 129; /* Key was rejected by service */
pub const EOWNERDEAD: isize = 130; /* Owner died */
pub const ENOTRECOVERABLE: isize = 131; /* State not recoverable */
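// Human-readable descriptions indexed by errno (index 0 is success).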
pub const STR_ERROR: [&'static str; 132] = [
"Success",
"Operation not permitted",
"No such file or directory",
"No such process",
"Interrupted system call",
"I/O error",
"No such device or address",
"Argument list too long",
"Exec format error",
"Bad file number",
"No child processes",
"Try again",
"Out of memory",
"Permission denied",
"Bad address",
"Block device required",
"Device or resource busy",
"File exists",
"Cross-device link",
"No such device",
"Not a directory",
"Is a directory",
"Invalid argument",
"File table overflow",
"Too many open files",
"Not a typewriter",
"Text file busy",
"File too large",
"No space left on device",
"Illegal seek",
"Read-only file system",
"Too many links",
"Broken pipe",
"Math argument out of domain of func",
"Math result not representable",
"Resource deadlock would occur",
"File name too long",
"No record locks available",
"Function not implemented",
"Directory not empty",
"Too many symbolic links encountered",
"Operation would block",
"No message of desired type",
"Identifier removed",
"Channel number out of range",
"Level 2 not synchronized",
"Level 3 halted",
"Level 3 reset",
"Link number out of range",
"Protocol driver not attached",
"No CSI structure available",
"Level 2 halted",
"Invalid exchange",
"Invalid request descriptor",
"Exchange full",
"No anode",
"Invalid request code",
"Invalid slot",
"Resource deadlock would occur",
"Bad font file format",
"Device not a stream",
"No data available",
"Timer expired",
"Out of streams resources",
"Machine is not on the network",
"Package not installed",
"Object is remote",
"Link has been severed",
"Advertise error",
"Srmount error",
"Communication error on send",
"Protocol error",
"Multihop attempted",
"RFS specific error",
"Not a data message",
"Value too large for defined data type",
"Name not unique on network",
"File descriptor in bad state",
"Remote address changed",
"Can not access a needed shared library",
"Accessing a corrupted shared library",
".lib section in a.out corrupted",
"Attempting to link in too many shared libraries",
"Cannot exec a shared library directly",
"Illegal byte sequence",
"Interrupted system call should be restarted",
"Streams pipe error",
"Too many users",
"Socket operation on non-socket",
"Destination address required",
"Message too long",
"Protocol wrong type for socket",
"Protocol not available",
"Protocol not supported",
"Socket type not supported",
"Operation not supported on transport endpoint",
"Protocol family not supported",
"Address family not supported by protocol",
"Address already in use",
"Cannot assign requested address",
"Network is down",
"Network is unreachable",
"Network dropped connection because of reset",
"Software caused connection abort",
"Connection reset by peer",
"No buffer space available",
"Transport endpoint is already connected",
"Transport endpoint is not connected",
"Cannot send after transport endpoint shutdown",
"Too many references: cannot splice",
"Connection timed out",
"Connection refused",
"Host is down",
"No route to host",
"Operation already in progress",
"Operation now in progress",
"Stale NFS file handle",
"Structure needs cleaning",
"Not a XENIX named type file",
"No XENIX semaphores available",
"Is a named type file",
"Remote I/O error",
"Quota exceeded",
"No medium found",
"Wrong medium type",
"Operation Canceled",
"Required key not available",
"Key has expired",
"Key has been revoked",
"Key was rejected by service",
"Owner died",
"State not recoverable",
];
|
fmt
|
identifier_name
|
common.rs
|
use core::fmt;
pub const SYS_DEBUG: usize = 0;
// Linux compatible
pub const SYS_BRK: usize = 45;
pub const SYS_CHDIR: usize = 12;
pub const SYS_CLOSE: usize = 6;
pub const SYS_CLONE: usize = 120;
pub const CLONE_VM: usize = 0x100;
pub const CLONE_FS: usize = 0x200;
pub const CLONE_FILES: usize = 0x400;
pub const CLONE_VFORK: usize = 0x4000;
pub const SYS_CLOCK_GETTIME: usize = 265;
pub const CLOCK_REALTIME: usize = 1;
pub const CLOCK_MONOTONIC: usize = 4;
pub const SYS_DUP: usize = 41;
pub const SYS_EXECVE: usize = 11;
pub const SYS_SPAWNVE: usize = 1011; //Extra to fix scheme execve
pub const SYS_EXIT: usize = 1;
pub const SYS_FPATH: usize = 3001;
pub const SYS_FSTAT: usize = 28;
pub const SYS_FSYNC: usize = 118;
pub const SYS_FTRUNCATE: usize = 93;
pub const SYS_GETPID: usize = 20;
pub const SYS_LINK: usize = 9;
pub const SYS_LSEEK: usize = 19;
pub const SEEK_SET: usize = 0;
pub const SEEK_CUR: usize = 1;
pub const SEEK_END: usize = 2;
pub const SYS_MKDIR: usize = 39;
pub const SYS_NANOSLEEP: usize = 162;
pub const SYS_OPEN: usize = 5;
pub const O_RDONLY: usize = 0;
pub const O_WRONLY: usize = 1;
pub const O_RDWR: usize = 2;
pub const O_NONBLOCK: usize = 4;
pub const O_APPEND: usize = 8;
pub const O_SHLOCK: usize = 0x10;
pub const O_EXLOCK: usize = 0x20;
pub const O_ASYNC: usize = 0x40;
pub const O_FSYNC: usize = 0x80;
pub const O_CREAT: usize = 0x200;
pub const O_TRUNC: usize = 0x400;
pub const O_EXCL: usize = 0x800;
pub const SYS_READ: usize = 3;
pub const SYS_UNLINK: usize = 10;
pub const SYS_WAITPID: usize = 7;
pub const SYS_WRITE: usize = 4;
pub const SYS_YIELD: usize = 158;
// Rust Memory
pub const SYS_ALLOC: usize = 1000;
pub const SYS_REALLOC: usize = 1001;
pub const SYS_REALLOC_INPLACE: usize = 1002;
pub const SYS_UNALLOC: usize = 1003;
// Structures
#[repr(packed)]
pub struct TimeSpec {
pub tv_sec: i64,
pub tv_nsec: i32,
}
// Errors
pub struct SysError {
errno: isize,
}
impl SysError {
pub fn new(errno: isize) -> SysError {
SysError {
errno: errno
}
}
pub fn mux(result: Result<usize, SysError>) -> usize {
match result {
Ok(value) => value,
Err(error) => -error.errno as usize
}
}
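// Inverse of mux: a failed syscall returns its errno negated and cast to usize.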
pub fn demux(value: usize) -> Result<usize, SysError> {
let errno = -(value as isize);
if errno >= 1 && errno < STR_ERROR.len() as isize {
Err(SysError::new(errno))
} else {
Ok(value)
}
}
pub fn text(&self) -> &str {
if let Some(description) = STR_ERROR.get(self.errno as usize) {
description
} else {
"Unknown Error"
}
}
}
impl fmt::Debug for SysError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.write_str(self.text())
}
}
impl fmt::Display for SysError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.write_str(self.text())
}
}
pub const EPERM: isize = 1; /* Operation not permitted */
pub const ENOENT: isize = 2; /* No such file or directory */
pub const ESRCH: isize = 3; /* No such process */
pub const EINTR: isize = 4; /* Interrupted system call */
pub const EIO: isize = 5; /* I/O error */
pub const ENXIO: isize = 6; /* No such device or address */
pub const E2BIG: isize = 7; /* Argument list too long */
pub const ENOEXEC: isize = 8; /* Exec format error */
pub const EBADF: isize = 9; /* Bad file number */
pub const ECHILD: isize = 10; /* No child processes */
pub const EAGAIN: isize = 11; /* Try again */
pub const ENOMEM: isize = 12; /* Out of memory */
pub const EACCES: isize = 13; /* Permission denied */
pub const EFAULT: isize = 14; /* Bad address */
pub const ENOTBLK: isize = 15; /* Block device required */
pub const EBUSY: isize = 16; /* Device or resource busy */
pub const EEXIST: isize = 17; /* File exists */
pub const EXDEV: isize = 18; /* Cross-device link */
pub const ENODEV: isize = 19; /* No such device */
pub const ENOTDIR: isize = 20; /* Not a directory */
pub const EISDIR: isize = 21; /* Is a directory */
pub const EINVAL: isize = 22; /* Invalid argument */
pub const ENFILE: isize = 23; /* File table overflow */
pub const EMFILE: isize = 24; /* Too many open files */
pub const ENOTTY: isize = 25; /* Not a typewriter */
pub const ETXTBSY: isize = 26; /* Text file busy */
pub const EFBIG: isize = 27; /* File too large */
pub const ENOSPC: isize = 28; /* No space left on device */
pub const ESPIPE: isize = 29; /* Illegal seek */
pub const EROFS: isize = 30; /* Read-only file system */
pub const EMLINK: isize = 31; /* Too many links */
pub const EPIPE: isize = 32; /* Broken pipe */
pub const EDOM: isize = 33; /* Math argument out of domain of func */
pub const ERANGE: isize = 34; /* Math result not representable */
pub const EDEADLK: isize = 35; /* Resource deadlock would occur */
pub const ENAMETOOLONG: isize = 36; /* File name too long */
pub const ENOLCK: isize = 37; /* No record locks available */
pub const ENOSYS: isize = 38; /* Function not implemented */
pub const ENOTEMPTY: isize = 39; /* Directory not empty */
pub const ELOOP: isize = 40; /* Too many symbolic links encountered */
pub const EWOULDBLOCK: isize = 41; /* Operation would block */
pub const ENOMSG: isize = 42; /* No message of desired type */
pub const EIDRM: isize = 43; /* Identifier removed */
pub const ECHRNG: isize = 44; /* Channel number out of range */
pub const EL2NSYNC: isize = 45; /* Level 2 not synchronized */
pub const EL3HLT: isize = 46; /* Level 3 halted */
pub const EL3RST: isize = 47; /* Level 3 reset */
pub const ELNRNG: isize = 48; /* Link number out of range */
pub const EUNATCH: isize = 49; /* Protocol driver not attached */
pub const ENOCSI: isize = 50; /* No CSI structure available */
pub const EL2HLT: isize = 51; /* Level 2 halted */
pub const EBADE: isize = 52; /* Invalid exchange */
pub const EBADR: isize = 53; /* Invalid request descriptor */
pub const EXFULL: isize = 54; /* Exchange full */
pub const ENOANO: isize = 55; /* No anode */
pub const EBADRQC: isize = 56; /* Invalid request code */
pub const EBADSLT: isize = 57; /* Invalid slot */
pub const EDEADLOCK: isize = 58; /* Resource deadlock would occur */
pub const EBFONT: isize = 59; /* Bad font file format */
pub const ENOSTR: isize = 60; /* Device not a stream */
pub const ENODATA: isize = 61; /* No data available */
pub const ETIME: isize = 62; /* Timer expired */
pub const ENOSR: isize = 63; /* Out of streams resources */
pub const ENONET: isize = 64; /* Machine is not on the network */
pub const ENOPKG: isize = 65; /* Package not installed */
pub const EREMOTE: isize = 66; /* Object is remote */
pub const ENOLINK: isize = 67; /* Link has been severed */
pub const EADV: isize = 68; /* Advertise error */
pub const ESRMNT: isize = 69; /* Srmount error */
pub const ECOMM: isize = 70; /* Communication error on send */
pub const EPROTO: isize = 71; /* Protocol error */
pub const EMULTIHOP: isize = 72; /* Multihop attempted */
pub const EDOTDOT: isize = 73; /* RFS specific error */
pub const EBADMSG: isize = 74; /* Not a data message */
pub const EOVERFLOW: isize = 75; /* Value too large for defined data type */
pub const ENOTUNIQ: isize = 76; /* Name not unique on network */
pub const EBADFD: isize = 77; /* File descriptor in bad state */
pub const EREMCHG: isize = 78; /* Remote address changed */
pub const ELIBACC: isize = 79; /* Can not access a needed shared library */
pub const ELIBBAD: isize = 80; /* Accessing a corrupted shared library */
pub const ELIBSCN: isize = 81; /* .lib section in a.out corrupted */
pub const ELIBMAX: isize = 82; /* Attempting to link in too many shared libraries */
pub const ELIBEXEC: isize = 83; /* Cannot exec a shared library directly */
pub const EILSEQ: isize = 84; /* Illegal byte sequence */
pub const ERESTART: isize = 85; /* Interrupted system call should be restarted */
pub const ESTRPIPE: isize = 86; /* Streams pipe error */
pub const EUSERS: isize = 87; /* Too many users */
pub const ENOTSOCK: isize = 88; /* Socket operation on non-socket */
pub const EDESTADDRREQ: isize = 89; /* Destination address required */
pub const EMSGSIZE: isize = 90; /* Message too long */
pub const EPROTOTYPE: isize = 91; /* Protocol wrong type for socket */
pub const ENOPROTOOPT: isize = 92; /* Protocol not available */
pub const EPROTONOSUPPORT: isize = 93; /* Protocol not supported */
pub const ESOCKTNOSUPPORT: isize = 94; /* Socket type not supported */
pub const EOPNOTSUPP: isize = 95; /* Operation not supported on transport endpoint */
pub const EPFNOSUPPORT: isize = 96; /* Protocol family not supported */
pub const EAFNOSUPPORT: isize = 97; /* Address family not supported by protocol */
pub const EADDRINUSE: isize = 98; /* Address already in use */
pub const EADDRNOTAVAIL: isize = 99; /* Cannot assign requested address */
pub const ENETDOWN: isize = 100; /* Network is down */
pub const ENETUNREACH: isize = 101; /* Network is unreachable */
pub const ENETRESET: isize = 102; /* Network dropped connection because of reset */
pub const ECONNABORTED: isize = 103; /* Software caused connection abort */
pub const ECONNRESET: isize = 104; /* Connection reset by peer */
pub const ENOBUFS: isize = 105; /* No buffer space available */
pub const EISCONN: isize = 106; /* Transport endpoint is already connected */
pub const ENOTCONN: isize = 107; /* Transport endpoint is not connected */
pub const ESHUTDOWN: isize = 108; /* Cannot send after transport endpoint shutdown */
pub const ETOOMANYREFS: isize = 109; /* Too many references: cannot splice */
pub const ETIMEDOUT: isize = 110; /* Connection timed out */
pub const ECONNREFUSED: isize = 111; /* Connection refused */
pub const EHOSTDOWN: isize = 112; /* Host is down */
pub const EHOSTUNREACH: isize = 113; /* No route to host */
pub const EALREADY: isize = 114; /* Operation already in progress */
pub const EINPROGRESS: isize = 115; /* Operation now in progress */
pub const ESTALE: isize = 116; /* Stale NFS file handle */
pub const EUCLEAN: isize = 117; /* Structure needs cleaning */
pub const ENOTNAM: isize = 118; /* Not a XENIX named type file */
pub const ENAVAIL: isize = 119; /* No XENIX semaphores available */
pub const EISNAM: isize = 120; /* Is a named type file */
pub const EREMOTEIO: isize = 121; /* Remote I/O error */
pub const EDQUOT: isize = 122; /* Quota exceeded */
pub const ENOMEDIUM: isize = 123; /* No medium found */
|
pub const ENOKEY: isize = 126; /* Required key not available */
pub const EKEYEXPIRED: isize = 127; /* Key has expired */
pub const EKEYREVOKED: isize = 128; /* Key has been revoked */
pub const EKEYREJECTED: isize = 129; /* Key was rejected by service */
pub const EOWNERDEAD: isize = 130; /* Owner died */
pub const ENOTRECOVERABLE: isize = 131; /* State not recoverable */
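// Human-readable descriptions indexed by errno (index 0 is success).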
pub const STR_ERROR: [&'static str; 132] = [
"Success",
"Operation not permitted",
"No such file or directory",
"No such process",
"Interrupted system call",
"I/O error",
"No such device or address",
"Argument list too long",
"Exec format error",
"Bad file number",
"No child processes",
"Try again",
"Out of memory",
"Permission denied",
"Bad address",
"Block device required",
"Device or resource busy",
"File exists",
"Cross-device link",
"No such device",
"Not a directory",
"Is a directory",
"Invalid argument",
"File table overflow",
"Too many open files",
"Not a typewriter",
"Text file busy",
"File too large",
"No space left on device",
"Illegal seek",
"Read-only file system",
"Too many links",
"Broken pipe",
"Math argument out of domain of func",
"Math result not representable",
"Resource deadlock would occur",
"File name too long",
"No record locks available",
"Function not implemented",
"Directory not empty",
"Too many symbolic links encountered",
"Operation would block",
"No message of desired type",
"Identifier removed",
"Channel number out of range",
"Level 2 not synchronized",
"Level 3 halted",
"Level 3 reset",
"Link number out of range",
"Protocol driver not attached",
"No CSI structure available",
"Level 2 halted",
"Invalid exchange",
"Invalid request descriptor",
"Exchange full",
"No anode",
"Invalid request code",
"Invalid slot",
"Resource deadlock would occur",
"Bad font file format",
"Device not a stream",
"No data available",
"Timer expired",
"Out of streams resources",
"Machine is not on the network",
"Package not installed",
"Object is remote",
"Link has been severed",
"Advertise error",
"Srmount error",
"Communication error on send",
"Protocol error",
"Multihop attempted",
"RFS specific error",
"Not a data message",
"Value too large for defined data type",
"Name not unique on network",
"File descriptor in bad state",
"Remote address changed",
"Can not access a needed shared library",
"Accessing a corrupted shared library",
".lib section in a.out corrupted",
"Attempting to link in too many shared libraries",
"Cannot exec a shared library directly",
"Illegal byte sequence",
"Interrupted system call should be restarted",
"Streams pipe error",
"Too many users",
"Socket operation on non-socket",
"Destination address required",
"Message too long",
"Protocol wrong type for socket",
"Protocol not available",
"Protocol not supported",
"Socket type not supported",
"Operation not supported on transport endpoint",
"Protocol family not supported",
"Address family not supported by protocol",
"Address already in use",
"Cannot assign requested address",
"Network is down",
"Network is unreachable",
"Network dropped connection because of reset",
"Software caused connection abort",
"Connection reset by peer",
"No buffer space available",
"Transport endpoint is already connected",
"Transport endpoint is not connected",
"Cannot send after transport endpoint shutdown",
"Too many references: cannot splice",
"Connection timed out",
"Connection refused",
"Host is down",
"No route to host",
"Operation already in progress",
"Operation now in progress",
"Stale NFS file handle",
"Structure needs cleaning",
"Not a XENIX named type file",
"No XENIX semaphores available",
"Is a named type file",
"Remote I/O error",
"Quota exceeded",
"No medium found",
"Wrong medium type",
"Operation Canceled",
"Required key not available",
"Key has expired",
"Key has been revoked",
"Key was rejected by service",
"Owner died",
"State not recoverable",
];
|
pub const EMEDIUMTYPE: isize = 124; /* Wrong medium type */
pub const ECANCELED: isize = 125; /* Operation Canceled */
|
random_line_split
|
common.rs
|
use core::fmt;
pub const SYS_DEBUG: usize = 0;
// Linux compatible
pub const SYS_BRK: usize = 45;
pub const SYS_CHDIR: usize = 12;
pub const SYS_CLOSE: usize = 6;
pub const SYS_CLONE: usize = 120;
pub const CLONE_VM: usize = 0x100;
pub const CLONE_FS: usize = 0x200;
pub const CLONE_FILES: usize = 0x400;
pub const CLONE_VFORK: usize = 0x4000;
pub const SYS_CLOCK_GETTIME: usize = 265;
pub const CLOCK_REALTIME: usize = 1;
pub const CLOCK_MONOTONIC: usize = 4;
pub const SYS_DUP: usize = 41;
pub const SYS_EXECVE: usize = 11;
pub const SYS_SPAWNVE: usize = 1011; //Extra to fix scheme execve
pub const SYS_EXIT: usize = 1;
pub const SYS_FPATH: usize = 3001;
pub const SYS_FSTAT: usize = 28;
pub const SYS_FSYNC: usize = 118;
pub const SYS_FTRUNCATE: usize = 93;
pub const SYS_GETPID: usize = 20;
pub const SYS_LINK: usize = 9;
pub const SYS_LSEEK: usize = 19;
pub const SEEK_SET: usize = 0;
pub const SEEK_CUR: usize = 1;
pub const SEEK_END: usize = 2;
pub const SYS_MKDIR: usize = 39;
pub const SYS_NANOSLEEP: usize = 162;
pub const SYS_OPEN: usize = 5;
pub const O_RDONLY: usize = 0;
pub const O_WRONLY: usize = 1;
pub const O_RDWR: usize = 2;
pub const O_NONBLOCK: usize = 4;
pub const O_APPEND: usize = 8;
pub const O_SHLOCK: usize = 0x10;
pub const O_EXLOCK: usize = 0x20;
pub const O_ASYNC: usize = 0x40;
pub const O_FSYNC: usize = 0x80;
pub const O_CREAT: usize = 0x200;
pub const O_TRUNC: usize = 0x400;
pub const O_EXCL: usize = 0x800;
pub const SYS_READ: usize = 3;
pub const SYS_UNLINK: usize = 10;
pub const SYS_WAITPID: usize = 7;
pub const SYS_WRITE: usize = 4;
pub const SYS_YIELD: usize = 158;
// Rust Memory
pub const SYS_ALLOC: usize = 1000;
pub const SYS_REALLOC: usize = 1001;
pub const SYS_REALLOC_INPLACE: usize = 1002;
pub const SYS_UNALLOC: usize = 1003;
// Structures
#[repr(packed)]
pub struct TimeSpec {
pub tv_sec: i64,
pub tv_nsec: i32,
}
// Errors
pub struct SysError {
errno: isize,
}
impl SysError {
pub fn new(errno: isize) -> SysError {
SysError {
errno: errno
}
}
pub fn mux(result: Result<usize, SysError>) -> usize {
match result {
Ok(value) => value,
Err(error) => -error.errno as usize
}
}
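// Inverse of mux: a failed syscall returns its errno negated and cast to usize.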
pub fn demux(value: usize) -> Result<usize, SysError> {
let errno = -(value as isize);
if errno >= 1 && errno < STR_ERROR.len() as isize {
Err(SysError::new(errno))
} else {
Ok(value)
}
}
pub fn text(&self) -> &str {
if let Some(description) = STR_ERROR.get(self.errno as usize) {
description
} else {
"Unknown Error"
}
}
}
impl fmt::Debug for SysError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error>
|
}
impl fmt::Display for SysError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.write_str(self.text())
}
}
pub const EPERM: isize = 1; /* Operation not permitted */
pub const ENOENT: isize = 2; /* No such file or directory */
pub const ESRCH: isize = 3; /* No such process */
pub const EINTR: isize = 4; /* Interrupted system call */
pub const EIO: isize = 5; /* I/O error */
pub const ENXIO: isize = 6; /* No such device or address */
pub const E2BIG: isize = 7; /* Argument list too long */
pub const ENOEXEC: isize = 8; /* Exec format error */
pub const EBADF: isize = 9; /* Bad file number */
pub const ECHILD: isize = 10; /* No child processes */
pub const EAGAIN: isize = 11; /* Try again */
pub const ENOMEM: isize = 12; /* Out of memory */
pub const EACCES: isize = 13; /* Permission denied */
pub const EFAULT: isize = 14; /* Bad address */
pub const ENOTBLK: isize = 15; /* Block device required */
pub const EBUSY: isize = 16; /* Device or resource busy */
pub const EEXIST: isize = 17; /* File exists */
pub const EXDEV: isize = 18; /* Cross-device link */
pub const ENODEV: isize = 19; /* No such device */
pub const ENOTDIR: isize = 20; /* Not a directory */
pub const EISDIR: isize = 21; /* Is a directory */
pub const EINVAL: isize = 22; /* Invalid argument */
pub const ENFILE: isize = 23; /* File table overflow */
pub const EMFILE: isize = 24; /* Too many open files */
pub const ENOTTY: isize = 25; /* Not a typewriter */
pub const ETXTBSY: isize = 26; /* Text file busy */
pub const EFBIG: isize = 27; /* File too large */
pub const ENOSPC: isize = 28; /* No space left on device */
pub const ESPIPE: isize = 29; /* Illegal seek */
pub const EROFS: isize = 30; /* Read-only file system */
pub const EMLINK: isize = 31; /* Too many links */
pub const EPIPE: isize = 32; /* Broken pipe */
pub const EDOM: isize = 33; /* Math argument out of domain of func */
pub const ERANGE: isize = 34; /* Math result not representable */
pub const EDEADLK: isize = 35; /* Resource deadlock would occur */
pub const ENAMETOOLONG: isize = 36; /* File name too long */
pub const ENOLCK: isize = 37; /* No record locks available */
pub const ENOSYS: isize = 38; /* Function not implemented */
pub const ENOTEMPTY: isize = 39; /* Directory not empty */
pub const ELOOP: isize = 40; /* Too many symbolic links encountered */
pub const EWOULDBLOCK: isize = 41; /* Operation would block */
pub const ENOMSG: isize = 42; /* No message of desired type */
pub const EIDRM: isize = 43; /* Identifier removed */
pub const ECHRNG: isize = 44; /* Channel number out of range */
pub const EL2NSYNC: isize = 45; /* Level 2 not synchronized */
pub const EL3HLT: isize = 46; /* Level 3 halted */
pub const EL3RST: isize = 47; /* Level 3 reset */
pub const ELNRNG: isize = 48; /* Link number out of range */
pub const EUNATCH: isize = 49; /* Protocol driver not attached */
pub const ENOCSI: isize = 50; /* No CSI structure available */
pub const EL2HLT: isize = 51; /* Level 2 halted */
pub const EBADE: isize = 52; /* Invalid exchange */
pub const EBADR: isize = 53; /* Invalid request descriptor */
pub const EXFULL: isize = 54; /* Exchange full */
pub const ENOANO: isize = 55; /* No anode */
pub const EBADRQC: isize = 56; /* Invalid request code */
pub const EBADSLT: isize = 57; /* Invalid slot */
pub const EDEADLOCK: isize = 58; /* Resource deadlock would occur */
pub const EBFONT: isize = 59; /* Bad font file format */
pub const ENOSTR: isize = 60; /* Device not a stream */
pub const ENODATA: isize = 61; /* No data available */
pub const ETIME: isize = 62; /* Timer expired */
pub const ENOSR: isize = 63; /* Out of streams resources */
pub const ENONET: isize = 64; /* Machine is not on the network */
pub const ENOPKG: isize = 65; /* Package not installed */
pub const EREMOTE: isize = 66; /* Object is remote */
pub const ENOLINK: isize = 67; /* Link has been severed */
pub const EADV: isize = 68; /* Advertise error */
pub const ESRMNT: isize = 69; /* Srmount error */
pub const ECOMM: isize = 70; /* Communication error on send */
pub const EPROTO: isize = 71; /* Protocol error */
pub const EMULTIHOP: isize = 72; /* Multihop attempted */
pub const EDOTDOT: isize = 73; /* RFS specific error */
pub const EBADMSG: isize = 74; /* Not a data message */
pub const EOVERFLOW: isize = 75; /* Value too large for defined data type */
pub const ENOTUNIQ: isize = 76; /* Name not unique on network */
pub const EBADFD: isize = 77; /* File descriptor in bad state */
pub const EREMCHG: isize = 78; /* Remote address changed */
pub const ELIBACC: isize = 79; /* Can not access a needed shared library */
pub const ELIBBAD: isize = 80; /* Accessing a corrupted shared library */
pub const ELIBSCN: isize = 81; /* .lib section in a.out corrupted */
pub const ELIBMAX: isize = 82; /* Attempting to link in too many shared libraries */
pub const ELIBEXEC: isize = 83; /* Cannot exec a shared library directly */
pub const EILSEQ: isize = 84; /* Illegal byte sequence */
pub const ERESTART: isize = 85; /* Interrupted system call should be restarted */
pub const ESTRPIPE: isize = 86; /* Streams pipe error */
pub const EUSERS: isize = 87; /* Too many users */
pub const ENOTSOCK: isize = 88; /* Socket operation on non-socket */
pub const EDESTADDRREQ: isize = 89; /* Destination address required */
pub const EMSGSIZE: isize = 90; /* Message too long */
pub const EPROTOTYPE: isize = 91; /* Protocol wrong type for socket */
pub const ENOPROTOOPT: isize = 92; /* Protocol not available */
pub const EPROTONOSUPPORT: isize = 93; /* Protocol not supported */
pub const ESOCKTNOSUPPORT: isize = 94; /* Socket type not supported */
pub const EOPNOTSUPP: isize = 95; /* Operation not supported on transport endpoint */
pub const EPFNOSUPPORT: isize = 96; /* Protocol family not supported */
pub const EAFNOSUPPORT: isize = 97; /* Address family not supported by protocol */
pub const EADDRINUSE: isize = 98; /* Address already in use */
pub const EADDRNOTAVAIL: isize = 99; /* Cannot assign requested address */
pub const ENETDOWN: isize = 100; /* Network is down */
pub const ENETUNREACH: isize = 101; /* Network is unreachable */
pub const ENETRESET: isize = 102; /* Network dropped connection because of reset */
pub const ECONNABORTED: isize = 103; /* Software caused connection abort */
pub const ECONNRESET: isize = 104; /* Connection reset by peer */
pub const ENOBUFS: isize = 105; /* No buffer space available */
pub const EISCONN: isize = 106; /* Transport endpoint is already connected */
pub const ENOTCONN: isize = 107; /* Transport endpoint is not connected */
pub const ESHUTDOWN: isize = 108; /* Cannot send after transport endpoint shutdown */
pub const ETOOMANYREFS: isize = 109; /* Too many references: cannot splice */
pub const ETIMEDOUT: isize = 110; /* Connection timed out */
pub const ECONNREFUSED: isize = 111; /* Connection refused */
pub const EHOSTDOWN: isize = 112; /* Host is down */
pub const EHOSTUNREACH: isize = 113; /* No route to host */
pub const EALREADY: isize = 114; /* Operation already in progress */
pub const EINPROGRESS: isize = 115; /* Operation now in progress */
pub const ESTALE: isize = 116; /* Stale NFS file handle */
pub const EUCLEAN: isize = 117; /* Structure needs cleaning */
pub const ENOTNAM: isize = 118; /* Not a XENIX named type file */
pub const ENAVAIL: isize = 119; /* No XENIX semaphores available */
pub const EISNAM: isize = 120; /* Is a named type file */
pub const EREMOTEIO: isize = 121; /* Remote I/O error */
pub const EDQUOT: isize = 122; /* Quota exceeded */
pub const ENOMEDIUM: isize = 123; /* No medium found */
pub const EMEDIUMTYPE: isize = 124; /* Wrong medium type */
pub const ECANCELED: isize = 125; /* Operation Canceled */
pub const ENOKEY: isize = 126; /* Required key not available */
pub const EKEYEXPIRED: isize = 127; /* Key has expired */
pub const EKEYREVOKED: isize = 128; /* Key has been revoked */
pub const EKEYREJECTED: isize = 129; /* Key was rejected by service */
pub const EOWNERDEAD: isize = 130; /* Owner died */
pub const ENOTRECOVERABLE: isize = 131; /* State not recoverable */
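// Human-readable descriptions indexed by errno (index 0 is success).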
pub const STR_ERROR: [&'static str; 132] = [
"Success",
"Operation not permitted",
"No such file or directory",
"No such process",
"Interrupted system call",
"I/O error",
"No such device or address",
"Argument list too long",
"Exec format error",
"Bad file number",
"No child processes",
"Try again",
"Out of memory",
"Permission denied",
"Bad address",
"Block device required",
"Device or resource busy",
"File exists",
"Cross-device link",
"No such device",
"Not a directory",
"Is a directory",
"Invalid argument",
"File table overflow",
"Too many open files",
"Not a typewriter",
"Text file busy",
"File too large",
"No space left on device",
"Illegal seek",
"Read-only file system",
"Too many links",
"Broken pipe",
"Math argument out of domain of func",
"Math result not representable",
"Resource deadlock would occur",
"File name too long",
"No record locks available",
"Function not implemented",
"Directory not empty",
"Too many symbolic links encountered",
"Operation would block",
"No message of desired type",
"Identifier removed",
"Channel number out of range",
"Level 2 not synchronized",
"Level 3 halted",
"Level 3 reset",
"Link number out of range",
"Protocol driver not attached",
"No CSI structure available",
"Level 2 halted",
"Invalid exchange",
"Invalid request descriptor",
"Exchange full",
"No anode",
"Invalid request code",
"Invalid slot",
"Resource deadlock would occur",
"Bad font file format",
"Device not a stream",
"No data available",
"Timer expired",
"Out of streams resources",
"Machine is not on the network",
"Package not installed",
"Object is remote",
"Link has been severed",
"Advertise error",
"Srmount error",
"Communication error on send",
"Protocol error",
"Multihop attempted",
"RFS specific error",
"Not a data message",
"Value too large for defined data type",
"Name not unique on network",
"File descriptor in bad state",
"Remote address changed",
"Can not access a needed shared library",
"Accessing a corrupted shared library",
".lib section in a.out corrupted",
"Attempting to link in too many shared libraries",
"Cannot exec a shared library directly",
"Illegal byte sequence",
"Interrupted system call should be restarted",
"Streams pipe error",
"Too many users",
"Socket operation on non-socket",
"Destination address required",
"Message too long",
"Protocol wrong type for socket",
"Protocol not available",
"Protocol not supported",
"Socket type not supported",
"Operation not supported on transport endpoint",
"Protocol family not supported",
"Address family not supported by protocol",
"Address already in use",
"Cannot assign requested address",
"Network is down",
"Network is unreachable",
"Network dropped connection because of reset",
"Software caused connection abort",
"Connection reset by peer",
"No buffer space available",
"Transport endpoint is already connected",
"Transport endpoint is not connected",
"Cannot send after transport endpoint shutdown",
"Too many references: cannot splice",
"Connection timed out",
"Connection refused",
"Host is down",
"No route to host",
"Operation already in progress",
"Operation now in progress",
"Stale NFS file handle",
"Structure needs cleaning",
"Not a XENIX named type file",
"No XENIX semaphores available",
"Is a named type file",
"Remote I/O error",
"Quota exceeded",
"No medium found",
"Wrong medium type",
"Operation Canceled",
"Required key not available",
"Key has expired",
"Key has been revoked",
"Key was rejected by service",
"Owner died",
"State not recoverable",
];
|
{
f.write_str(self.text())
}
|
identifier_body
|
vm.rs
|
use crate::{builder, ffi, util, Value};
use std;
use std::fmt;
use libc;
use std::sync::Mutex;
/// A Ruby virtual machine.
pub struct VM;
/// We only want to be able to have one `VM` at a time.
static mut VM_EXISTS: bool = false;
lazy_static! {
static ref ACTIVE_VM: Mutex<VM> = Mutex::new(VM::new().expect("failed to create Ruby VM"));
}
// TODO:
// Implement hooked variables (rb_define_hooked_variable)
// Allows a callback to get/set variable value
#[derive(PartialEq)]
/// A Ruby error
pub enum ErrorKind
{
/// An internal VM error.
VM(String),
/// An exception was thrown.
Exception(Value),
}
impl VM
{
/// Gets the active VM.
pub fn get() -> &'static Mutex<VM> {
&ACTIVE_VM
}
/// Creates a new Ruby VM.
pub fn new() -> Result<Self, ErrorKind> {
unsafe {
if VM_EXISTS {
Err(ErrorKind::VM("can only have one Ruby VM at a time".to_owned()))
} else {
ffi::ruby_init();
VM_EXISTS = true;
Ok(VM)
}
}
}
/// Evaluates a line of code.
pub fn eval(&mut self, code: &str) -> Result<Value, ErrorKind> {
self.eval_advanced(code, false)
}
/// Evaluates a line of code in a sandbox.
///
/// Any variables defined will not be saved.
pub fn eval_sandbox(&mut self, code: &str) -> Result<Value, ErrorKind> {
self.eval_advanced(code, true)
}
/// `require`s a file.
pub fn require(&self, file_name: &str) -> Value {
Value::from(unsafe { ffi::rb_require(util::c_string(file_name).as_ptr()) })
}
/// Creates a new class.
pub fn class<S>(&mut self, name: S) -> builder::Class
where S: Into<String> {
builder::Class::new(name)
}
/// Creates a new module.
pub fn module<S>(&mut self, name: S) -> builder::Module
where S: Into<String> {
builder::Module::new(name)
}
/// Sets the value of a global variable or creates a new one.
pub fn set_global(&self, name: &str, value: Value) -> Value {
Value::from(unsafe { ffi::rb_gv_set(util::c_string(name).as_ptr(), value.0) })
}
/// Gets the value of a global variable.
pub fn get_global(&self, name: &str) -> Value {
Value::from(unsafe { ffi::rb_gv_get(util::c_string(name).as_ptr()) })
}
/// Sets a global constant.
pub fn set_global_const(&self, name: &str, value: Value) {
unsafe { ffi::rb_define_global_const(util::c_string(name).as_ptr(), value.0) }
}
/// Defines a global function.
pub fn define_global_function(&self, name: &str, f: *mut extern fn() -> Value, arg_count: u8) -> Value {
unsafe {
Value::from(ffi::rb_define_global_function(
util::c_string(name).as_ptr(),
f as *mut _,
arg_count as libc::c_int,
))
}
}
/// Gets the current receiver (can be `nil`).
pub fn current_receiver(&self) -> Value {
unsafe { Value::from(ffi::rb_current_receiver()) }
}
/// Raises an object and a message.
pub fn raise(&self, value: Value, message: &str) -> ! {
unsafe { ffi::rb_raise(value.0, util::c_string(message).as_ptr()) }
}
/// Raises a fatal error.
pub fn fatal(&self, message: &str) -> ! {
unsafe { ffi::rb_fatal(util::c_string(message).as_ptr()) }
}
/// Raises a bug.
pub fn bug(&self, message: &str) -> ! {
unsafe { ffi::rb_bug(util::c_string(message).as_ptr()) }
}
/// Logs a Ruby warning.
pub fn warning(&self, message: &str) {
unsafe { ffi::rb_warning(util::c_string(message).as_ptr()) }
}
/// Prints Ruby version info to stdout.
pub fn show_ruby_version(&self) { unsafe { ffi::ruby_show_version() } }
/// Prints Ruby copyright info to stdout.
pub fn show_ruby_copyright(&self) { unsafe { ffi::ruby_show_copyright() } }
/// Sets the script name.
/// Essentially the same as `$0 = name`.
pub fn set_script_name(&self, name: &str) {
unsafe { ffi::ruby_script(util::c_string(name).as_ptr()) }
}
/// Gets the currently raised exception and clears it.
pub fn consume_exception(&mut self) -> Value {
let exception = self.current_exception();
unsafe { ffi::rb_set_errinfo(ffi::Qnil) };
exception
}
/// Gets the currently raised exception.
///
/// Can be `nil`.
pub fn current_exception(&self) -> Value {
Value::from(unsafe { ffi::rb_errinfo() })
}
fn eval_advanced(&mut self, code: &str, sandbox: bool) -> Result<Value, ErrorKind> {
let mut state: libc::c_int = 0;
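// Both FFI eval helpers report a raised exception through `state` (non-zero) instead of unwinding into Rust.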
let eval_fn = if sandbox
|
else {
ffi::rb_eval_string_wrap
};
let result = unsafe { eval_fn(util::c_string(code).as_ptr(), &mut state) };
if state == 0 {
Ok(Value::from(result))
} else {
Err(ErrorKind::Exception(self.consume_exception()))
}
}
}
impl std::ops::Drop for VM
{
fn drop(&mut self) {
unsafe {
VM_EXISTS = false;
ffi::ruby_cleanup(0);
};
}
}
impl fmt::Debug for ErrorKind {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
ErrorKind::VM(ref msg) => write!(fmt, "virtual machine error: {}", msg),
ErrorKind::Exception(e) => write!(fmt, "{}: {:?}", e.class_name(), e),
}
}
}
|
{
ffi::rb_eval_string_protect
}
|
conditional_block
|
vm.rs
|
use crate::{builder, ffi, util, Value};
use std;
use std::fmt;
use libc;
use std::sync::Mutex;
/// A Ruby virtual machine.
pub struct VM;
/// We only want to be able to have one `VM` at a time.
static mut VM_EXISTS: bool = false;
lazy_static! {
static ref ACTIVE_VM: Mutex<VM> = Mutex::new(VM::new().expect("failed to create Ruby VM"));
}
// TODO:
// Implement hooked variables (rb_define_hooked_variable)
// Allows a callback to get/set variable value
#[derive(PartialEq)]
/// A Ruby error
pub enum ErrorKind
{
/// An internal VM error.
VM(String),
/// An exception was thrown.
Exception(Value),
}
impl VM
{
/// Gets the active VM.
pub fn get() -> &'static Mutex<VM> {
&ACTIVE_VM
}
/// Creates a new Ruby VM.
pub fn new() -> Result<Self, ErrorKind> {
unsafe {
if VM_EXISTS {
Err(ErrorKind::VM("can only have one Ruby VM at a time".to_owned()))
} else {
ffi::ruby_init();
VM_EXISTS = true;
Ok(VM)
}
}
}
/// Evaluates a line of code.
pub fn eval(&mut self, code: &str) -> Result<Value, ErrorKind> {
self.eval_advanced(code, false)
}
/// Evaluates a line of code in a sandbox.
///
/// Any variables defined will not be saved.
pub fn eval_sandbox(&mut self, code: &str) -> Result<Value, ErrorKind> {
self.eval_advanced(code, true)
}
/// `require`s a file.
pub fn require(&self, file_name: &str) -> Value {
Value::from(unsafe { ffi::rb_require(util::c_string(file_name).as_ptr()) })
}
/// Creates a new class.
pub fn class<S>(&mut self, name: S) -> builder::Class
where S: Into<String> {
builder::Class::new(name)
}
/// Creates a new module.
pub fn module<S>(&mut self, name: S) -> builder::Module
where S: Into<String> {
builder::Module::new(name)
}
/// Sets the value of a global variable or creates a new one.
pub fn set_global(&self, name: &str, value: Value) -> Value {
Value::from(unsafe { ffi::rb_gv_set(util::c_string(name).as_ptr(), value.0) })
}
/// Gets the value of a global variable.
pub fn get_global(&self, name: &str) -> Value {
Value::from(unsafe { ffi::rb_gv_get(util::c_string(name).as_ptr()) })
}
/// Sets a global constant.
pub fn set_global_const(&self, name: &str, value: Value) {
unsafe { ffi::rb_define_global_const(util::c_string(name).as_ptr(), value.0) }
}
/// Defines a global function.
pub fn define_global_function(&self, name: &str, f: *mut extern fn() -> Value, arg_count: u8) -> Value {
unsafe {
Value::from(ffi::rb_define_global_function(
util::c_string(name).as_ptr(),
f as *mut _,
arg_count as libc::c_int,
))
}
|
}
/// Gets the current receiver (can be `nil`).
pub fn current_receiver(&self) -> Value {
unsafe { Value::from(ffi::rb_current_receiver()) }
}
/// Raises an object and a message.
pub fn raise(&self, value: Value, message: &str) -> ! {
unsafe { ffi::rb_raise(value.0, util::c_string(message).as_ptr()) }
}
/// Raises a fatal error.
pub fn fatal(&self, message: &str) -> ! {
unsafe { ffi::rb_fatal(util::c_string(message).as_ptr()) }
}
/// Raises a bug.
pub fn bug(&self, message: &str) -> ! {
unsafe { ffi::rb_bug(util::c_string(message).as_ptr()) }
}
/// Logs a Ruby warning.
pub fn warning(&self, message: &str) {
unsafe { ffi::rb_warning(util::c_string(message).as_ptr()) }
}
/// Prints Ruby version info to stdout.
pub fn show_ruby_version(&self) { unsafe { ffi::ruby_show_version() } }
/// Prints Ruby copyright info to stdout.
pub fn show_ruby_copyright(&self) { unsafe { ffi::ruby_show_copyright() } }
/// Sets the script name.
/// Essentially the same as `$0 = name`.
pub fn set_script_name(&self, name: &str) {
unsafe { ffi::ruby_script(util::c_string(name).as_ptr()) }
}
/// Gets the currently raised exception and clears it.
pub fn consume_exception(&mut self) -> Value {
let exception = self.current_exception();
unsafe { ffi::rb_set_errinfo(ffi::Qnil) };
exception
}
/// Gets the currently raised exception.
///
/// Can be `nil`.
pub fn current_exception(&self) -> Value {
Value::from(unsafe { ffi::rb_errinfo() })
}
fn eval_advanced(&mut self, code: &str, sandbox: bool) -> Result<Value, ErrorKind> {
let mut state: libc::c_int = 0;
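// Both FFI eval helpers report a raised exception through `state` (non-zero) instead of unwinding into Rust.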
let eval_fn = if sandbox {
ffi::rb_eval_string_protect
} else {
ffi::rb_eval_string_wrap
};
let result = unsafe { eval_fn(util::c_string(code).as_ptr(), &mut state) };
if state == 0 {
Ok(Value::from(result))
} else {
Err(ErrorKind::Exception(self.consume_exception()))
}
}
}
impl std::ops::Drop for VM
{
fn drop(&mut self) {
unsafe {
VM_EXISTS = false;
ffi::ruby_cleanup(0);
};
}
}
impl fmt::Debug for ErrorKind {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
ErrorKind::VM(ref msg) => write!(fmt, "virtual machine error: {}", msg),
ErrorKind::Exception(e) => write!(fmt, "{}: {:?}", e.class_name(), e),
}
}
}
|
random_line_split
|
|
vm.rs
|
use crate::{builder, ffi, util, Value};
use std;
use std::fmt;
use libc;
use std::sync::Mutex;
/// A Ruby virtual machine.
pub struct
|
;
/// We only want to be able to have one `VM` at a time.
static mut VM_EXISTS: bool = false;
lazy_static! {
static ref ACTIVE_VM: Mutex<VM> = Mutex::new(VM::new().expect("failed to create Ruby VM"));
}
// TODO:
// Implement hooked variables (rb_define_hooked_variable)
// Allows a callback to get/set variable value
#[derive(PartialEq)]
/// A Ruby error
pub enum ErrorKind
{
/// An internal VM error.
VM(String),
/// An exception was thrown.
Exception(Value),
}
impl VM
{
/// Gets the active VM.
pub fn get() -> &'static Mutex<VM> {
&ACTIVE_VM
}
/// Creates a new Ruby VM.
pub fn new() -> Result<Self, ErrorKind> {
unsafe {
if VM_EXISTS {
Err(ErrorKind::VM("can only have one Ruby VM at a time".to_owned()))
} else {
ffi::ruby_init();
VM_EXISTS = true;
Ok(VM)
}
}
}
/// Evaluates a line of code.
pub fn eval(&mut self, code: &str) -> Result<Value, ErrorKind> {
self.eval_advanced(code, false)
}
/// Evaluates a line of code in a sandbox.
///
/// Any variables defined will not be saved.
pub fn eval_sandbox(&mut self, code: &str) -> Result<Value, ErrorKind> {
self.eval_advanced(code, true)
}
/// `require`s a file.
pub fn require(&self, file_name: &str) -> Value {
Value::from(unsafe { ffi::rb_require(util::c_string(file_name).as_ptr()) })
}
/// Creates a new class.
pub fn class<S>(&mut self, name: S) -> builder::Class
where S: Into<String> {
builder::Class::new(name)
}
/// Creates a new module.
pub fn module<S>(&mut self, name: S) -> builder::Module
where S: Into<String> {
builder::Module::new(name)
}
/// Sets the value of a global variable or creates a new one.
pub fn set_global(&self, name: &str, value: Value) -> Value {
Value::from(unsafe { ffi::rb_gv_set(util::c_string(name).as_ptr(), value.0) })
}
/// Gets the value of a global variable.
pub fn get_global(&self, name: &str) -> Value {
Value::from(unsafe { ffi::rb_gv_get(util::c_string(name).as_ptr()) })
}
/// Sets a global constant.
pub fn set_global_const(&self, name: &str, value: Value) {
unsafe { ffi::rb_define_global_const(util::c_string(name).as_ptr(), value.0) }
}
/// Defines a global function.
pub fn define_global_function(&self, name: &str, f: *mut extern fn() -> Value, arg_count: u8) -> Value {
unsafe {
Value::from(ffi::rb_define_global_function(
util::c_string(name).as_ptr(),
f as *mut _,
arg_count as libc::c_int,
))
}
}
/// Gets the current receiver (can be `nil`).
pub fn current_receiver(&self) -> Value {
unsafe { Value::from(ffi::rb_current_receiver()) }
}
/// Raises an object and a message.
pub fn raise(&self, value: Value, message: &str) -> ! {
unsafe { ffi::rb_raise(value.0, util::c_string(message).as_ptr()) }
}
/// Raises a fatal error.
pub fn fatal(&self, message: &str) -> ! {
unsafe { ffi::rb_fatal(util::c_string(message).as_ptr()) }
}
/// Raises a bug.
pub fn bug(&self, message: &str) -> ! {
unsafe { ffi::rb_bug(util::c_string(message).as_ptr()) }
}
/// Logs a Ruby warning.
pub fn warning(&self, message: &str) {
unsafe { ffi::rb_warning(util::c_string(message).as_ptr()) }
}
/// Prints Ruby version info to stdout.
pub fn show_ruby_version(&self) { unsafe { ffi::ruby_show_version() } }
/// Prints Ruby copyright info to stdout.
pub fn show_ruby_copyright(&self) { unsafe { ffi::ruby_show_copyright() } }
/// Sets the script name.
/// Essentially the same as `$0 = name`.
pub fn set_script_name(&self, name: &str) {
unsafe { ffi::ruby_script(util::c_string(name).as_ptr()) }
}
/// Gets the currently raised exception and clears it.
pub fn consume_exception(&mut self) -> Value {
let exception = self.current_exception();
unsafe { ffi::rb_set_errinfo(ffi::Qnil) };
exception
}
/// Gets the currently raised exception.
///
/// Can be `nil`.
pub fn current_exception(&self) -> Value {
Value::from(unsafe { ffi::rb_errinfo() })
}
fn eval_advanced(&mut self, code: &str, sandbox: bool) -> Result<Value, ErrorKind> {
let mut state: libc::c_int = 0;
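// Both FFI eval helpers report a raised exception through `state` (non-zero) instead of unwinding into Rust.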
let eval_fn = if sandbox {
ffi::rb_eval_string_protect
} else {
ffi::rb_eval_string_wrap
};
let result = unsafe { eval_fn(util::c_string(code).as_ptr(), &mut state) };
if state == 0 {
Ok(Value::from(result))
} else {
Err(ErrorKind::Exception(self.consume_exception()))
}
}
}
impl std::ops::Drop for VM
{
fn drop(&mut self) {
unsafe {
VM_EXISTS = false;
ffi::ruby_cleanup(0);
};
}
}
impl fmt::Debug for ErrorKind {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
ErrorKind::VM(ref msg) => write!(fmt, "virtual machine error: {}", msg),
ErrorKind::Exception(e) => write!(fmt, "{}: {:?}", e.class_name(), e),
}
}
}
|
VM
|
identifier_name
|
kinematic2.rs
|
extern crate nalgebra as na;
use na::{Point2, RealField, Vector2};
use ncollide2d::shape::{Ball, Cuboid, ShapeHandle};
use nphysics2d::force_generator::DefaultForceGeneratorSet;
use nphysics2d::joint::{DefaultJointConstraintSet, RevoluteJoint};
use nphysics2d::math::Velocity;
use nphysics2d::object::{
Body, BodyPartHandle, BodyStatus, ColliderDesc, DefaultBodySet, DefaultColliderSet, Ground,
MultibodyDesc, RigidBodyDesc,
};
use nphysics2d::world::{DefaultGeometricalWorld, DefaultMechanicalWorld};
use nphysics_testbed2d::Testbed;
/*
* NOTE: The `r` macro is only here to convert from f64 to the `N` scalar type.
* This simplifies experimentation with various scalar types (f32, fixed-point numbers, etc.)
*/
pub fn init_world<N: RealField>(testbed: &mut Testbed<N>) {
/*
* World
*/
let mechanical_world = DefaultMechanicalWorld::new(Vector2::new(r!(0.0), r!(-9.81)));
let geometrical_world = DefaultGeometricalWorld::new();
let mut bodies = DefaultBodySet::new();
let mut colliders = DefaultColliderSet::new();
let joint_constraints = DefaultJointConstraintSet::new();
let force_generators = DefaultForceGeneratorSet::new();
/*
* Ground
*/
let ground_size = r!(25.0);
let ground_shape = ShapeHandle::new(Cuboid::new(Vector2::new(ground_size, r!(1.0))));
let ground_handle = bodies.insert(Ground::new());
let co = ColliderDesc::new(ground_shape)
.translation(-Vector2::y())
.build(BodyPartHandle(ground_handle, 0));
colliders.insert(co);
/*
* Create boxes
*/
let num = 10;
let rad = r!(0.2);
let cuboid = ShapeHandle::new(Cuboid::new(Vector2::repeat(rad)));
let collider_desc = ColliderDesc::new(cuboid.clone()).density(r!(1.0));
let shift = (rad + ColliderDesc::<N>::default_margin()) * r!(2.0);
let centerx = shift * r!(num as f64) / r!(2.0);
let centery = shift / r!(2.0) + r!(3.04);
for i in 0usize..num {
for j in 0usize..num {
let x = r!(i as f64) * shift - centerx;
let y = r!(j as f64) * shift + centery;
// Build the rigid body.
let rb = RigidBodyDesc::new().translation(Vector2::new(x, y)).build();
let rb_handle = bodies.insert(rb);
// Build the collider.
let co = collider_desc.build(BodyPartHandle(rb_handle, 0));
colliders.insert(co);
}
}
/*
* Setup a kinematic rigid body.
*/
let platform_body = RigidBodyDesc::new()
.translation(Vector2::new(r!(0.0), r!(1.5)))
.velocity(Velocity::linear(r!(1.0), r!(0.0)))
.status(BodyStatus::Kinematic)
.build();
let platform_handle = bodies.insert(platform_body);
let platform_geom = ShapeHandle::new(Cuboid::new(Vector2::new(rad * r!(10.0), rad)));
let platform_collider = ColliderDesc::new(platform_geom)
.density(r!(1.0))
.build(BodyPartHandle(platform_handle, 0));
colliders.insert(platform_collider);
/*
* Setup a kinematic multibody.
*/
let joint = RevoluteJoint::new(r!(0.0));
let mut mb = MultibodyDesc::new(joint)
.body_shift(Vector2::x() * r!(2.0))
.parent_shift(Vector2::new(r!(5.0), r!(2.0)))
.build();
mb.set_status(BodyStatus::Kinematic);
mb.generalized_velocity_mut()[0] = r!(-3.0);
let mb_handle = bodies.insert(mb);
let mb_collider = collider_desc.build(BodyPartHandle(mb_handle, 0));
colliders.insert(mb_collider);
/*
* Setup a motorized multibody.
*/
let mut joint = RevoluteJoint::new(r!(0.0));
joint.set_desired_angular_motor_velocity(r!(-2.0));
joint.set_max_angular_motor_torque(r!(2.0));
joint.enable_angular_motor();
let mb = MultibodyDesc::new(joint)
.body_shift(Vector2::x() * r!(2.0))
.parent_shift(Vector2::new(r!(-4.0), r!(3.0)))
.build();
let mb_handle = bodies.insert(mb);
let geom = ShapeHandle::new(Ball::new(r!(2.0) * rad));
let ball_collider_desc = ColliderDesc::new(geom).density(r!(1.0));
let mb_collider = ball_collider_desc.build(BodyPartHandle(mb_handle, 0));
colliders.insert(mb_collider);
/*
* Setup a callback to control the platform.
*/
testbed.add_callback(move |_, _, bodies, _, _, time| {
if let Some(platform) = bodies.rigid_body_mut(platform_handle)
|
});
/*
* Run the simulation.
*/
testbed.set_ground_handle(Some(ground_handle));
testbed.set_world(
mechanical_world,
geometrical_world,
bodies,
colliders,
joint_constraints,
force_generators,
);
testbed.look_at(Point2::new(0.0, 5.0), 60.0);
}
fn main() {
let testbed = Testbed::<f32>::from_builders(0, vec![("Kinematic body", init_world)]);
testbed.run()
}
|
{
let platform_x = platform.position().translation.vector.x;
let mut vel = *platform.velocity();
vel.linear.y = (time * r!(5.0)).sin() * r!(0.8);
if platform_x >= rad * r!(10.0) {
vel.linear.x = r!(-1.0);
}
if platform_x <= -rad * r!(10.0) {
vel.linear.x = r!(1.0);
}
platform.set_velocity(vel);
}
|
conditional_block
|
kinematic2.rs
|
extern crate nalgebra as na;
use na::{Point2, RealField, Vector2};
use ncollide2d::shape::{Ball, Cuboid, ShapeHandle};
use nphysics2d::force_generator::DefaultForceGeneratorSet;
use nphysics2d::joint::{DefaultJointConstraintSet, RevoluteJoint};
use nphysics2d::math::Velocity;
use nphysics2d::object::{
Body, BodyPartHandle, BodyStatus, ColliderDesc, DefaultBodySet, DefaultColliderSet, Ground,
MultibodyDesc, RigidBodyDesc,
};
use nphysics2d::world::{DefaultGeometricalWorld, DefaultMechanicalWorld};
use nphysics_testbed2d::Testbed;
/*
* NOTE: The `r` macro is only here to convert from f64 to the `N` scalar type.
* This simplifies experimentation with various scalar types (f32, fixed-point numbers, etc.)
*/
pub fn init_world<N: RealField>(testbed: &mut Testbed<N>)
|
.build(BodyPartHandle(ground_handle, 0));
colliders.insert(co);
/*
* Create boxes
*/
let num = 10;
let rad = r!(0.2);
let cuboid = ShapeHandle::new(Cuboid::new(Vector2::repeat(rad)));
let collider_desc = ColliderDesc::new(cuboid.clone()).density(r!(1.0));
let shift = (rad + ColliderDesc::<N>::default_margin()) * r!(2.0);
let centerx = shift * r!(num as f64) / r!(2.0);
let centery = shift / r!(2.0) + r!(3.04);
for i in 0usize..num {
for j in 0usize..num {
let x = r!(i as f64) * shift - centerx;
let y = r!(j as f64) * shift + centery;
// Build the rigid body.
let rb = RigidBodyDesc::new().translation(Vector2::new(x, y)).build();
let rb_handle = bodies.insert(rb);
// Build the collider.
let co = collider_desc.build(BodyPartHandle(rb_handle, 0));
colliders.insert(co);
}
}
/*
* Setup a kinematic rigid body.
*/
let platform_body = RigidBodyDesc::new()
.translation(Vector2::new(r!(0.0), r!(1.5)))
.velocity(Velocity::linear(r!(1.0), r!(0.0)))
.status(BodyStatus::Kinematic)
.build();
let platform_handle = bodies.insert(platform_body);
let platform_geom = ShapeHandle::new(Cuboid::new(Vector2::new(rad * r!(10.0), rad)));
let platform_collider = ColliderDesc::new(platform_geom)
.density(r!(1.0))
.build(BodyPartHandle(platform_handle, 0));
colliders.insert(platform_collider);
/*
* Setup a kinematic multibody.
*/
let joint = RevoluteJoint::new(r!(0.0));
let mut mb = MultibodyDesc::new(joint)
.body_shift(Vector2::x() * r!(2.0))
.parent_shift(Vector2::new(r!(5.0), r!(2.0)))
.build();
mb.set_status(BodyStatus::Kinematic);
mb.generalized_velocity_mut()[0] = r!(-3.0);
let mb_handle = bodies.insert(mb);
let mb_collider = collider_desc.build(BodyPartHandle(mb_handle, 0));
colliders.insert(mb_collider);
/*
* Setup a motorized multibody.
*/
let mut joint = RevoluteJoint::new(r!(0.0));
joint.set_desired_angular_motor_velocity(r!(-2.0));
joint.set_max_angular_motor_torque(r!(2.0));
joint.enable_angular_motor();
let mb = MultibodyDesc::new(joint)
.body_shift(Vector2::x() * r!(2.0))
.parent_shift(Vector2::new(r!(-4.0), r!(3.0)))
.build();
let mb_handle = bodies.insert(mb);
let geom = ShapeHandle::new(Ball::new(r!(2.0) * rad));
let ball_collider_desc = ColliderDesc::new(geom).density(r!(1.0));
let mb_collider = ball_collider_desc.build(BodyPartHandle(mb_handle, 0));
colliders.insert(mb_collider);
/*
* Setup a callback to control the platform.
*/
testbed.add_callback(move |_, _, bodies, _, _, time| {
if let Some(platform) = bodies.rigid_body_mut(platform_handle) {
let platform_x = platform.position().translation.vector.x;
let mut vel = *platform.velocity();
vel.linear.y = (time * r!(5.0)).sin() * r!(0.8);
if platform_x >= rad * r!(10.0) {
vel.linear.x = r!(-1.0);
}
if platform_x <= -rad * r!(10.0) {
vel.linear.x = r!(1.0);
}
platform.set_velocity(vel);
}
});
/*
* Run the simulation.
*/
testbed.set_ground_handle(Some(ground_handle));
testbed.set_world(
mechanical_world,
geometrical_world,
bodies,
colliders,
joint_constraints,
force_generators,
);
testbed.look_at(Point2::new(0.0, 5.0), 60.0);
}
fn main() {
let testbed = Testbed::<f32>::from_builders(0, vec![("Kinematic body", init_world)]);
testbed.run()
}
|
{
/*
* World
*/
let mechanical_world = DefaultMechanicalWorld::new(Vector2::new(r!(0.0), r!(-9.81)));
let geometrical_world = DefaultGeometricalWorld::new();
let mut bodies = DefaultBodySet::new();
let mut colliders = DefaultColliderSet::new();
let joint_constraints = DefaultJointConstraintSet::new();
let force_generators = DefaultForceGeneratorSet::new();
/*
* Ground
*/
let ground_size = r!(25.0);
let ground_shape = ShapeHandle::new(Cuboid::new(Vector2::new(ground_size, r!(1.0))));
let ground_handle = bodies.insert(Ground::new());
let co = ColliderDesc::new(ground_shape)
.translation(-Vector2::y())
|
identifier_body
|
kinematic2.rs
|
extern crate nalgebra as na;
use na::{Point2, RealField, Vector2};
use ncollide2d::shape::{Ball, Cuboid, ShapeHandle};
use nphysics2d::force_generator::DefaultForceGeneratorSet;
use nphysics2d::joint::{DefaultJointConstraintSet, RevoluteJoint};
use nphysics2d::math::Velocity;
use nphysics2d::object::{
Body, BodyPartHandle, BodyStatus, ColliderDesc, DefaultBodySet, DefaultColliderSet, Ground,
MultibodyDesc, RigidBodyDesc,
};
use nphysics2d::world::{DefaultGeometricalWorld, DefaultMechanicalWorld};
use nphysics_testbed2d::Testbed;
/*
* NOTE: The `r` macro is only here to convert from f64 to the `N` scalar type.
* This simplifies experimentation with various scalar types (f32, fixed-point numbers, etc.)
*/
pub fn init_world<N: RealField>(testbed: &mut Testbed<N>) {
/*
* World
*/
let mechanical_world = DefaultMechanicalWorld::new(Vector2::new(r!(0.0), r!(-9.81)));
let geometrical_world = DefaultGeometricalWorld::new();
let mut bodies = DefaultBodySet::new();
let mut colliders = DefaultColliderSet::new();
let joint_constraints = DefaultJointConstraintSet::new();
let force_generators = DefaultForceGeneratorSet::new();
/*
* Ground
*/
let ground_size = r!(25.0);
let ground_shape = ShapeHandle::new(Cuboid::new(Vector2::new(ground_size, r!(1.0))));
let ground_handle = bodies.insert(Ground::new());
let co = ColliderDesc::new(ground_shape)
.translation(-Vector2::y())
.build(BodyPartHandle(ground_handle, 0));
colliders.insert(co);
/*
* Create boxes
*/
let num = 10;
let rad = r!(0.2);
let cuboid = ShapeHandle::new(Cuboid::new(Vector2::repeat(rad)));
let collider_desc = ColliderDesc::new(cuboid.clone()).density(r!(1.0));
let shift = (rad + ColliderDesc::<N>::default_margin()) * r!(2.0);
let centerx = shift * r!(num as f64) / r!(2.0);
let centery = shift / r!(2.0) + r!(3.04);
for i in 0usize..num {
for j in 0usize..num {
let x = r!(i as f64) * shift - centerx;
let y = r!(j as f64) * shift + centery;
// Build the rigid body.
let rb = RigidBodyDesc::new().translation(Vector2::new(x, y)).build();
let rb_handle = bodies.insert(rb);
// Build the collider.
let co = collider_desc.build(BodyPartHandle(rb_handle, 0));
colliders.insert(co);
}
}
/*
* Setup a kinematic rigid body.
*/
let platform_body = RigidBodyDesc::new()
.translation(Vector2::new(r!(0.0), r!(1.5)))
.velocity(Velocity::linear(r!(1.0), r!(0.0)))
.status(BodyStatus::Kinematic)
.build();
let platform_handle = bodies.insert(platform_body);
let platform_geom = ShapeHandle::new(Cuboid::new(Vector2::new(rad * r!(10.0), rad)));
let platform_collider = ColliderDesc::new(platform_geom)
.density(r!(1.0))
.build(BodyPartHandle(platform_handle, 0));
colliders.insert(platform_collider);
/*
* Setup a kinematic multibody.
*/
let joint = RevoluteJoint::new(r!(0.0));
let mut mb = MultibodyDesc::new(joint)
.body_shift(Vector2::x() * r!(2.0))
.parent_shift(Vector2::new(r!(5.0), r!(2.0)))
.build();
mb.set_status(BodyStatus::Kinematic);
mb.generalized_velocity_mut()[0] = r!(-3.0);
let mb_handle = bodies.insert(mb);
let mb_collider = collider_desc.build(BodyPartHandle(mb_handle, 0));
colliders.insert(mb_collider);
/*
* Setup a motorized multibody.
*/
let mut joint = RevoluteJoint::new(r!(0.0));
joint.set_desired_angular_motor_velocity(r!(-2.0));
joint.set_max_angular_motor_torque(r!(2.0));
joint.enable_angular_motor();
let mb = MultibodyDesc::new(joint)
.body_shift(Vector2::x() * r!(2.0))
.parent_shift(Vector2::new(r!(-4.0), r!(3.0)))
.build();
let mb_handle = bodies.insert(mb);
let geom = ShapeHandle::new(Ball::new(r!(2.0) * rad));
let ball_collider_desc = ColliderDesc::new(geom).density(r!(1.0));
let mb_collider = ball_collider_desc.build(BodyPartHandle(mb_handle, 0));
colliders.insert(mb_collider);
/*
* Setup a callback to control the platform.
*/
testbed.add_callback(move |_, _, bodies, _, _, time| {
if let Some(platform) = bodies.rigid_body_mut(platform_handle) {
let platform_x = platform.position().translation.vector.x;
let mut vel = *platform.velocity();
vel.linear.y = (time * r!(5.0)).sin() * r!(0.8);
if platform_x >= rad * r!(10.0) {
vel.linear.x = r!(-1.0);
}
if platform_x <= -rad * r!(10.0) {
vel.linear.x = r!(1.0);
}
platform.set_velocity(vel);
}
});
/*
* Run the simulation.
*/
testbed.set_ground_handle(Some(ground_handle));
testbed.set_world(
mechanical_world,
geometrical_world,
bodies,
colliders,
joint_constraints,
force_generators,
);
testbed.look_at(Point2::new(0.0, 5.0), 60.0);
}
fn
|
() {
let testbed = Testbed::<f32>::from_builders(0, vec![("Kinematic body", init_world)]);
testbed.run()
}
|
main
|
identifier_name
|
kinematic2.rs
|
extern crate nalgebra as na;
use na::{Point2, RealField, Vector2};
use ncollide2d::shape::{Ball, Cuboid, ShapeHandle};
use nphysics2d::force_generator::DefaultForceGeneratorSet;
use nphysics2d::joint::{DefaultJointConstraintSet, RevoluteJoint};
use nphysics2d::math::Velocity;
use nphysics2d::object::{
Body, BodyPartHandle, BodyStatus, ColliderDesc, DefaultBodySet, DefaultColliderSet, Ground,
MultibodyDesc, RigidBodyDesc,
};
use nphysics2d::world::{DefaultGeometricalWorld, DefaultMechanicalWorld};
use nphysics_testbed2d::Testbed;
/*
* NOTE: The `r` macro is only here to convert from f64 to the `N` scalar type.
* This simplifies experimentation with various scalar types (f32, fixed-point numbers, etc.)
*/
pub fn init_world<N: RealField>(testbed: &mut Testbed<N>) {
/*
* World
*/
let mechanical_world = DefaultMechanicalWorld::new(Vector2::new(r!(0.0), r!(-9.81)));
let geometrical_world = DefaultGeometricalWorld::new();
let mut bodies = DefaultBodySet::new();
let mut colliders = DefaultColliderSet::new();
let joint_constraints = DefaultJointConstraintSet::new();
let force_generators = DefaultForceGeneratorSet::new();
/*
* Ground
*/
let ground_size = r!(25.0);
let ground_shape = ShapeHandle::new(Cuboid::new(Vector2::new(ground_size, r!(1.0))));
let ground_handle = bodies.insert(Ground::new());
let co = ColliderDesc::new(ground_shape)
.translation(-Vector2::y())
.build(BodyPartHandle(ground_handle, 0));
colliders.insert(co);
/*
* Create boxes
*/
let num = 10;
let rad = r!(0.2);
let cuboid = ShapeHandle::new(Cuboid::new(Vector2::repeat(rad)));
let collider_desc = ColliderDesc::new(cuboid.clone()).density(r!(1.0));
let shift = (rad + ColliderDesc::<N>::default_margin()) * r!(2.0);
let centerx = shift * r!(num as f64) / r!(2.0);
let centery = shift / r!(2.0) + r!(3.04);
for i in 0usize..num {
for j in 0usize..num {
let x = r!(i as f64) * shift - centerx;
let y = r!(j as f64) * shift + centery;
// Build the rigid body.
let rb = RigidBodyDesc::new().translation(Vector2::new(x, y)).build();
let rb_handle = bodies.insert(rb);
// Build the collider.
let co = collider_desc.build(BodyPartHandle(rb_handle, 0));
colliders.insert(co);
}
}
/*
* Setup a kinematic rigid body.
*/
let platform_body = RigidBodyDesc::new()
.translation(Vector2::new(r!(0.0), r!(1.5)))
.velocity(Velocity::linear(r!(1.0), r!(0.0)))
.status(BodyStatus::Kinematic)
.build();
let platform_handle = bodies.insert(platform_body);
let platform_geom = ShapeHandle::new(Cuboid::new(Vector2::new(rad * r!(10.0), rad)));
let platform_collider = ColliderDesc::new(platform_geom)
.density(r!(1.0))
.build(BodyPartHandle(platform_handle, 0));
colliders.insert(platform_collider);
|
* Setup a kinematic multibody.
*/
let joint = RevoluteJoint::new(r!(0.0));
let mut mb = MultibodyDesc::new(joint)
.body_shift(Vector2::x() * r!(2.0))
.parent_shift(Vector2::new(r!(5.0), r!(2.0)))
.build();
mb.set_status(BodyStatus::Kinematic);
mb.generalized_velocity_mut()[0] = r!(-3.0);
let mb_handle = bodies.insert(mb);
let mb_collider = collider_desc.build(BodyPartHandle(mb_handle, 0));
colliders.insert(mb_collider);
/*
* Setup a motorized multibody.
*/
let mut joint = RevoluteJoint::new(r!(0.0));
joint.set_desired_angular_motor_velocity(r!(-2.0));
joint.set_max_angular_motor_torque(r!(2.0));
joint.enable_angular_motor();
let mb = MultibodyDesc::new(joint)
.body_shift(Vector2::x() * r!(2.0))
.parent_shift(Vector2::new(r!(-4.0), r!(3.0)))
.build();
let mb_handle = bodies.insert(mb);
let geom = ShapeHandle::new(Ball::new(r!(2.0) * rad));
let ball_collider_desc = ColliderDesc::new(geom).density(r!(1.0));
let mb_collider = ball_collider_desc.build(BodyPartHandle(mb_handle, 0));
colliders.insert(mb_collider);
/*
* Setup a callback to control the platform.
*/
testbed.add_callback(move |_, _, bodies, _, _, time| {
if let Some(platform) = bodies.rigid_body_mut(platform_handle) {
let platform_x = platform.position().translation.vector.x;
let mut vel = *platform.velocity();
vel.linear.y = (time * r!(5.0)).sin() * r!(0.8);
if platform_x >= rad * r!(10.0) {
vel.linear.x = r!(-1.0);
}
if platform_x <= -rad * r!(10.0) {
vel.linear.x = r!(1.0);
}
platform.set_velocity(vel);
}
});
/*
* Run the simulation.
*/
testbed.set_ground_handle(Some(ground_handle));
testbed.set_world(
mechanical_world,
geometrical_world,
bodies,
colliders,
joint_constraints,
force_generators,
);
testbed.look_at(Point2::new(0.0, 5.0), 60.0);
}
fn main() {
let testbed = Testbed::<f32>::from_builders(0, vec![("Kinematic body", init_world)]);
testbed.run()
}
|
/*
|
random_line_split
|
lib.rs
|
// Copyright 2014 The Servo Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name="string_cache_plugin"]
#![crate_type="dylib"]
#![feature(plugin_registrar, quote, box_syntax)]
#![feature(rustc_private, slice_patterns)]
#![cfg_attr(test, deny(warnings))]
#![allow(unused_imports)] // for quotes
extern crate syntax;
extern crate rustc;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate mac;
extern crate string_cache_shared;
use rustc::plugin::Registry;
mod atom;
|
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("atom", atom::expand_atom);
reg.register_macro("ns", atom::expand_ns);
}
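// Hypothetical downstream usage on a matching nightly toolchain (sketch only; the exact
// invocation depends on the string_cache revision being compiled against):
//
//     #![feature(plugin)]
//     #![plugin(string_cache_plugin)]
//
//     fn main() {
//         let a = atom!("div"); // interned at compile time by the `atom` macro
//         assert_eq!(a, atom!("div"));
//     }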
|
// NB: This needs to be public or we get a linker error.
#[plugin_registrar]
|
random_line_split
|
lib.rs
|
// Copyright 2014 The Servo Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name="string_cache_plugin"]
#![crate_type="dylib"]
#![feature(plugin_registrar, quote, box_syntax)]
#![feature(rustc_private, slice_patterns)]
#![cfg_attr(test, deny(warnings))]
#![allow(unused_imports)] // for quotes
extern crate syntax;
extern crate rustc;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate mac;
extern crate string_cache_shared;
use rustc::plugin::Registry;
mod atom;
// NB: This needs to be public or we get a linker error.
#[plugin_registrar]
pub fn
|
(reg: &mut Registry) {
reg.register_macro("atom", atom::expand_atom);
reg.register_macro("ns", atom::expand_ns);
}
|
plugin_registrar
|
identifier_name
|
lib.rs
|
// Copyright 2014 The Servo Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name="string_cache_plugin"]
#![crate_type="dylib"]
#![feature(plugin_registrar, quote, box_syntax)]
#![feature(rustc_private, slice_patterns)]
#![cfg_attr(test, deny(warnings))]
#![allow(unused_imports)] // for quotes
extern crate syntax;
extern crate rustc;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate mac;
extern crate string_cache_shared;
use rustc::plugin::Registry;
mod atom;
// NB: This needs to be public or we get a linker error.
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry)
|
{
reg.register_macro("atom", atom::expand_atom);
reg.register_macro("ns", atom::expand_ns);
}
|
identifier_body
|
|
blake2xb.rs
|
use core::cmp::{min};
use {Blake2b, Hash, ParameterBlock};
use slice_ext::{SliceExt};
use unbuffered;
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub struct Blake2xb {
blake2b: Blake2b,
parameter_block: ParameterBlock,
len: u32,
}
pub struct Iter {
parameter_block: ParameterBlock,
block: [u64; 16],
block_len: usize,
max_out_len: u32,
out_len: u32,
}
impl Blake2xb {
pub fn new(len: Option<u32>) -> Self {
Self::keyed(len, &[])
}
pub fn keyed(len: Option<u32>, key: &[u8]) -> Self {
        assert!(len.map(|len| len != 0 && len != u32::max_value()).unwrap_or(true), "len must be >= 1 and <= 2^32 - 2");
let parameter_block = ParameterBlock::new()
.set_digest_len(64)
.set_key_len(key.len() as u8)
.set_fanout(1)
.set_max_depth(1)
.set_node_offset((len.unwrap_or(u32::max_value()) as u64) << 32);
Self::with_parameter_block_keyed(len, key, parameter_block)
}
pub fn with_parameter_block_keyed(len: Option<u32>, key: &[u8], parameter_block: ParameterBlock) -> Self {
        assert!(len.map(|len| len != 0 && len != u32::max_value()).unwrap_or(true), "len must be >= 1 and <= 2^32 - 2");
Blake2xb {
blake2b: Blake2b::with_parameter_block_keyed(64, key, parameter_block.clone()),
parameter_block: parameter_block,
len: len.unwrap_or(u32::max_value())
}
}
pub fn
|
(&self) -> usize {
self.len as usize
}
pub fn update(&mut self, data: &[u8]) {
self.blake2b.update(data);
}
pub fn finish(self) -> Iter {
let mut block = [0; 16];
block[..8].copy_from_slice(&self.blake2b.finish().into_inner());
let parameter_block = self.parameter_block
.set_key_len(0)
.set_fanout(0)
.set_max_depth(0)
.set_max_leaf_len(64)
.set_node_depth(0)
.set_inner_len(64);
Iter {
parameter_block: parameter_block,
block: block,
block_len: 64,
max_out_len: self.len,
out_len: 0,
}
}
}
impl Iter {
pub fn new(len: Option<u32>, seed: &[u8]) -> Self {
let parameter_block = ParameterBlock::new()
.set_max_leaf_len(64)
.set_inner_len(64);
Self::with_parameter_block(len, seed, parameter_block)
}
pub fn with_parameter_block(len: Option<u32>, seed: &[u8], parameter_block: ParameterBlock) -> Self {
        assert!(len.map(|len| len != 0 && len != u32::max_value()).unwrap_or(true), "len must be >= 1 and <= 2^32 - 2");
assert!(seed.len() <= 128, "seed length must be <= 128");
let mut block = [0; 16];
block.as_mut_bytes()[..seed.len()].copy_from_slice(seed);
Iter {
parameter_block: parameter_block,
block: block,
block_len: seed.len(),
max_out_len: len.unwrap_or(u32::max_value()),
out_len: 0
}
}
pub fn max_out_len(&self) -> usize {
self.max_out_len as usize
}
pub fn out_len(&self) -> usize {
self.out_len as usize
}
}
impl Iterator for Iter {
type Item = Hash;
fn next(&mut self) -> Option<Self::Item> {
if self.out_len == self.max_out_len {
return None;
}
let len = min(self.max_out_len - self.out_len, 64);
let parameter_block = self.parameter_block.clone()
.set_digest_len(len as u8)
.set_node_offset((self.out_len as u64 / 64) | ((self.max_out_len as u64) << 32));
self.out_len += len;
Some(unbuffered::Blake2b::with_parameter_block(len as usize, parameter_block).finish(&self.block, self.block_len))
}
fn size_hint(&self) -> (usize, Option<usize>) {
        let i = (self.max_out_len - self.out_len) as usize / 64 + if self.max_out_len % 64 != 0 { 1 } else { 0 };
(i, Some(i))
}
}
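// Hypothetical usage sketch (method and type names are taken from this module, but the
// exact public exports are not verified here):
//
//     let mut xof = Blake2xb::new(Some(256));
//     xof.update(b"some input");
//     let mut out = Vec::new();
//     for block in xof.finish() {
//         out.extend_from_slice(block.as_ref()); // assumes `Hash: AsRef<[u8]>`
//     }
//     assert_eq!(out.len(), 256);
//
// Each iteration yields at most 64 bytes, so a 256-byte output is produced in four blocks.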
|
len
|
identifier_name
|
blake2xb.rs
|
use core::cmp::{min};
use {Blake2b, Hash, ParameterBlock};
use slice_ext::{SliceExt};
use unbuffered;
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub struct Blake2xb {
blake2b: Blake2b,
parameter_block: ParameterBlock,
len: u32,
}
pub struct Iter {
parameter_block: ParameterBlock,
block: [u64; 16],
block_len: usize,
max_out_len: u32,
out_len: u32,
}
impl Blake2xb {
pub fn new(len: Option<u32>) -> Self {
Self::keyed(len, &[])
}
pub fn keyed(len: Option<u32>, key: &[u8]) -> Self {
        assert!(len.map(|len| len != 0 && len != u32::max_value()).unwrap_or(true), "len must be >= 1 and <= 2^32 - 2");
let parameter_block = ParameterBlock::new()
.set_digest_len(64)
.set_key_len(key.len() as u8)
.set_fanout(1)
.set_max_depth(1)
.set_node_offset((len.unwrap_or(u32::max_value()) as u64) << 32);
Self::with_parameter_block_keyed(len, key, parameter_block)
}
pub fn with_parameter_block_keyed(len: Option<u32>, key: &[u8], parameter_block: ParameterBlock) -> Self {
        assert!(len.map(|len| len != 0 && len != u32::max_value()).unwrap_or(true), "len must be >= 1 and <= 2^32 - 2");
Blake2xb {
blake2b: Blake2b::with_parameter_block_keyed(64, key, parameter_block.clone()),
parameter_block: parameter_block,
len: len.unwrap_or(u32::max_value())
}
}
pub fn len(&self) -> usize {
self.len as usize
}
pub fn update(&mut self, data: &[u8]) {
self.blake2b.update(data);
}
pub fn finish(self) -> Iter {
let mut block = [0; 16];
block[..8].copy_from_slice(&self.blake2b.finish().into_inner());
let parameter_block = self.parameter_block
.set_key_len(0)
.set_fanout(0)
.set_max_depth(0)
.set_max_leaf_len(64)
.set_node_depth(0)
.set_inner_len(64);
Iter {
parameter_block: parameter_block,
block: block,
block_len: 64,
max_out_len: self.len,
out_len: 0,
}
}
}
impl Iter {
pub fn new(len: Option<u32>, seed: &[u8]) -> Self {
let parameter_block = ParameterBlock::new()
.set_max_leaf_len(64)
.set_inner_len(64);
Self::with_parameter_block(len, seed, parameter_block)
}
pub fn with_parameter_block(len: Option<u32>, seed: &[u8], parameter_block: ParameterBlock) -> Self {
        assert!(len.map(|len| len != 0 && len != u32::max_value()).unwrap_or(true), "len must be >= 1 and <= 2^32 - 2");
assert!(seed.len() <= 128, "seed length must be <= 128");
let mut block = [0; 16];
block.as_mut_bytes()[..seed.len()].copy_from_slice(seed);
Iter {
parameter_block: parameter_block,
block: block,
block_len: seed.len(),
max_out_len: len.unwrap_or(u32::max_value()),
out_len: 0
}
}
pub fn max_out_len(&self) -> usize {
self.max_out_len as usize
}
pub fn out_len(&self) -> usize {
self.out_len as usize
}
}
impl Iterator for Iter {
type Item = Hash;
fn next(&mut self) -> Option<Self::Item> {
if self.out_len == self.max_out_len {
return None;
}
let len = min(self.max_out_len - self.out_len, 64);
let parameter_block = self.parameter_block.clone()
.set_digest_len(len as u8)
.set_node_offset((self.out_len as u64 / 64) | ((self.max_out_len as u64) << 32));
self.out_len += len;
Some(unbuffered::Blake2b::with_parameter_block(len as usize, parameter_block).finish(&self.block, self.block_len))
}
fn size_hint(&self) -> (usize, Option<usize>) {
        let i = (self.max_out_len - self.out_len) as usize / 64 + if self.max_out_len % 64 != 0
|
else { 0 };
(i, Some(i))
}
}
|
{ 1 }
|
conditional_block
|
blake2xb.rs
|
use core::cmp::{min};
use {Blake2b, Hash, ParameterBlock};
use slice_ext::{SliceExt};
use unbuffered;
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub struct Blake2xb {
blake2b: Blake2b,
parameter_block: ParameterBlock,
len: u32,
}
pub struct Iter {
parameter_block: ParameterBlock,
block: [u64; 16],
block_len: usize,
max_out_len: u32,
out_len: u32,
}
impl Blake2xb {
pub fn new(len: Option<u32>) -> Self {
Self::keyed(len, &[])
}
pub fn keyed(len: Option<u32>, key: &[u8]) -> Self {
        assert!(len.map(|len| len != 0 && len != u32::max_value()).unwrap_or(true), "len must be >= 1 and <= 2^32 - 2");
let parameter_block = ParameterBlock::new()
.set_digest_len(64)
.set_key_len(key.len() as u8)
.set_fanout(1)
.set_max_depth(1)
.set_node_offset((len.unwrap_or(u32::max_value()) as u64) << 32);
|
Self::with_parameter_block_keyed(len, key, parameter_block)
}
pub fn with_parameter_block_keyed(len: Option<u32>, key: &[u8], parameter_block: ParameterBlock) -> Self {
        assert!(len.map(|len| len != 0 && len != u32::max_value()).unwrap_or(true), "len must be >= 1 and <= 2^32 - 2");
Blake2xb {
blake2b: Blake2b::with_parameter_block_keyed(64, key, parameter_block.clone()),
parameter_block: parameter_block,
len: len.unwrap_or(u32::max_value())
}
}
pub fn len(&self) -> usize {
self.len as usize
}
pub fn update(&mut self, data: &[u8]) {
self.blake2b.update(data);
}
pub fn finish(self) -> Iter {
let mut block = [0; 16];
block[..8].copy_from_slice(&self.blake2b.finish().into_inner());
let parameter_block = self.parameter_block
.set_key_len(0)
.set_fanout(0)
.set_max_depth(0)
.set_max_leaf_len(64)
.set_node_depth(0)
.set_inner_len(64);
Iter {
parameter_block: parameter_block,
block: block,
block_len: 64,
max_out_len: self.len,
out_len: 0,
}
}
}
impl Iter {
pub fn new(len: Option<u32>, seed: &[u8]) -> Self {
let parameter_block = ParameterBlock::new()
.set_max_leaf_len(64)
.set_inner_len(64);
Self::with_parameter_block(len, seed, parameter_block)
}
pub fn with_parameter_block(len: Option<u32>, seed: &[u8], parameter_block: ParameterBlock) -> Self {
        assert!(len.map(|len| len != 0 && len != u32::max_value()).unwrap_or(true), "len must be >= 1 and <= 2^32 - 2");
assert!(seed.len() <= 128, "seed length must be <= 128");
let mut block = [0; 16];
block.as_mut_bytes()[..seed.len()].copy_from_slice(seed);
Iter {
parameter_block: parameter_block,
block: block,
block_len: seed.len(),
max_out_len: len.unwrap_or(u32::max_value()),
out_len: 0
}
}
pub fn max_out_len(&self) -> usize {
self.max_out_len as usize
}
pub fn out_len(&self) -> usize {
self.out_len as usize
}
}
impl Iterator for Iter {
type Item = Hash;
fn next(&mut self) -> Option<Self::Item> {
if self.out_len == self.max_out_len {
return None;
}
let len = min(self.max_out_len - self.out_len, 64);
let parameter_block = self.parameter_block.clone()
.set_digest_len(len as u8)
.set_node_offset((self.out_len as u64 / 64) | ((self.max_out_len as u64) << 32));
self.out_len += len;
Some(unbuffered::Blake2b::with_parameter_block(len as usize, parameter_block).finish(&self.block, self.block_len))
}
fn size_hint(&self) -> (usize, Option<usize>) {
        let i = (self.max_out_len - self.out_len) as usize / 64 + if self.max_out_len % 64 != 0 { 1 } else { 0 };
(i, Some(i))
}
}
|
random_line_split
|
|
lib.rs
|
#![cfg(test)]
use ::rand::Rng;
use bincode_1::Options;
mod membership;
mod misc;
mod rand;
mod sway;
pub fn test_same_with_config<T, C, O>(t: &T, bincode_1_options: O, bincode_2_config: C)
where
T: bincode_2::Encode
+ bincode_2::Decode
+ serde::Serialize
+ serde::de::DeserializeOwned
+ core::fmt::Debug
+ PartialEq,
C: bincode_2::config::Config,
O: bincode_1::Options + Copy,
{
// This is what bincode 1 serializes to. This will be our comparison value.
let encoded = bincode_1_options.serialize(t).unwrap();
println!("Encoded {:?} as {:?}", t, encoded);
// Test bincode 2 encode
let bincode_2_output = bincode_2::encode_to_vec(t, bincode_2_config).unwrap();
assert_eq!(encoded, bincode_2_output, "{:?} serializes differently", t);
// Test bincode 2 serde serialize
let bincode_2_serde_output = bincode_2::serde::encode_to_vec(t, bincode_2_config).unwrap();
assert_eq!(
encoded, bincode_2_serde_output,
"{:?} serializes differently",
t
);
// Test bincode 1 deserialize
let decoded: T = bincode_1_options.deserialize(&encoded).unwrap();
assert_eq!(&decoded, t);
// Test bincode 2 decode
let decoded: T = bincode_2::decode_from_slice(&encoded, bincode_2_config)
.unwrap()
.0;
assert_eq!(&decoded, t);
// Test bincode 2 serde deserialize
let decoded: T = bincode_2::serde::decode_from_slice(&encoded, bincode_2_config)
.unwrap()
.0;
assert_eq!(&decoded, t);
}
pub fn test_same<T>(t: T)
where
T: bincode_2::Encode
+ bincode_2::Decode
+ serde::Serialize
+ serde::de::DeserializeOwned
+ core::fmt::Debug
+ PartialEq,
|
&t,
bincode_1::options()
.with_little_endian()
.with_varint_encoding(),
bincode_2::config::legacy()
.with_little_endian()
.with_variable_int_encoding(),
);
test_same_with_config(
&t,
bincode_1::options()
.with_big_endian()
.with_fixint_encoding(),
bincode_2::config::legacy()
.with_big_endian()
.with_fixed_int_encoding(),
);
test_same_with_config(
&t,
bincode_1::options()
.with_little_endian()
.with_fixint_encoding(),
bincode_2::config::legacy()
.with_little_endian()
.with_fixed_int_encoding(),
);
}
pub fn gen_string(rng: &mut impl Rng) -> String {
let len = rng.gen_range(0..100usize);
let mut result = String::with_capacity(len * 4);
for _ in 0..len {
result.push(rng.gen_range('\0'..char::MAX));
}
result
}
|
{
test_same_with_config(
&t,
// This is the config used internally by bincode 1
bincode_1::options().with_fixint_encoding(),
// Should match `::legacy()`
bincode_2::config::legacy(),
);
// Check a bunch of different configs:
test_same_with_config(
&t,
bincode_1::options()
.with_big_endian()
.with_varint_encoding(),
bincode_2::config::legacy()
.with_big_endian()
.with_variable_int_encoding(),
);
test_same_with_config(
|
identifier_body
|
lib.rs
|
#![cfg(test)]
use ::rand::Rng;
use bincode_1::Options;
mod membership;
mod misc;
mod rand;
mod sway;
pub fn test_same_with_config<T, C, O>(t: &T, bincode_1_options: O, bincode_2_config: C)
where
T: bincode_2::Encode
+ bincode_2::Decode
+ serde::Serialize
+ serde::de::DeserializeOwned
+ core::fmt::Debug
+ PartialEq,
C: bincode_2::config::Config,
O: bincode_1::Options + Copy,
{
// This is what bincode 1 serializes to. This will be our comparison value.
let encoded = bincode_1_options.serialize(t).unwrap();
println!("Encoded {:?} as {:?}", t, encoded);
// Test bincode 2 encode
let bincode_2_output = bincode_2::encode_to_vec(t, bincode_2_config).unwrap();
assert_eq!(encoded, bincode_2_output, "{:?} serializes differently", t);
// Test bincode 2 serde serialize
let bincode_2_serde_output = bincode_2::serde::encode_to_vec(t, bincode_2_config).unwrap();
assert_eq!(
encoded, bincode_2_serde_output,
"{:?} serializes differently",
t
);
// Test bincode 1 deserialize
let decoded: T = bincode_1_options.deserialize(&encoded).unwrap();
assert_eq!(&decoded, t);
// Test bincode 2 decode
let decoded: T = bincode_2::decode_from_slice(&encoded, bincode_2_config)
.unwrap()
.0;
assert_eq!(&decoded, t);
// Test bincode 2 serde deserialize
let decoded: T = bincode_2::serde::decode_from_slice(&encoded, bincode_2_config)
.unwrap()
.0;
assert_eq!(&decoded, t);
}
pub fn test_same<T>(t: T)
where
T: bincode_2::Encode
+ bincode_2::Decode
+ serde::Serialize
+ serde::de::DeserializeOwned
+ core::fmt::Debug
+ PartialEq,
{
test_same_with_config(
&t,
// This is the config used internally by bincode 1
bincode_1::options().with_fixint_encoding(),
// Should match `::legacy()`
bincode_2::config::legacy(),
);
// Check a bunch of different configs:
test_same_with_config(
&t,
bincode_1::options()
.with_big_endian()
.with_varint_encoding(),
|
.with_big_endian()
.with_variable_int_encoding(),
);
test_same_with_config(
&t,
bincode_1::options()
.with_little_endian()
.with_varint_encoding(),
bincode_2::config::legacy()
.with_little_endian()
.with_variable_int_encoding(),
);
test_same_with_config(
&t,
bincode_1::options()
.with_big_endian()
.with_fixint_encoding(),
bincode_2::config::legacy()
.with_big_endian()
.with_fixed_int_encoding(),
);
test_same_with_config(
&t,
bincode_1::options()
.with_little_endian()
.with_fixint_encoding(),
bincode_2::config::legacy()
.with_little_endian()
.with_fixed_int_encoding(),
);
}
pub fn gen_string(rng: &mut impl Rng) -> String {
let len = rng.gen_range(0..100usize);
let mut result = String::with_capacity(len * 4);
for _ in 0..len {
result.push(rng.gen_range('\0'..char::MAX));
}
result
}
|
bincode_2::config::legacy()
|
random_line_split
|
lib.rs
|
#![cfg(test)]
use ::rand::Rng;
use bincode_1::Options;
mod membership;
mod misc;
mod rand;
mod sway;
pub fn test_same_with_config<T, C, O>(t: &T, bincode_1_options: O, bincode_2_config: C)
where
T: bincode_2::Encode
+ bincode_2::Decode
+ serde::Serialize
+ serde::de::DeserializeOwned
+ core::fmt::Debug
+ PartialEq,
C: bincode_2::config::Config,
O: bincode_1::Options + Copy,
{
// This is what bincode 1 serializes to. This will be our comparison value.
let encoded = bincode_1_options.serialize(t).unwrap();
println!("Encoded {:?} as {:?}", t, encoded);
// Test bincode 2 encode
let bincode_2_output = bincode_2::encode_to_vec(t, bincode_2_config).unwrap();
assert_eq!(encoded, bincode_2_output, "{:?} serializes differently", t);
// Test bincode 2 serde serialize
let bincode_2_serde_output = bincode_2::serde::encode_to_vec(t, bincode_2_config).unwrap();
assert_eq!(
encoded, bincode_2_serde_output,
"{:?} serializes differently",
t
);
// Test bincode 1 deserialize
let decoded: T = bincode_1_options.deserialize(&encoded).unwrap();
assert_eq!(&decoded, t);
// Test bincode 2 decode
let decoded: T = bincode_2::decode_from_slice(&encoded, bincode_2_config)
.unwrap()
.0;
assert_eq!(&decoded, t);
// Test bincode 2 serde deserialize
let decoded: T = bincode_2::serde::decode_from_slice(&encoded, bincode_2_config)
.unwrap()
.0;
assert_eq!(&decoded, t);
}
pub fn test_same<T>(t: T)
where
T: bincode_2::Encode
+ bincode_2::Decode
+ serde::Serialize
+ serde::de::DeserializeOwned
+ core::fmt::Debug
+ PartialEq,
{
test_same_with_config(
&t,
// This is the config used internally by bincode 1
bincode_1::options().with_fixint_encoding(),
// Should match `::legacy()`
bincode_2::config::legacy(),
);
// Check a bunch of different configs:
test_same_with_config(
&t,
bincode_1::options()
.with_big_endian()
.with_varint_encoding(),
bincode_2::config::legacy()
.with_big_endian()
.with_variable_int_encoding(),
);
test_same_with_config(
&t,
bincode_1::options()
.with_little_endian()
.with_varint_encoding(),
bincode_2::config::legacy()
.with_little_endian()
.with_variable_int_encoding(),
);
test_same_with_config(
&t,
bincode_1::options()
.with_big_endian()
.with_fixint_encoding(),
bincode_2::config::legacy()
.with_big_endian()
.with_fixed_int_encoding(),
);
test_same_with_config(
&t,
bincode_1::options()
.with_little_endian()
.with_fixint_encoding(),
bincode_2::config::legacy()
.with_little_endian()
.with_fixed_int_encoding(),
);
}
pub fn
|
(rng: &mut impl Rng) -> String {
let len = rng.gen_range(0..100usize);
let mut result = String::with_capacity(len * 4);
for _ in 0..len {
result.push(rng.gen_range('\0'..char::MAX));
}
result
}
|
gen_string
|
identifier_name
|
iter-any.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn is_even(x: &uint) -> bool { (*x % 2) == 0 }
|
assert!(!old_iter::any(&Some(1u), is_even));
assert!(old_iter::any(&Some(2u), is_even));
assert!(!old_iter::any(&None::<uint>, is_even));
}
|
pub fn main() {
assert!(![1u, 3u].any(is_even));
assert!([1u, 2u].any(is_even));
assert!(![].any(is_even));
|
random_line_split
|
iter-any.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn is_even(x: &uint) -> bool { (*x % 2) == 0 }
pub fn main()
|
{
assert!(![1u, 3u].any(is_even));
assert!([1u, 2u].any(is_even));
assert!(![].any(is_even));
assert!(!old_iter::any(&Some(1u), is_even));
assert!(old_iter::any(&Some(2u), is_even));
assert!(!old_iter::any(&None::<uint>, is_even));
}
|
identifier_body
|
|
iter-any.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn
|
(x: &uint) -> bool { (*x % 2) == 0 }
pub fn main() {
assert!(![1u, 3u].any(is_even));
assert!([1u, 2u].any(is_even));
assert!(![].any(is_even));
assert!(!old_iter::any(&Some(1u), is_even));
assert!(old_iter::any(&Some(2u), is_even));
assert!(!old_iter::any(&None::<uint>, is_even));
}
|
is_even
|
identifier_name
|
mod.rs
|
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
//! Storage Transactions
pub mod commands;
pub mod sched_pool;
pub mod scheduler;
mod actions;
pub use actions::{
acquire_pessimistic_lock::acquire_pessimistic_lock,
cleanup::cleanup,
commit::commit,
gc::gc,
prewrite::{prewrite, CommitKind, TransactionKind, TransactionProperties},
};
mod latch;
mod store;
use crate::storage::{
types::{MvccInfo, PessimisticLockRes, PrewriteResult, SecondaryLocksStatus, TxnStatus},
Error as StorageError, Result as StorageResult,
};
use error_code::{self, ErrorCode, ErrorCodeExt};
use kvproto::kvrpcpb::LockInfo;
use std::error;
use std::fmt;
use std::io::Error as IoError;
use txn_types::{Key, TimeStamp};
pub use self::commands::{Command, RESOLVE_LOCK_BATCH_SIZE};
pub use self::scheduler::Scheduler;
pub use self::store::{
EntryBatch, FixtureStore, FixtureStoreScanner, Scanner, SnapshotStore, Store, TxnEntry,
TxnEntryScanner, TxnEntryStore,
};
/// Process result of a command.
#[derive(Debug)]
pub enum ProcessResult {
Res,
MultiRes {
results: Vec<StorageResult<()>>,
},
PrewriteResult {
result: PrewriteResult,
},
MvccKey {
mvcc: MvccInfo,
},
MvccStartTs {
mvcc: Option<(Key, MvccInfo)>,
},
Locks {
locks: Vec<LockInfo>,
},
TxnStatus {
txn_status: TxnStatus,
},
NextCommand {
cmd: Command,
},
Failed {
err: StorageError,
},
PessimisticLockRes {
res: StorageResult<PessimisticLockRes>,
},
SecondaryLocksStatus {
status: SecondaryLocksStatus,
},
}
impl ProcessResult {
pub fn maybe_clone(&self) -> Option<ProcessResult> {
match self {
ProcessResult::PessimisticLockRes { res: Ok(r) } => {
Some(ProcessResult::PessimisticLockRes { res: Ok(r.clone()) })
}
_ => None,
}
}
}
quick_error! {
#[derive(Debug)]
pub enum ErrorInner {
Engine(err: crate::storage::kv::Error) {
from()
cause(err)
display("{}", err)
}
Codec(err: tikv_util::codec::Error) {
from()
cause(err)
display("{}", err)
}
ProtoBuf(err: protobuf::error::ProtobufError) {
from()
cause(err)
display("{}", err)
}
Mvcc(err: crate::storage::mvcc::Error) {
from()
cause(err)
display("{}", err)
}
Other(err: Box<dyn error::Error + Sync + Send>) {
from()
cause(err.as_ref())
display("{:?}", err)
}
Io(err: IoError) {
from()
cause(err)
display("{}", err)
}
InvalidTxnTso {start_ts: TimeStamp, commit_ts: TimeStamp} {
display("Invalid transaction tso with start_ts:{},commit_ts:{}",
start_ts,
commit_ts)
}
InvalidReqRange {start: Option<Vec<u8>>,
end: Option<Vec<u8>>,
lower_bound: Option<Vec<u8>>,
upper_bound: Option<Vec<u8>>} {
display("Request range exceeds bound, request range:[{}, end:{}), physical bound:[{}, {})",
start.as_ref().map(|x| &x[..]).map(log_wrappers::Value::key).map(|x| format!("{:?}", x)).unwrap_or_else(|| "(none)".to_owned()),
end.as_ref().map(|x| &x[..]).map(log_wrappers::Value::key).map(|x| format!("{:?}", x)).unwrap_or_else(|| "(none)".to_owned()),
lower_bound.as_ref().map(|x| &x[..]).map(log_wrappers::Value::key).map(|x| format!("{:?}", x)).unwrap_or_else(|| "(none)".to_owned()),
upper_bound.as_ref().map(|x| &x[..]).map(log_wrappers::Value::key).map(|x| format!("{:?}", x)).unwrap_or_else(|| "(none)".to_owned()))
}
MaxTimestampNotSynced { region_id: u64, start_ts: TimeStamp } {
display("Prewrite for async commit fails due to potentially stale max timestamp, start_ts: {}, region_id: {}",
start_ts,
region_id)
}
}
}
impl ErrorInner {
pub fn maybe_clone(&self) -> Option<ErrorInner> {
match *self {
ErrorInner::Engine(ref e) => e.maybe_clone().map(ErrorInner::Engine),
ErrorInner::Codec(ref e) => e.maybe_clone().map(ErrorInner::Codec),
ErrorInner::Mvcc(ref e) => e.maybe_clone().map(ErrorInner::Mvcc),
ErrorInner::InvalidTxnTso {
start_ts,
commit_ts,
} => Some(ErrorInner::InvalidTxnTso {
start_ts,
commit_ts,
}),
ErrorInner::InvalidReqRange {
ref start,
ref end,
ref lower_bound,
ref upper_bound,
} => Some(ErrorInner::InvalidReqRange {
start: start.clone(),
end: end.clone(),
lower_bound: lower_bound.clone(),
upper_bound: upper_bound.clone(),
}),
ErrorInner::MaxTimestampNotSynced {
region_id,
start_ts,
} => Some(ErrorInner::MaxTimestampNotSynced {
region_id,
start_ts,
}),
ErrorInner::Other(_) | ErrorInner::ProtoBuf(_) | ErrorInner::Io(_) => None,
}
}
}
pub struct Error(pub Box<ErrorInner>);
impl Error {
pub fn maybe_clone(&self) -> Option<Error> {
self.0.maybe_clone().map(Error::from)
}
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.0, f)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
impl std::error::Error for Error {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)>
|
}
impl From<ErrorInner> for Error {
#[inline]
fn from(e: ErrorInner) -> Self {
Error(Box::new(e))
}
}
impl<T: Into<ErrorInner>> From<T> for Error {
#[inline]
default fn from(err: T) -> Self {
let err = err.into();
err.into()
}
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorCodeExt for Error {
fn error_code(&self) -> ErrorCode {
match self.0.as_ref() {
ErrorInner::Engine(e) => e.error_code(),
ErrorInner::Codec(e) => e.error_code(),
ErrorInner::ProtoBuf(_) => error_code::storage::PROTOBUF,
ErrorInner::Mvcc(e) => e.error_code(),
ErrorInner::Other(_) => error_code::storage::UNKNOWN,
ErrorInner::Io(_) => error_code::storage::IO,
            ErrorInner::InvalidTxnTso { .. } => error_code::storage::INVALID_TXN_TSO,
            ErrorInner::InvalidReqRange { .. } => error_code::storage::INVALID_REQ_RANGE,
            ErrorInner::MaxTimestampNotSynced { .. } => {
error_code::storage::MAX_TIMESTAMP_NOT_SYNCED
}
}
}
}
pub mod tests {
use super::*;
pub use actions::acquire_pessimistic_lock::tests::{
must_err as must_acquire_pessimistic_lock_err,
must_err_return_value as must_acquire_pessimistic_lock_return_value_err,
must_pessimistic_locked, must_succeed as must_acquire_pessimistic_lock,
must_succeed_for_large_txn as must_acquire_pessimistic_lock_for_large_txn,
must_succeed_impl as must_acquire_pessimistic_lock_impl,
must_succeed_return_value as must_acquire_pessimistic_lock_return_value,
must_succeed_with_ttl as must_acquire_pessimistic_lock_with_ttl,
};
pub use actions::cleanup::tests::{
must_cleanup_with_gc_fence, must_err as must_cleanup_err, must_succeed as must_cleanup,
};
pub use actions::commit::tests::{must_err as must_commit_err, must_succeed as must_commit};
pub use actions::gc::tests::must_succeed as must_gc;
pub use actions::prewrite::tests::{
try_pessimistic_prewrite_check_not_exists, try_prewrite_check_not_exists,
try_prewrite_insert,
};
pub use actions::tests::*;
}
|
{
std::error::Error::source(&self.0)
}
|
identifier_body
|
mod.rs
|
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
//! Storage Transactions
pub mod commands;
pub mod sched_pool;
pub mod scheduler;
mod actions;
pub use actions::{
acquire_pessimistic_lock::acquire_pessimistic_lock,
cleanup::cleanup,
commit::commit,
gc::gc,
prewrite::{prewrite, CommitKind, TransactionKind, TransactionProperties},
};
mod latch;
mod store;
use crate::storage::{
types::{MvccInfo, PessimisticLockRes, PrewriteResult, SecondaryLocksStatus, TxnStatus},
Error as StorageError, Result as StorageResult,
};
use error_code::{self, ErrorCode, ErrorCodeExt};
use kvproto::kvrpcpb::LockInfo;
use std::error;
use std::fmt;
use std::io::Error as IoError;
use txn_types::{Key, TimeStamp};
pub use self::commands::{Command, RESOLVE_LOCK_BATCH_SIZE};
pub use self::scheduler::Scheduler;
pub use self::store::{
EntryBatch, FixtureStore, FixtureStoreScanner, Scanner, SnapshotStore, Store, TxnEntry,
TxnEntryScanner, TxnEntryStore,
};
/// Process result of a command.
#[derive(Debug)]
pub enum ProcessResult {
Res,
MultiRes {
results: Vec<StorageResult<()>>,
},
PrewriteResult {
result: PrewriteResult,
},
MvccKey {
mvcc: MvccInfo,
},
MvccStartTs {
mvcc: Option<(Key, MvccInfo)>,
},
Locks {
locks: Vec<LockInfo>,
},
TxnStatus {
txn_status: TxnStatus,
},
NextCommand {
cmd: Command,
},
Failed {
err: StorageError,
},
PessimisticLockRes {
res: StorageResult<PessimisticLockRes>,
},
SecondaryLocksStatus {
status: SecondaryLocksStatus,
},
}
impl ProcessResult {
pub fn maybe_clone(&self) -> Option<ProcessResult> {
match self {
ProcessResult::PessimisticLockRes { res: Ok(r) } => {
Some(ProcessResult::PessimisticLockRes { res: Ok(r.clone()) })
}
_ => None,
}
}
}
quick_error! {
#[derive(Debug)]
pub enum ErrorInner {
Engine(err: crate::storage::kv::Error) {
from()
cause(err)
display("{}", err)
}
Codec(err: tikv_util::codec::Error) {
from()
cause(err)
display("{}", err)
}
ProtoBuf(err: protobuf::error::ProtobufError) {
from()
cause(err)
display("{}", err)
}
Mvcc(err: crate::storage::mvcc::Error) {
from()
cause(err)
display("{}", err)
}
Other(err: Box<dyn error::Error + Sync + Send>) {
from()
cause(err.as_ref())
display("{:?}", err)
}
Io(err: IoError) {
from()
cause(err)
display("{}", err)
}
InvalidTxnTso {start_ts: TimeStamp, commit_ts: TimeStamp} {
display("Invalid transaction tso with start_ts:{},commit_ts:{}",
start_ts,
commit_ts)
}
InvalidReqRange {start: Option<Vec<u8>>,
end: Option<Vec<u8>>,
lower_bound: Option<Vec<u8>>,
upper_bound: Option<Vec<u8>>} {
display("Request range exceeds bound, request range:[{}, end:{}), physical bound:[{}, {})",
start.as_ref().map(|x| &x[..]).map(log_wrappers::Value::key).map(|x| format!("{:?}", x)).unwrap_or_else(|| "(none)".to_owned()),
end.as_ref().map(|x| &x[..]).map(log_wrappers::Value::key).map(|x| format!("{:?}", x)).unwrap_or_else(|| "(none)".to_owned()),
lower_bound.as_ref().map(|x| &x[..]).map(log_wrappers::Value::key).map(|x| format!("{:?}", x)).unwrap_or_else(|| "(none)".to_owned()),
upper_bound.as_ref().map(|x| &x[..]).map(log_wrappers::Value::key).map(|x| format!("{:?}", x)).unwrap_or_else(|| "(none)".to_owned()))
}
MaxTimestampNotSynced { region_id: u64, start_ts: TimeStamp } {
display("Prewrite for async commit fails due to potentially stale max timestamp, start_ts: {}, region_id: {}",
start_ts,
region_id)
}
}
}
impl ErrorInner {
pub fn maybe_clone(&self) -> Option<ErrorInner> {
match *self {
ErrorInner::Engine(ref e) => e.maybe_clone().map(ErrorInner::Engine),
ErrorInner::Codec(ref e) => e.maybe_clone().map(ErrorInner::Codec),
ErrorInner::Mvcc(ref e) => e.maybe_clone().map(ErrorInner::Mvcc),
ErrorInner::InvalidTxnTso {
start_ts,
commit_ts,
} => Some(ErrorInner::InvalidTxnTso {
start_ts,
commit_ts,
}),
ErrorInner::InvalidReqRange {
ref start,
ref end,
ref lower_bound,
ref upper_bound,
} => Some(ErrorInner::InvalidReqRange {
start: start.clone(),
end: end.clone(),
lower_bound: lower_bound.clone(),
upper_bound: upper_bound.clone(),
}),
ErrorInner::MaxTimestampNotSynced {
region_id,
start_ts,
} => Some(ErrorInner::MaxTimestampNotSynced {
region_id,
start_ts,
}),
ErrorInner::Other(_) | ErrorInner::ProtoBuf(_) | ErrorInner::Io(_) => None,
}
}
}
pub struct Error(pub Box<ErrorInner>);
impl Error {
pub fn maybe_clone(&self) -> Option<Error> {
self.0.maybe_clone().map(Error::from)
}
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.0, f)
}
}
impl fmt::Display for Error {
fn
|
(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
impl std::error::Error for Error {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
std::error::Error::source(&self.0)
}
}
impl From<ErrorInner> for Error {
#[inline]
fn from(e: ErrorInner) -> Self {
Error(Box::new(e))
}
}
impl<T: Into<ErrorInner>> From<T> for Error {
#[inline]
default fn from(err: T) -> Self {
let err = err.into();
err.into()
}
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorCodeExt for Error {
fn error_code(&self) -> ErrorCode {
match self.0.as_ref() {
ErrorInner::Engine(e) => e.error_code(),
ErrorInner::Codec(e) => e.error_code(),
ErrorInner::ProtoBuf(_) => error_code::storage::PROTOBUF,
ErrorInner::Mvcc(e) => e.error_code(),
ErrorInner::Other(_) => error_code::storage::UNKNOWN,
ErrorInner::Io(_) => error_code::storage::IO,
            ErrorInner::InvalidTxnTso { .. } => error_code::storage::INVALID_TXN_TSO,
            ErrorInner::InvalidReqRange { .. } => error_code::storage::INVALID_REQ_RANGE,
            ErrorInner::MaxTimestampNotSynced { .. } => {
error_code::storage::MAX_TIMESTAMP_NOT_SYNCED
}
}
}
}
pub mod tests {
use super::*;
pub use actions::acquire_pessimistic_lock::tests::{
must_err as must_acquire_pessimistic_lock_err,
must_err_return_value as must_acquire_pessimistic_lock_return_value_err,
must_pessimistic_locked, must_succeed as must_acquire_pessimistic_lock,
must_succeed_for_large_txn as must_acquire_pessimistic_lock_for_large_txn,
must_succeed_impl as must_acquire_pessimistic_lock_impl,
must_succeed_return_value as must_acquire_pessimistic_lock_return_value,
must_succeed_with_ttl as must_acquire_pessimistic_lock_with_ttl,
};
pub use actions::cleanup::tests::{
must_cleanup_with_gc_fence, must_err as must_cleanup_err, must_succeed as must_cleanup,
};
pub use actions::commit::tests::{must_err as must_commit_err, must_succeed as must_commit};
pub use actions::gc::tests::must_succeed as must_gc;
pub use actions::prewrite::tests::{
try_pessimistic_prewrite_check_not_exists, try_prewrite_check_not_exists,
try_prewrite_insert,
};
pub use actions::tests::*;
}
|
fmt
|
identifier_name
|
mod.rs
|
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
//! Storage Transactions
pub mod commands;
pub mod sched_pool;
pub mod scheduler;
mod actions;
pub use actions::{
acquire_pessimistic_lock::acquire_pessimistic_lock,
cleanup::cleanup,
commit::commit,
gc::gc,
prewrite::{prewrite, CommitKind, TransactionKind, TransactionProperties},
};
mod latch;
mod store;
use crate::storage::{
types::{MvccInfo, PessimisticLockRes, PrewriteResult, SecondaryLocksStatus, TxnStatus},
Error as StorageError, Result as StorageResult,
};
use error_code::{self, ErrorCode, ErrorCodeExt};
use kvproto::kvrpcpb::LockInfo;
use std::error;
use std::fmt;
use std::io::Error as IoError;
use txn_types::{Key, TimeStamp};
pub use self::commands::{Command, RESOLVE_LOCK_BATCH_SIZE};
pub use self::scheduler::Scheduler;
pub use self::store::{
EntryBatch, FixtureStore, FixtureStoreScanner, Scanner, SnapshotStore, Store, TxnEntry,
TxnEntryScanner, TxnEntryStore,
};
/// Process result of a command.
#[derive(Debug)]
pub enum ProcessResult {
Res,
MultiRes {
results: Vec<StorageResult<()>>,
},
PrewriteResult {
result: PrewriteResult,
},
MvccKey {
mvcc: MvccInfo,
},
MvccStartTs {
mvcc: Option<(Key, MvccInfo)>,
},
Locks {
locks: Vec<LockInfo>,
},
TxnStatus {
txn_status: TxnStatus,
},
NextCommand {
cmd: Command,
},
Failed {
err: StorageError,
},
PessimisticLockRes {
res: StorageResult<PessimisticLockRes>,
},
SecondaryLocksStatus {
status: SecondaryLocksStatus,
},
}
|
pub fn maybe_clone(&self) -> Option<ProcessResult> {
match self {
ProcessResult::PessimisticLockRes { res: Ok(r) } => {
Some(ProcessResult::PessimisticLockRes { res: Ok(r.clone()) })
}
_ => None,
}
}
}
quick_error! {
#[derive(Debug)]
pub enum ErrorInner {
Engine(err: crate::storage::kv::Error) {
from()
cause(err)
display("{}", err)
}
Codec(err: tikv_util::codec::Error) {
from()
cause(err)
display("{}", err)
}
ProtoBuf(err: protobuf::error::ProtobufError) {
from()
cause(err)
display("{}", err)
}
Mvcc(err: crate::storage::mvcc::Error) {
from()
cause(err)
display("{}", err)
}
Other(err: Box<dyn error::Error + Sync + Send>) {
from()
cause(err.as_ref())
display("{:?}", err)
}
Io(err: IoError) {
from()
cause(err)
display("{}", err)
}
InvalidTxnTso {start_ts: TimeStamp, commit_ts: TimeStamp} {
display("Invalid transaction tso with start_ts:{},commit_ts:{}",
start_ts,
commit_ts)
}
InvalidReqRange {start: Option<Vec<u8>>,
end: Option<Vec<u8>>,
lower_bound: Option<Vec<u8>>,
upper_bound: Option<Vec<u8>>} {
display("Request range exceeds bound, request range:[{}, end:{}), physical bound:[{}, {})",
start.as_ref().map(|x| &x[..]).map(log_wrappers::Value::key).map(|x| format!("{:?}", x)).unwrap_or_else(|| "(none)".to_owned()),
end.as_ref().map(|x| &x[..]).map(log_wrappers::Value::key).map(|x| format!("{:?}", x)).unwrap_or_else(|| "(none)".to_owned()),
lower_bound.as_ref().map(|x| &x[..]).map(log_wrappers::Value::key).map(|x| format!("{:?}", x)).unwrap_or_else(|| "(none)".to_owned()),
upper_bound.as_ref().map(|x| &x[..]).map(log_wrappers::Value::key).map(|x| format!("{:?}", x)).unwrap_or_else(|| "(none)".to_owned()))
}
MaxTimestampNotSynced { region_id: u64, start_ts: TimeStamp } {
display("Prewrite for async commit fails due to potentially stale max timestamp, start_ts: {}, region_id: {}",
start_ts,
region_id)
}
}
}
impl ErrorInner {
pub fn maybe_clone(&self) -> Option<ErrorInner> {
match *self {
ErrorInner::Engine(ref e) => e.maybe_clone().map(ErrorInner::Engine),
ErrorInner::Codec(ref e) => e.maybe_clone().map(ErrorInner::Codec),
ErrorInner::Mvcc(ref e) => e.maybe_clone().map(ErrorInner::Mvcc),
ErrorInner::InvalidTxnTso {
start_ts,
commit_ts,
} => Some(ErrorInner::InvalidTxnTso {
start_ts,
commit_ts,
}),
ErrorInner::InvalidReqRange {
ref start,
ref end,
ref lower_bound,
ref upper_bound,
} => Some(ErrorInner::InvalidReqRange {
start: start.clone(),
end: end.clone(),
lower_bound: lower_bound.clone(),
upper_bound: upper_bound.clone(),
}),
ErrorInner::MaxTimestampNotSynced {
region_id,
start_ts,
} => Some(ErrorInner::MaxTimestampNotSynced {
region_id,
start_ts,
}),
ErrorInner::Other(_) | ErrorInner::ProtoBuf(_) | ErrorInner::Io(_) => None,
}
}
}
pub struct Error(pub Box<ErrorInner>);
impl Error {
pub fn maybe_clone(&self) -> Option<Error> {
self.0.maybe_clone().map(Error::from)
}
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.0, f)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
impl std::error::Error for Error {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
std::error::Error::source(&self.0)
}
}
impl From<ErrorInner> for Error {
#[inline]
fn from(e: ErrorInner) -> Self {
Error(Box::new(e))
}
}
impl<T: Into<ErrorInner>> From<T> for Error {
#[inline]
default fn from(err: T) -> Self {
let err = err.into();
err.into()
}
}
pub type Result<T> = std::result::Result<T, Error>;
impl ErrorCodeExt for Error {
fn error_code(&self) -> ErrorCode {
match self.0.as_ref() {
ErrorInner::Engine(e) => e.error_code(),
ErrorInner::Codec(e) => e.error_code(),
ErrorInner::ProtoBuf(_) => error_code::storage::PROTOBUF,
ErrorInner::Mvcc(e) => e.error_code(),
ErrorInner::Other(_) => error_code::storage::UNKNOWN,
ErrorInner::Io(_) => error_code::storage::IO,
            ErrorInner::InvalidTxnTso { .. } => error_code::storage::INVALID_TXN_TSO,
            ErrorInner::InvalidReqRange { .. } => error_code::storage::INVALID_REQ_RANGE,
            ErrorInner::MaxTimestampNotSynced { .. } => {
error_code::storage::MAX_TIMESTAMP_NOT_SYNCED
}
}
}
}
pub mod tests {
use super::*;
pub use actions::acquire_pessimistic_lock::tests::{
must_err as must_acquire_pessimistic_lock_err,
must_err_return_value as must_acquire_pessimistic_lock_return_value_err,
must_pessimistic_locked, must_succeed as must_acquire_pessimistic_lock,
must_succeed_for_large_txn as must_acquire_pessimistic_lock_for_large_txn,
must_succeed_impl as must_acquire_pessimistic_lock_impl,
must_succeed_return_value as must_acquire_pessimistic_lock_return_value,
must_succeed_with_ttl as must_acquire_pessimistic_lock_with_ttl,
};
pub use actions::cleanup::tests::{
must_cleanup_with_gc_fence, must_err as must_cleanup_err, must_succeed as must_cleanup,
};
pub use actions::commit::tests::{must_err as must_commit_err, must_succeed as must_commit};
pub use actions::gc::tests::must_succeed as must_gc;
pub use actions::prewrite::tests::{
try_pessimistic_prewrite_check_not_exists, try_prewrite_check_not_exists,
try_prewrite_insert,
};
pub use actions::tests::*;
}
|
impl ProcessResult {
|
random_line_split
|
mod.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
mod heartbeat;
mod lsp_notification_dispatch;
mod lsp_request_dispatch;
mod lsp_state;
mod lsp_state_resources;
|
completion::{on_completion, on_resolve_completion_item},
explore_schema_for_type::{on_explore_schema_for_type, ExploreSchemaForType},
goto_definition::{
on_get_source_location_of_type_definition, on_goto_definition,
GetSourceLocationOfTypeDefinition,
},
graphql_tools::on_graphql_execute_query,
graphql_tools::GraphQLExecuteQuery,
hover::on_hover,
js_language_server::JSLanguageServer,
lsp_process_error::LSPProcessResult,
lsp_runtime_error::LSPRuntimeError,
references::on_references,
resolved_types_at_location::{on_get_resolved_types_at_location, ResolvedTypesAtLocation},
search_schema_items::{on_search_schema_items, SearchSchemaItems},
server::{
lsp_state::handle_lsp_state_tasks, lsp_state_resources::LSPStateResources,
task_queue::TaskQueue,
},
shutdown::{on_exit, on_shutdown},
status_reporter::LSPStatusReporter,
status_updater::set_initializing_status,
text_documents::{
on_cancel, on_did_change_text_document, on_did_close_text_document,
on_did_open_text_document, on_did_save_text_document,
},
};
use common::{PerfLogEvent, PerfLogger};
use crossbeam::{channel::Receiver, select};
use log::debug;
pub use lsp_notification_dispatch::LSPNotificationDispatch;
pub use lsp_request_dispatch::LSPRequestDispatch;
use lsp_server::{
Connection, ErrorCode, Message, Notification, Response as ServerResponse, ResponseError,
};
use lsp_types::{
notification::{
Cancel, DidChangeTextDocument, DidCloseTextDocument, DidOpenTextDocument,
DidSaveTextDocument, Exit,
},
request::{
CodeActionRequest, Completion, GotoDefinition, HoverRequest, References,
ResolveCompletionItem, Shutdown,
},
CodeActionProviderCapability, CompletionOptions, InitializeParams, ServerCapabilities,
TextDocumentSyncCapability, TextDocumentSyncKind, WorkDoneProgressOptions,
};
use relay_compiler::{config::Config, NoopArtifactWriter};
use schema_documentation::{SchemaDocumentation, SchemaDocumentationLoader};
use std::sync::Arc;
pub use crate::LSPExtraDataProvider;
pub use lsp_state::{convert_diagnostic, GlobalState, LSPState, Schemas, SourcePrograms};
use heartbeat::{on_heartbeat, HeartbeatRequest};
use self::task_queue::TaskProcessor;
/// Initializes an LSP connection, handling the `initialize` message and `initialized` notification
/// handshake.
pub fn initialize(connection: &Connection) -> LSPProcessResult<InitializeParams> {
let server_capabilities = ServerCapabilities {
// Enable text document syncing so we can know when files are opened/changed/saved/closed
text_document_sync: Some(TextDocumentSyncCapability::Kind(TextDocumentSyncKind::Full)),
completion_provider: Some(CompletionOptions {
resolve_provider: Some(true),
trigger_characters: Some(vec!["(".into(), "\n".into(), ",".into(), "@".into()]),
work_done_progress_options: WorkDoneProgressOptions {
work_done_progress: None,
},
..Default::default()
}),
hover_provider: Some(lsp_types::HoverProviderCapability::Simple(true)),
definition_provider: Some(lsp_types::OneOf::Left(true)),
references_provider: Some(lsp_types::OneOf::Left(true)),
code_action_provider: Some(CodeActionProviderCapability::Simple(true)),
..Default::default()
};
let server_capabilities = serde_json::to_value(&server_capabilities)?;
let params = connection.initialize(server_capabilities)?;
let params: InitializeParams = serde_json::from_value(params)?;
Ok(params)
}
#[derive(Debug)]
pub enum Task {
InboundMessage(lsp_server::Message),
LSPState(lsp_state::Task),
}
/// Run the main server loop
pub async fn run<
    TPerfLogger: PerfLogger + 'static,
    TSchemaDocumentation: SchemaDocumentation + 'static,
>(
connection: Connection,
mut config: Config,
_params: InitializeParams,
perf_logger: Arc<TPerfLogger>,
extra_data_provider: Box<dyn LSPExtraDataProvider + Send + Sync>,
schema_documentation_loader: Option<Box<dyn SchemaDocumentationLoader<TSchemaDocumentation>>>,
js_resource: Option<
Box<dyn JSLanguageServer<TState = LSPState<TPerfLogger, TSchemaDocumentation>>>,
>,
) -> LSPProcessResult<()>
where
    TPerfLogger: PerfLogger + 'static,
{
debug!(
"Running language server with config root {:?}",
config.root_dir
);
set_initializing_status(&connection.sender);
let task_processor = LSPTaskProcessor;
let task_queue = TaskQueue::new(Arc::new(task_processor));
let task_scheduler = task_queue.get_scheduler();
config.artifact_writer = Box::new(NoopArtifactWriter);
config.status_reporter = Box::new(LSPStatusReporter::new(
config.root_dir.clone(),
connection.sender.clone(),
));
let lsp_state = Arc::new(LSPState::new(
Arc::new(config),
connection.sender.clone(),
Arc::clone(&task_scheduler),
Arc::clone(&perf_logger),
extra_data_provider,
schema_documentation_loader,
js_resource,
));
LSPStateResources::new(Arc::clone(&lsp_state)).watch();
while let Some(task) = next_task(&connection.receiver, &task_queue.receiver) {
task_queue.process(Arc::clone(&lsp_state), task);
}
panic!("Client exited without proper shutdown sequence.")
}
fn next_task(
lsp_receiver: &Receiver<Message>,
task_queue_receiver: &Receiver<Task>,
) -> Option<Task> {
select! {
recv(lsp_receiver) -> message => message.ok().map(Task::InboundMessage),
recv(task_queue_receiver) -> task => task.ok()
}
}
struct LSPTaskProcessor;
impl<TPerfLogger: PerfLogger + 'static, TSchemaDocumentation: SchemaDocumentation + 'static>
TaskProcessor<LSPState<TPerfLogger, TSchemaDocumentation>, Task> for LSPTaskProcessor
{
fn process(&self, state: Arc<LSPState<TPerfLogger, TSchemaDocumentation>>, task: Task) {
match task {
Task::InboundMessage(Message::Request(request)) => handle_request(state, request),
Task::InboundMessage(Message::Notification(notification)) => {
handle_notification(state, notification);
}
Task::LSPState(lsp_task) => {
handle_lsp_state_tasks(state, lsp_task);
}
Task::InboundMessage(Message::Response(_)) => {
// TODO: handle response from the client -> cancel message, etc
}
}
}
}
fn handle_request<TPerfLogger: PerfLogger + 'static, TSchemaDocumentation: SchemaDocumentation>(
lsp_state: Arc<LSPState<TPerfLogger, TSchemaDocumentation>>,
request: lsp_server::Request,
) {
debug!("request received {:?}", request);
let get_server_response_bound = |req| dispatch_request(req, lsp_state.as_ref());
let get_response = with_request_logging(&lsp_state.perf_logger, get_server_response_bound);
lsp_state
.send_message(Message::Response(get_response(request)))
.expect("Unable to send message to a client.");
}
fn dispatch_request(request: lsp_server::Request, lsp_state: &impl GlobalState) -> ServerResponse {
let get_response = || -> Result<_, ServerResponse> {
let request = LSPRequestDispatch::new(request, lsp_state)
.on_request_sync::<ResolvedTypesAtLocation>(on_get_resolved_types_at_location)?
.on_request_sync::<SearchSchemaItems>(on_search_schema_items)?
.on_request_sync::<ExploreSchemaForType>(on_explore_schema_for_type)?
.on_request_sync::<GetSourceLocationOfTypeDefinition>(
on_get_source_location_of_type_definition,
)?
.on_request_sync::<HoverRequest>(on_hover)?
.on_request_sync::<GotoDefinition>(on_goto_definition)?
.on_request_sync::<References>(on_references)?
.on_request_sync::<Completion>(on_completion)?
.on_request_sync::<ResolveCompletionItem>(on_resolve_completion_item)?
.on_request_sync::<CodeActionRequest>(on_code_action)?
.on_request_sync::<Shutdown>(on_shutdown)?
.on_request_sync::<GraphQLExecuteQuery>(on_graphql_execute_query)?
.on_request_sync::<HeartbeatRequest>(on_heartbeat)?
.request();
// If we have gotten here, we have not handled the request
Ok(ServerResponse {
id: request.id,
result: None,
error: Some(ResponseError {
code: ErrorCode::MethodNotFound as i32,
data: None,
message: format!("No handler registered for method '{}'", request.method),
}),
})
};
get_response().unwrap_or_else(|response| response)
}
fn with_request_logging<'a, TPerfLogger: PerfLogger + 'static>(
perf_logger: &'a Arc<TPerfLogger>,
get_response: impl FnOnce(lsp_server::Request) -> ServerResponse + 'a,
) -> impl FnOnce(lsp_server::Request) -> ServerResponse + 'a {
move |request| {
let lsp_request_event = perf_logger.create_event("lsp_message");
lsp_request_event.string("lsp_method", request.method.clone());
lsp_request_event.string("lsp_type", "request".to_string());
let lsp_request_processing_time = lsp_request_event.start("lsp_message_processing_time");
let response = get_response(request);
if response.result.is_some() {
lsp_request_event.string("lsp_outcome", "success".to_string());
} else if let Some(error) = &response.error {
if error.code == ErrorCode::RequestCanceled as i32 {
lsp_request_event.string("lsp_outcome", "canceled".to_string());
} else {
lsp_request_event.string("lsp_outcome", "error".to_string());
lsp_request_event.string("lsp_error_message", error.message.to_string());
if let Some(data) = &error.data {
lsp_request_event.string("lsp_error_data", data.to_string());
}
}
}
// N.B. we don't handle the case where the ServerResponse has neither a result nor
// an error, which is an invalid state.
lsp_request_event.stop(lsp_request_processing_time);
lsp_request_event.complete();
response
}
}
fn handle_notification<
    TPerfLogger: PerfLogger + 'static,
TSchemaDocumentation: SchemaDocumentation,
>(
lsp_state: Arc<LSPState<TPerfLogger, TSchemaDocumentation>>,
notification: Notification,
) {
debug!("notification received {:?}", notification);
let lsp_notification_event = lsp_state.perf_logger.create_event("lsp_message");
lsp_notification_event.string("lsp_method", notification.method.clone());
lsp_notification_event.string("lsp_type", "notification".to_string());
let lsp_notification_processing_time =
lsp_notification_event.start("lsp_message_processing_time");
let notification_result = dispatch_notification(notification, lsp_state.as_ref());
match notification_result {
Ok(()) => {
// The notification is not handled
lsp_notification_event.string("lsp_outcome", "error".to_string());
}
Err(err) => {
if let Some(err) = err {
lsp_notification_event.string("lsp_outcome", "error".to_string());
if let LSPRuntimeError::UnexpectedError(message) = err {
lsp_notification_event.string("lsp_error_message", message);
}
} else {
lsp_notification_event.string("lsp_outcome", "success".to_string());
}
}
}
lsp_notification_event.stop(lsp_notification_processing_time);
lsp_notification_event.complete();
}
fn dispatch_notification(
notification: lsp_server::Notification,
lsp_state: &impl GlobalState,
) -> Result<(), Option<LSPRuntimeError>> {
let notification = LSPNotificationDispatch::new(notification, lsp_state)
.on_notification_sync::<DidOpenTextDocument>(on_did_open_text_document)?
.on_notification_sync::<DidCloseTextDocument>(on_did_close_text_document)?
.on_notification_sync::<DidChangeTextDocument>(on_did_change_text_document)?
.on_notification_sync::<DidSaveTextDocument>(on_did_save_text_document)?
.on_notification_sync::<Cancel>(on_cancel)?
.on_notification_sync::<Exit>(on_exit)?
.notification();
// If we have gotten here, we have not handled the notification
debug!(
"Error: no handler registered for notification '{}'",
notification.method
);
Ok(())
}
|
mod task_queue;
use crate::{
code_action::on_code_action,
|
random_line_split
|
mod.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
mod heartbeat;
mod lsp_notification_dispatch;
mod lsp_request_dispatch;
mod lsp_state;
mod lsp_state_resources;
mod task_queue;
use crate::{
code_action::on_code_action,
completion::{on_completion, on_resolve_completion_item},
explore_schema_for_type::{on_explore_schema_for_type, ExploreSchemaForType},
goto_definition::{
on_get_source_location_of_type_definition, on_goto_definition,
GetSourceLocationOfTypeDefinition,
},
graphql_tools::on_graphql_execute_query,
graphql_tools::GraphQLExecuteQuery,
hover::on_hover,
js_language_server::JSLanguageServer,
lsp_process_error::LSPProcessResult,
lsp_runtime_error::LSPRuntimeError,
references::on_references,
resolved_types_at_location::{on_get_resolved_types_at_location, ResolvedTypesAtLocation},
search_schema_items::{on_search_schema_items, SearchSchemaItems},
server::{
lsp_state::handle_lsp_state_tasks, lsp_state_resources::LSPStateResources,
task_queue::TaskQueue,
},
shutdown::{on_exit, on_shutdown},
status_reporter::LSPStatusReporter,
status_updater::set_initializing_status,
text_documents::{
on_cancel, on_did_change_text_document, on_did_close_text_document,
on_did_open_text_document, on_did_save_text_document,
},
};
use common::{PerfLogEvent, PerfLogger};
use crossbeam::{channel::Receiver, select};
use log::debug;
pub use lsp_notification_dispatch::LSPNotificationDispatch;
pub use lsp_request_dispatch::LSPRequestDispatch;
use lsp_server::{
Connection, ErrorCode, Message, Notification, Response as ServerResponse, ResponseError,
};
use lsp_types::{
notification::{
Cancel, DidChangeTextDocument, DidCloseTextDocument, DidOpenTextDocument,
DidSaveTextDocument, Exit,
},
request::{
CodeActionRequest, Completion, GotoDefinition, HoverRequest, References,
ResolveCompletionItem, Shutdown,
},
CodeActionProviderCapability, CompletionOptions, InitializeParams, ServerCapabilities,
TextDocumentSyncCapability, TextDocumentSyncKind, WorkDoneProgressOptions,
};
use relay_compiler::{config::Config, NoopArtifactWriter};
use schema_documentation::{SchemaDocumentation, SchemaDocumentationLoader};
use std::sync::Arc;
pub use crate::LSPExtraDataProvider;
pub use lsp_state::{convert_diagnostic, GlobalState, LSPState, Schemas, SourcePrograms};
use heartbeat::{on_heartbeat, HeartbeatRequest};
use self::task_queue::TaskProcessor;
/// Initializes an LSP connection, handling the `initialize` message and `initialized` notification
/// handshake.
pub fn initialize(connection: &Connection) -> LSPProcessResult<InitializeParams>
|
let server_capabilities = serde_json::to_value(&server_capabilities)?;
let params = connection.initialize(server_capabilities)?;
let params: InitializeParams = serde_json::from_value(params)?;
Ok(params)
}
#[derive(Debug)]
pub enum Task {
InboundMessage(lsp_server::Message),
LSPState(lsp_state::Task),
}
/// Run the main server loop
pub async fn run<
    TPerfLogger: PerfLogger + 'static,
    TSchemaDocumentation: SchemaDocumentation + 'static,
>(
connection: Connection,
mut config: Config,
_params: InitializeParams,
perf_logger: Arc<TPerfLogger>,
extra_data_provider: Box<dyn LSPExtraDataProvider + Send + Sync>,
schema_documentation_loader: Option<Box<dyn SchemaDocumentationLoader<TSchemaDocumentation>>>,
js_resource: Option<
Box<dyn JSLanguageServer<TState = LSPState<TPerfLogger, TSchemaDocumentation>>>,
>,
) -> LSPProcessResult<()>
where
    TPerfLogger: PerfLogger + 'static,
{
debug!(
"Running language server with config root {:?}",
config.root_dir
);
set_initializing_status(&connection.sender);
let task_processor = LSPTaskProcessor;
let task_queue = TaskQueue::new(Arc::new(task_processor));
let task_scheduler = task_queue.get_scheduler();
config.artifact_writer = Box::new(NoopArtifactWriter);
config.status_reporter = Box::new(LSPStatusReporter::new(
config.root_dir.clone(),
connection.sender.clone(),
));
let lsp_state = Arc::new(LSPState::new(
Arc::new(config),
connection.sender.clone(),
Arc::clone(&task_scheduler),
Arc::clone(&perf_logger),
extra_data_provider,
schema_documentation_loader,
js_resource,
));
LSPStateResources::new(Arc::clone(&lsp_state)).watch();
while let Some(task) = next_task(&connection.receiver, &task_queue.receiver) {
task_queue.process(Arc::clone(&lsp_state), task);
}
panic!("Client exited without proper shutdown sequence.")
}
fn next_task(
lsp_receiver: &Receiver<Message>,
task_queue_receiver: &Receiver<Task>,
) -> Option<Task> {
select! {
recv(lsp_receiver) -> message => message.ok().map(Task::InboundMessage),
recv(task_queue_receiver) -> task => task.ok()
}
}
struct LSPTaskProcessor;
impl<TPerfLogger: PerfLogger + 'static, TSchemaDocumentation: SchemaDocumentation + 'static>
TaskProcessor<LSPState<TPerfLogger, TSchemaDocumentation>, Task> for LSPTaskProcessor
{
fn process(&self, state: Arc<LSPState<TPerfLogger, TSchemaDocumentation>>, task: Task) {
match task {
Task::InboundMessage(Message::Request(request)) => handle_request(state, request),
Task::InboundMessage(Message::Notification(notification)) => {
handle_notification(state, notification);
}
Task::LSPState(lsp_task) => {
handle_lsp_state_tasks(state, lsp_task);
}
Task::InboundMessage(Message::Response(_)) => {
// TODO: handle response from the client -> cancel message, etc
}
}
}
}
fn handle_request<TPerfLogger: PerfLogger + 'static, TSchemaDocumentation: SchemaDocumentation>(
lsp_state: Arc<LSPState<TPerfLogger, TSchemaDocumentation>>,
request: lsp_server::Request,
) {
debug!("request received {:?}", request);
let get_server_response_bound = |req| dispatch_request(req, lsp_state.as_ref());
let get_response = with_request_logging(&lsp_state.perf_logger, get_server_response_bound);
lsp_state
.send_message(Message::Response(get_response(request)))
.expect("Unable to send message to a client.");
}
fn dispatch_request(request: lsp_server::Request, lsp_state: &impl GlobalState) -> ServerResponse {
let get_response = || -> Result<_, ServerResponse> {
let request = LSPRequestDispatch::new(request, lsp_state)
.on_request_sync::<ResolvedTypesAtLocation>(on_get_resolved_types_at_location)?
.on_request_sync::<SearchSchemaItems>(on_search_schema_items)?
.on_request_sync::<ExploreSchemaForType>(on_explore_schema_for_type)?
.on_request_sync::<GetSourceLocationOfTypeDefinition>(
on_get_source_location_of_type_definition,
)?
.on_request_sync::<HoverRequest>(on_hover)?
.on_request_sync::<GotoDefinition>(on_goto_definition)?
.on_request_sync::<References>(on_references)?
.on_request_sync::<Completion>(on_completion)?
.on_request_sync::<ResolveCompletionItem>(on_resolve_completion_item)?
.on_request_sync::<CodeActionRequest>(on_code_action)?
.on_request_sync::<Shutdown>(on_shutdown)?
.on_request_sync::<GraphQLExecuteQuery>(on_graphql_execute_query)?
.on_request_sync::<HeartbeatRequest>(on_heartbeat)?
.request();
// If we have gotten here, we have not handled the request
Ok(ServerResponse {
id: request.id,
result: None,
error: Some(ResponseError {
code: ErrorCode::MethodNotFound as i32,
data: None,
message: format!("No handler registered for method '{}'", request.method),
}),
})
};
get_response().unwrap_or_else(|response| response)
}
fn with_request_logging<'a, TPerfLogger: PerfLogger + 'static>(
perf_logger: &'a Arc<TPerfLogger>,
get_response: impl FnOnce(lsp_server::Request) -> ServerResponse + 'a,
) -> impl FnOnce(lsp_server::Request) -> ServerResponse + 'a {
move |request| {
let lsp_request_event = perf_logger.create_event("lsp_message");
lsp_request_event.string("lsp_method", request.method.clone());
lsp_request_event.string("lsp_type", "request".to_string());
let lsp_request_processing_time = lsp_request_event.start("lsp_message_processing_time");
let response = get_response(request);
if response.result.is_some() {
lsp_request_event.string("lsp_outcome", "success".to_string());
} else if let Some(error) = &response.error {
if error.code == ErrorCode::RequestCanceled as i32 {
lsp_request_event.string("lsp_outcome", "canceled".to_string());
} else {
lsp_request_event.string("lsp_outcome", "error".to_string());
lsp_request_event.string("lsp_error_message", error.message.to_string());
if let Some(data) = &error.data {
lsp_request_event.string("lsp_error_data", data.to_string());
}
}
}
// N.B. we don't handle the case where the ServerResponse has neither a result nor
// an error, which is an invalid state.
lsp_request_event.stop(lsp_request_processing_time);
lsp_request_event.complete();
response
}
}
fn handle_notification<
    TPerfLogger: PerfLogger + 'static,
TSchemaDocumentation: SchemaDocumentation,
>(
lsp_state: Arc<LSPState<TPerfLogger, TSchemaDocumentation>>,
notification: Notification,
) {
debug!("notification received {:?}", notification);
let lsp_notification_event = lsp_state.perf_logger.create_event("lsp_message");
lsp_notification_event.string("lsp_method", notification.method.clone());
lsp_notification_event.string("lsp_type", "notification".to_string());
let lsp_notification_processing_time =
lsp_notification_event.start("lsp_message_processing_time");
let notification_result = dispatch_notification(notification, lsp_state.as_ref());
match notification_result {
Ok(()) => {
// The notification is not handled
lsp_notification_event.string("lsp_outcome", "error".to_string());
}
Err(err) => {
if let Some(err) = err {
lsp_notification_event.string("lsp_outcome", "error".to_string());
if let LSPRuntimeError::UnexpectedError(message) = err {
lsp_notification_event.string("lsp_error_message", message);
}
} else {
lsp_notification_event.string("lsp_outcome", "success".to_string());
}
}
}
lsp_notification_event.stop(lsp_notification_processing_time);
lsp_notification_event.complete();
}
fn dispatch_notification(
notification: lsp_server::Notification,
lsp_state: &impl GlobalState,
) -> Result<(), Option<LSPRuntimeError>> {
let notification = LSPNotificationDispatch::new(notification, lsp_state)
.on_notification_sync::<DidOpenTextDocument>(on_did_open_text_document)?
.on_notification_sync::<DidCloseTextDocument>(on_did_close_text_document)?
.on_notification_sync::<DidChangeTextDocument>(on_did_change_text_document)?
.on_notification_sync::<DidSaveTextDocument>(on_did_save_text_document)?
.on_notification_sync::<Cancel>(on_cancel)?
.on_notification_sync::<Exit>(on_exit)?
.notification();
// If we have gotten here, we have not handled the notification
debug!(
"Error: no handler registered for notification '{}'",
notification.method
);
Ok(())
}
|
{
let server_capabilities = ServerCapabilities {
// Enable text document syncing so we can know when files are opened/changed/saved/closed
text_document_sync: Some(TextDocumentSyncCapability::Kind(TextDocumentSyncKind::Full)),
completion_provider: Some(CompletionOptions {
resolve_provider: Some(true),
trigger_characters: Some(vec!["(".into(), "\n".into(), ",".into(), "@".into()]),
work_done_progress_options: WorkDoneProgressOptions {
work_done_progress: None,
},
..Default::default()
}),
hover_provider: Some(lsp_types::HoverProviderCapability::Simple(true)),
definition_provider: Some(lsp_types::OneOf::Left(true)),
references_provider: Some(lsp_types::OneOf::Left(true)),
code_action_provider: Some(CodeActionProviderCapability::Simple(true)),
..Default::default()
};
|
identifier_body
|