file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes: random_line_split, identifier_name, identifier_body, conditional_block)
---|---|---|---|---|
string_utils.rs | //! Various utilities for string operations.
/// Join items of a collection with separator.
pub trait JoinWithSeparator<S> {
/// Result type of the operation
type Output;
/// Join items of `self` with `separator`.
fn join(self, separator: S) -> Self::Output;
}
impl<S, S2, X> JoinWithSeparator<S2> for X
where S: AsRef<str>,
S2: AsRef<str>,
X: Iterator<Item = S>
{
type Output = String;
fn join(self, separator: S2) -> String {
self.fold("".to_string(), |a, b| {
let m = if a.is_empty() {
a
} else {
a + separator.as_ref()
};
m + b.as_ref()
})
}
}
/// Iterator over words in a camel-case
/// or snake-case string.
pub struct WordIterator<'a> {
string: &'a str,
index: usize,
}
impl<'a> WordIterator<'a> {
/// Create iterator over `string`.
pub fn new(string: &str) -> WordIterator {
WordIterator {
string: string,
index: 0,
}
}
}
fn char_at(str: &str, index: usize) -> char {
if index >= str.len() {
panic!("char_at: index out of bounds");
}
str[index..index + 1].chars().next().unwrap()
}
impl<'a> Iterator for WordIterator<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<&'a str> {
while self.index < self.string.len() && &self.string[self.index..self.index + 1] == "_" {
self.index += 1;
}
if self.index >= self.string.len() {
return None;
}
let mut i = self.index + 1;
let current_word_is_number = i < self.string.len() && char_at(self.string, i).is_digit(10);
while i < self.string.len() {
let current = char_at(self.string, i);
if current == '_' || current.is_uppercase() {
break;
}
if !current_word_is_number && current.is_digit(10) {
break;
}
i += 1;
}
let result = &self.string[self.index..i];
self.index = i;
Some(result)
}
}
/// Convert to string with different cases
pub trait CaseOperations {
/// Convert to class-case string ("WordWordWord")
fn to_class_case(self) -> String;
/// Convert to snake-case string ("word_word_word")
fn to_snake_case(self) -> String;
/// Convert to upper-case string ("WORD_WORD_WORD")
fn to_upper_case_words(self) -> String;
}
fn iterator_to_class_case<S: AsRef<str>, T: Iterator<Item = S>>(it: T) -> String {
it.map(|x| if char_at(x.as_ref(), 0).is_digit(10) {
x.as_ref().to_uppercase()
} else {
format!("{}{}",
x.as_ref()[0..1].to_uppercase(),
x.as_ref()[1..].to_lowercase())
})
.join("")
}
fn ends_with_digit<S: AsRef<str>>(s: S) -> bool {
let str = s.as_ref();
if str.len() > 0 {
str[str.len() - 1..str.len()]
.chars()
.next()
.unwrap()
.is_digit(10)
} else {
false
}
}
fn iterator_to_snake_case<S: AsRef<str>, T: Iterator<Item = S>>(it: T) -> String { | replace_all_sub_vecs(&mut parts, vec!["3", "d"]);
replace_all_sub_vecs(&mut parts, vec!["4", "d"]);
let mut str = String::new();
for (i, part) in parts.into_iter().enumerate() {
if part.is_empty() {
continue;
}
if i > 0 && !(part.chars().all(|c| c.is_digit(10)) && !ends_with_digit(&str)) {
str.push('_');
}
str.push_str(&part);
}
str
}
fn iterator_to_upper_case_words<S: AsRef<str>, T: Iterator<Item = S>>(it: T) -> String {
it.map(|x| x.as_ref().to_uppercase()).join("_")
}
#[cfg_attr(feature="clippy", allow(needless_range_loop))]
fn replace_all_sub_vecs(parts: &mut Vec<String>, needle: Vec<&str>) {
let mut any_found = true;
while any_found {
any_found = false;
if parts.len() + 1 >= needle.len() {
// TODO: maybe rewrite this
for i in 0..parts.len() + 1 - needle.len() {
if &parts[i..i + needle.len()] == &needle[..] {
for _ in 0..needle.len() - 1 {
parts.remove(i + 1);
}
parts[i] = needle.join("");
any_found = true;
break;
}
}
}
}
}
impl<'a> CaseOperations for &'a str {
fn to_class_case(self) -> String {
iterator_to_class_case(WordIterator::new(self))
}
fn to_snake_case(self) -> String {
iterator_to_snake_case(WordIterator::new(self))
}
fn to_upper_case_words(self) -> String {
iterator_to_upper_case_words(WordIterator::new(self))
}
}
impl<'a> CaseOperations for Vec<&'a str> {
fn to_class_case(self) -> String {
iterator_to_class_case(self.into_iter())
}
fn to_snake_case(self) -> String {
iterator_to_snake_case(self.into_iter())
}
fn to_upper_case_words(self) -> String {
iterator_to_upper_case_words(self.into_iter())
}
} | let mut parts: Vec<_> = it.map(|x| x.as_ref().to_lowercase()).collect();
replace_all_sub_vecs(&mut parts, vec!["na", "n"]);
replace_all_sub_vecs(&mut parts, vec!["open", "g", "l"]);
replace_all_sub_vecs(&mut parts, vec!["i", "o"]);
replace_all_sub_vecs(&mut parts, vec!["2", "d"]); | random_line_split |
test_register_deregister.rs | use mio::*;
use mio::tcp::*;
use bytes::SliceBuf;
use super::localhost;
const SERVER: Token = Token(0);
const CLIENT: Token = Token(1);
struct TestHandler {
server: TcpListener,
client: TcpStream,
state: usize,
}
impl TestHandler {
fn new(srv: TcpListener, cli: TcpStream) -> TestHandler {
TestHandler {
server: srv,
client: cli,
state: 0,
}
}
fn handle_read(&mut self, event_loop: &mut EventLoop<TestHandler>, token: Token, _: EventSet) {
match token {
SERVER => {
trace!("handle_read; token=SERVER");
let mut sock = self.server.accept().unwrap().unwrap().0;
sock.try_write_buf(&mut SliceBuf::wrap("foobar".as_bytes())).unwrap();
}
CLIENT => {
trace!("handle_read; token=CLIENT");
assert!(self.state == 0, "unexpected state {}", self.state);
self.state = 1;
event_loop.reregister(&self.client, CLIENT, EventSet::writable(), PollOpt::level()).unwrap();
}
_ => panic!("unexpected token"),
}
}
fn handle_write(&mut self, event_loop: &mut EventLoop<TestHandler>, token: Token, _: EventSet) |
}
impl Handler for TestHandler {
type Timeout = usize;
type Message = ();
fn ready(&mut self, event_loop: &mut EventLoop<TestHandler>, token: Token, events: EventSet) {
if events.is_readable() {
self.handle_read(event_loop, token, events);
}
if events.is_writable() {
self.handle_write(event_loop, token, events);
}
}
fn timeout(&mut self, event_loop: &mut EventLoop<TestHandler>, _: usize) {
trace!("timeout");
event_loop.shutdown();
}
}
#[test]
pub fn test_register_deregister() {
debug!("Starting TEST_REGISTER_DEREGISTER");
let mut event_loop = EventLoop::new().unwrap();
let addr = localhost();
let server = TcpListener::bind(&addr).unwrap();
info!("register server socket");
event_loop.register(&server, SERVER, EventSet::readable(), PollOpt::edge()).unwrap();
let client = TcpStream::connect(&addr).unwrap();
// Register client socket only as writable
event_loop.register(&client, CLIENT, EventSet::readable(), PollOpt::level()).unwrap();
let mut handler = TestHandler::new(server, client);
// Start the event loop
event_loop.run(&mut handler).unwrap();
assert!(handler.state == 2, "unexpected final state {}", handler.state);
}
| {
debug!("handle_write; token={:?}; state={:?}", token, self.state);
assert!(token == CLIENT, "unexpected token {:?}", token);
assert!(self.state == 1, "unexpected state {}", self.state);
self.state = 2;
event_loop.deregister(&self.client).unwrap();
event_loop.timeout_ms(1, 200).unwrap();
} | identifier_body |
test_register_deregister.rs | use mio::*;
use mio::tcp::*;
use bytes::SliceBuf;
use super::localhost;
const SERVER: Token = Token(0);
const CLIENT: Token = Token(1);
struct TestHandler {
server: TcpListener,
client: TcpStream,
state: usize,
}
impl TestHandler {
fn new(srv: TcpListener, cli: TcpStream) -> TestHandler {
TestHandler {
server: srv,
client: cli,
state: 0,
}
}
fn handle_read(&mut self, event_loop: &mut EventLoop<TestHandler>, token: Token, _: EventSet) {
match token {
SERVER => {
trace!("handle_read; token=SERVER");
let mut sock = self.server.accept().unwrap().unwrap().0;
sock.try_write_buf(&mut SliceBuf::wrap("foobar".as_bytes())).unwrap();
}
CLIENT => {
trace!("handle_read; token=CLIENT");
assert!(self.state == 0, "unexpected state {}", self.state);
self.state = 1;
event_loop.reregister(&self.client, CLIENT, EventSet::writable(), PollOpt::level()).unwrap();
}
_ => panic!("unexpected token"),
}
}
fn handle_write(&mut self, event_loop: &mut EventLoop<TestHandler>, token: Token, _: EventSet) {
debug!("handle_write; token={:?}; state={:?}", token, self.state);
assert!(token == CLIENT, "unexpected token {:?}", token);
assert!(self.state == 1, "unexpected state {}", self.state);
self.state = 2;
event_loop.deregister(&self.client).unwrap();
event_loop.timeout_ms(1, 200).unwrap();
}
}
impl Handler for TestHandler {
type Timeout = usize;
type Message = ();
fn ready(&mut self, event_loop: &mut EventLoop<TestHandler>, token: Token, events: EventSet) {
if events.is_readable() {
self.handle_read(event_loop, token, events);
}
if events.is_writable() {
self.handle_write(event_loop, token, events);
}
}
fn timeout(&mut self, event_loop: &mut EventLoop<TestHandler>, _: usize) {
trace!("timeout");
event_loop.shutdown();
}
}
#[test]
pub fn test_register_deregister() {
debug!("Starting TEST_REGISTER_DEREGISTER");
let mut event_loop = EventLoop::new().unwrap(); | let addr = localhost();
let server = TcpListener::bind(&addr).unwrap();
info!("register server socket");
event_loop.register(&server, SERVER, EventSet::readable(), PollOpt::edge()).unwrap();
let client = TcpStream::connect(&addr).unwrap();
// Register client socket only as writable
event_loop.register(&client, CLIENT, EventSet::readable(), PollOpt::level()).unwrap();
let mut handler = TestHandler::new(server, client);
// Start the event loop
event_loop.run(&mut handler).unwrap();
assert!(handler.state == 2, "unexpected final state {}", handler.state);
} | random_line_split |
|
test_register_deregister.rs | use mio::*;
use mio::tcp::*;
use bytes::SliceBuf;
use super::localhost;
const SERVER: Token = Token(0);
const CLIENT: Token = Token(1);
struct TestHandler {
server: TcpListener,
client: TcpStream,
state: usize,
}
impl TestHandler {
fn new(srv: TcpListener, cli: TcpStream) -> TestHandler {
TestHandler {
server: srv,
client: cli,
state: 0,
}
}
fn handle_read(&mut self, event_loop: &mut EventLoop<TestHandler>, token: Token, _: EventSet) {
match token {
SERVER => {
trace!("handle_read; token=SERVER");
let mut sock = self.server.accept().unwrap().unwrap().0;
sock.try_write_buf(&mut SliceBuf::wrap("foobar".as_bytes())).unwrap();
}
CLIENT => {
trace!("handle_read; token=CLIENT");
assert!(self.state == 0, "unexpected state {}", self.state);
self.state = 1;
event_loop.reregister(&self.client, CLIENT, EventSet::writable(), PollOpt::level()).unwrap();
}
_ => panic!("unexpected token"),
}
}
fn | (&mut self, event_loop: &mut EventLoop<TestHandler>, token: Token, _: EventSet) {
debug!("handle_write; token={:?}; state={:?}", token, self.state);
assert!(token == CLIENT, "unexpected token {:?}", token);
assert!(self.state == 1, "unexpected state {}", self.state);
self.state = 2;
event_loop.deregister(&self.client).unwrap();
event_loop.timeout_ms(1, 200).unwrap();
}
}
impl Handler for TestHandler {
type Timeout = usize;
type Message = ();
fn ready(&mut self, event_loop: &mut EventLoop<TestHandler>, token: Token, events: EventSet) {
if events.is_readable() {
self.handle_read(event_loop, token, events);
}
if events.is_writable() {
self.handle_write(event_loop, token, events);
}
}
fn timeout(&mut self, event_loop: &mut EventLoop<TestHandler>, _: usize) {
trace!("timeout");
event_loop.shutdown();
}
}
#[test]
pub fn test_register_deregister() {
debug!("Starting TEST_REGISTER_DEREGISTER");
let mut event_loop = EventLoop::new().unwrap();
let addr = localhost();
let server = TcpListener::bind(&addr).unwrap();
info!("register server socket");
event_loop.register(&server, SERVER, EventSet::readable(), PollOpt::edge()).unwrap();
let client = TcpStream::connect(&addr).unwrap();
// Register client socket only as writable
event_loop.register(&client, CLIENT, EventSet::readable(), PollOpt::level()).unwrap();
let mut handler = TestHandler::new(server, client);
// Start the event loop
event_loop.run(&mut handler).unwrap();
assert!(handler.state == 2, "unexpected final state {}", handler.state);
}
| handle_write | identifier_name |
global.rs | //! The global state.
use parking_lot::Mutex;
use std::collections::HashSet;
use std::{mem, panic};
use {rand, hazard, mpsc, debug, settings};
use garbage::Garbage;
lazy_static! {
/// The global state.
///
/// This state is shared between all the threads.
static ref STATE: State = State::new();
}
/// Create a new hazard.
///
/// This creates a new hazard and registers it in the global state. Its secondary, writer part is
/// returned.
pub fn create_hazard() -> hazard::Writer {
STATE.create_hazard()
}
/// Export garbage into the global state.
///
/// This adds the garbage, which will eventually be destroyed, to the global state. Note that this
/// does not tick, and thus cannot cause garbage collection.
pub fn export_garbage(garbage: Vec<Garbage>) {
STATE.export_garbage(garbage)
}
/// Attempt to garbage collect.
///
/// If another garbage collection is currently running, the thread will do nothing, and `Err(())`
/// will be returned. Otherwise, it returns `Ok(())`.
///
/// # Panic
///
/// If a destructor panics, this will panic as well.
pub fn try_gc() -> Result<(), ()> {
STATE.try_gc()
}
/// Tick the clock.
///
/// This shall be called when new garbage is added, as it will trigger a GC by some probability.
pub fn tick() {
// Generate a random number and compare it against the probability.
if rand::random::<usize>() < settings::get().gc_probability {
// The outcome was to (attempt to) GC.
let _ = try_gc();
}
}
/// A message to the global state.
enum Message {
/// Add new garbage.
Garbage(Vec<Garbage>),
/// Add a new hazard.
NewHazard(hazard::Reader),
}
/// The global state.
///
/// The global state is shared between all threads and keeps track of the garbage and the active
/// hazards.
///
/// It is divided into two parts: The channel and the garbo. The channel buffers messages, which
/// will eventually be executed at garbo, which holds all the data structures and is protected by a
/// mutex. The garbo holds the other end to the channel.
struct State {
/// The message-passing channel.
chan: mpsc::Sender<Message>,
/// The garbo part of the state.
garbo: Mutex<Garbo>,
}
impl State {
/// Initialize a new state.
fn new() -> State {
// Create the message-passing channel.
let (send, recv) = mpsc::channel();
// Construct the state from the two halves of the channel.
State {
chan: send,
garbo: Mutex::new(Garbo {
chan: recv,
garbage: Vec::new(),
hazards: Vec::new(),
})
}
}
/// Create a new hazard.
///
/// This creates a new hazard and registers it in the global state. Its secondary, writer part
/// is returned.
fn create_hazard(&self) -> hazard::Writer {
// Create the hazard.
let (writer, reader) = hazard::create();
// Communicate the new hazard to the global state through the channel.
self.chan.send(Message::NewHazard(reader));
// Return the other half of the hazard.
writer
}
/// Export garbage into the global state.
///
/// This adds the garbage, which will eventually be destroyed, to the global state.
fn export_garbage(&self, garbage: Vec<Garbage>) {
// Send the garbage to the message-passing channel of the state.
self.chan.send(Message::Garbage(garbage));
}
/// Try to collect the garbage.
///
/// This will handle all of the messages in the channel and then attempt to collect the
/// garbage. If another thread is currently collecting garbage, `Err(())` is returned,
/// otherwise it returns `Ok(())`.
///
/// Garbage collection works by scanning the hazards and dropping all the garbage which is not
/// currently active in the hazards.
fn try_gc(&self) -> Result<(), ()> {
// Lock the "garbo" (the part of the state needed to GC).
if let Some(mut garbo) = self.garbo.try_lock() {
// Collect the garbage.
garbo.gc();
Ok(())
} else {
// Another thread is collecting.
Err(())
}
}
}
impl panic::RefUnwindSafe for State {}
/// The garbo part of the state.
///
/// This part is supposed to act like the garbage collecting part. It handles hazards, garbage, and
/// the receiving point of the message-passing channel.
struct | {
/// The channel of messages.
chan: mpsc::Receiver<Message>,
/// The to-be-destroyed garbage.
garbage: Vec<Garbage>,
/// The current hazards.
hazards: Vec<hazard::Reader>,
}
impl Garbo {
/// Handle a given message.
///
/// "Handle" in this case refers to applying the operation defined by the message to the state,
/// effectually executing the instruction of the message.
fn handle(&mut self, msg: Message) {
match msg {
// Append the garbage bulk to the garbage list.
Message::Garbage(mut garbage) => self.garbage.append(&mut garbage),
// Register the new hazard into the state.
Message::NewHazard(hazard) => self.hazards.push(hazard),
}
}
/// Handle all the messages and garbage collect all unused garbage.
///
/// # Panic
///
/// If a destructor panics, this will panic as well.
fn gc(&mut self) {
// Print message in debug mode.
debug::exec(|| println!("Collecting garbage."));
// Handle all the messages sent.
for msg in self.chan.recv_all() {
self.handle(msg);
}
// Create the set which will keep the _active_ hazards.
let mut active = HashSet::with_capacity(self.hazards.len());
// Take out the hazards and go over them one-by-one.
let len = self.hazards.len(); // TODO: This should be substituted into next line.
for hazard in mem::replace(&mut self.hazards, Vec::with_capacity(len)) {
match hazard.get() {
// The hazard is dead, so the other end (the writer) is not available anymore,
// hence we can safely destroy it.
hazard::State::Dead => unsafe { hazard.destroy() },
// The hazard is free and must thus be put back to the hazard list.
hazard::State::Free => self.hazards.push(hazard),
hazard::State::Protect(ptr) => {
// This hazard is active, hence we insert the pointer it contains in our
// "active" set.
active.insert(ptr);
// Since the hazard is still alive, we must put it back to the hazard list for
// future use.
self.hazards.push(hazard);
},
}
}
// Scan the garbage for unused objects.
self.garbage.retain(|garbage| active.contains(&garbage.ptr()))
}
}
impl Drop for Garbo {
fn drop(&mut self) {
// Do a final GC.
self.gc();
}
}
#[cfg(test)]
mod tests {
use super::*;
use garbage::Garbage;
use std::{panic, ptr};
#[test]
fn dtor_runs() {
fn dtor(x: *const u8) {
unsafe {
*(x as *mut u8) = 1;
}
}
let s = State::new();
for _ in 0..1000 {
let b = Box::new(0);
let h = s.create_hazard();
h.protect(&*b);
s.export_garbage(vec![Garbage::new(&*b, dtor)]);
while s.try_gc().is_err() {}
assert_eq!(*b, 0);
while s.try_gc().is_err() {}
h.free();
while s.try_gc().is_err() {}
assert_eq!(*b, 1);
h.kill();
}
}
#[test]
fn clean_up_state() {
fn dtor(x: *const u8) {
unsafe {
*(x as *mut u8) = 1;
}
}
for _ in 0..1000 {
let b = Box::new(0);
{
let s = State::new();
s.export_garbage(vec![Garbage::new(&*b, dtor)]);
}
assert_eq!(*b, 1);
}
}
#[test]
fn panic_invalidate_state() {
fn panic(_: *const u8) {
panic!();
}
fn dtor(x: *const u8) {
unsafe {
*(x as *mut u8) = 1;
}
}
let s = State::new();
let b = Box::new(0);
let h = create_hazard();
h.protect(&*b);
s.export_garbage(vec![Garbage::new(&*b, dtor), Garbage::new(0x2 as *const u8, panic)]);
let _ = panic::catch_unwind(|| {
while s.try_gc().is_err() {}
});
assert_eq!(*b, 0);
h.free();
while s.try_gc().is_err() {}
assert_eq!(*b, 1);
}
#[test]
#[should_panic]
fn panic_in_dtor() {
fn dtor(_: *const u8) {
panic!();
}
let s = State::new();
s.export_garbage(vec![Garbage::new(ptr::null(), dtor)]);
while s.try_gc().is_err() {}
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn debug_more_hazards() {
let s = State::new();
let h = s.create_hazard();
h.free();
mem::forget(h);
}
}
| Garbo | identifier_name |
global.rs | //! The global state.
use parking_lot::Mutex;
use std::collections::HashSet;
use std::{mem, panic};
use {rand, hazard, mpsc, debug, settings};
use garbage::Garbage;
lazy_static! {
/// The global state.
///
/// This state is shared between all the threads.
static ref STATE: State = State::new();
}
/// Create a new hazard.
///
/// This creates a new hazard and registers it in the global state. Its secondary, writer part is
/// returned.
pub fn create_hazard() -> hazard::Writer {
STATE.create_hazard()
}
/// Export garbage into the global state.
///
/// This adds the garbage, which will eventually be destroyed, to the global state. Note that this
/// does not tick, and thus cannot cause garbage collection.
pub fn export_garbage(garbage: Vec<Garbage>) {
STATE.export_garbage(garbage)
}
/// Attempt to garbage collect.
///
/// If another garbage collection is currently running, the thread will do nothing, and `Err(())`
/// will be returned. Otherwise, it returns `Ok(())`.
///
/// # Panic
///
/// If a destructor panics, this will panic as well.
pub fn try_gc() -> Result<(), ()> {
STATE.try_gc()
}
/// Tick the clock.
///
/// This shall be called when new garbage is added, as it will trigger a GC by some probability.
pub fn tick() {
// Generate a random number and compare it against the probability.
if rand::random::<usize>() < settings::get().gc_probability {
// The outcome was to (attempt to) GC.
let _ = try_gc();
}
}
/// A message to the global state.
enum Message {
/// Add new garbage.
Garbage(Vec<Garbage>),
/// Add a new hazard.
NewHazard(hazard::Reader),
}
/// The global state.
///
/// The global state is shared between all threads and keeps track of the garbage and the active
/// hazards.
///
/// It is divided into two parts: The channel and the garbo. The channel buffers messages, which
/// will eventually be executed at garbo, which holds all the data structures and is protected by a
/// mutex. The garbo holds the other end to the channel.
struct State {
/// The message-passing channel.
chan: mpsc::Sender<Message>,
/// The garbo part of the state.
garbo: Mutex<Garbo>,
}
impl State {
/// Initialize a new state.
fn new() -> State {
// Create the message-passing channel.
let (send, recv) = mpsc::channel();
// Construct the state from the two halves of the channel.
State {
chan: send,
garbo: Mutex::new(Garbo {
chan: recv,
garbage: Vec::new(),
hazards: Vec::new(),
})
}
}
/// Create a new hazard.
///
/// This creates a new hazard and registers it in the global state. Its secondary, writer part
/// is returned.
fn create_hazard(&self) -> hazard::Writer {
// Create the hazard.
let (writer, reader) = hazard::create();
// Communicate the new hazard to the global state through the channel.
self.chan.send(Message::NewHazard(reader));
// Return the other half of the hazard.
writer
}
/// Export garbage into the global state.
///
/// This adds the garbage, which will eventually be destroyed, to the global state.
fn export_garbage(&self, garbage: Vec<Garbage>) {
// Send the garbage to the message-passing channel of the state.
self.chan.send(Message::Garbage(garbage));
}
/// Try to collect the garbage.
///
/// This will handle all of the messages in the channel and then attempt to collect the
/// garbage. If another thread is currently collecting garbage, `Err(())` is returned,
/// otherwise it returns `Ok(())`.
///
/// Garbage collection works by scanning the hazards and dropping all the garbage which is not
/// currently active in the hazards.
fn try_gc(&self) -> Result<(), ()> {
// Lock the "garbo" (the part of the state needed to GC).
if let Some(mut garbo) = self.garbo.try_lock() {
// Collect the garbage.
garbo.gc();
Ok(())
} else {
// Another thread is collecting.
Err(())
}
}
}
impl panic::RefUnwindSafe for State {}
/// The garbo part of the state.
///
/// This part is supposed to act like the garbage collecting part. It handles hazards, garbage, and
/// the receiving point of the message-passing channel.
struct Garbo {
/// The channel of messages.
chan: mpsc::Receiver<Message>,
/// The to-be-destroyed garbage.
garbage: Vec<Garbage>,
/// The current hazards.
hazards: Vec<hazard::Reader>,
}
impl Garbo {
/// Handle a given message.
///
/// "Handle" in this case refers to applying the operation defined by the message to the state,
/// effectually executing the instruction of the message.
fn handle(&mut self, msg: Message) {
match msg {
// Append the garbage bulk to the garbage list.
Message::Garbage(mut garbage) => self.garbage.append(&mut garbage),
// Register the new hazard into the state.
Message::NewHazard(hazard) => self.hazards.push(hazard),
}
}
/// Handle all the messages and garbage collect all unused garbage.
///
/// # Panic
///
/// If a destructor panics, this will panic as well.
fn gc(&mut self) {
// Print message in debug mode.
debug::exec(|| println!("Collecting garbage."));
// Handle all the messages sent.
for msg in self.chan.recv_all() {
self.handle(msg);
}
// Create the set which will keep the _active_ hazards.
let mut active = HashSet::with_capacity(self.hazards.len());
// Take out the hazards and go over them one-by-one.
let len = self.hazards.len(); // TODO: This should be substituted into next line.
for hazard in mem::replace(&mut self.hazards, Vec::with_capacity(len)) {
match hazard.get() {
// The hazard is dead, so the other end (the writer) is not available anymore,
// hence we can safely destroy it.
hazard::State::Dead => unsafe { hazard.destroy() },
// The hazard is free and must thus be put back to the hazard list.
hazard::State::Free => self.hazards.push(hazard),
hazard::State::Protect(ptr) => {
// This hazard is active, hence we insert the pointer it contains in our
// "active" set.
active.insert(ptr);
// Since the hazard is still alive, we must put it back to the hazard list for
// future use.
self.hazards.push(hazard);
},
}
}
// Scan the garbage for unused objects.
self.garbage.retain(|garbage| active.contains(&garbage.ptr()))
}
}
impl Drop for Garbo {
fn drop(&mut self) {
// Do a final GC.
self.gc();
}
}
#[cfg(test)]
mod tests {
use super::*;
use garbage::Garbage;
use std::{panic, ptr};
#[test]
fn dtor_runs() | }
}
#[test]
fn clean_up_state() {
fn dtor(x: *const u8) {
unsafe {
*(x as *mut u8) = 1;
}
}
for _ in 0..1000 {
let b = Box::new(0);
{
let s = State::new();
s.export_garbage(vec![Garbage::new(&*b, dtor)]);
}
assert_eq!(*b, 1);
}
}
#[test]
fn panic_invalidate_state() {
fn panic(_: *const u8) {
panic!();
}
fn dtor(x: *const u8) {
unsafe {
*(x as *mut u8) = 1;
}
}
let s = State::new();
let b = Box::new(0);
let h = create_hazard();
h.protect(&*b);
s.export_garbage(vec![Garbage::new(&*b, dtor), Garbage::new(0x2 as *const u8, panic)]);
let _ = panic::catch_unwind(|| {
while s.try_gc().is_err() {}
});
assert_eq!(*b, 0);
h.free();
while s.try_gc().is_err() {}
assert_eq!(*b, 1);
}
#[test]
#[should_panic]
fn panic_in_dtor() {
fn dtor(_: *const u8) {
panic!();
}
let s = State::new();
s.export_garbage(vec![Garbage::new(ptr::null(), dtor)]);
while s.try_gc().is_err() {}
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn debug_more_hazards() {
let s = State::new();
let h = s.create_hazard();
h.free();
mem::forget(h);
}
}
| {
fn dtor(x: *const u8) {
unsafe {
*(x as *mut u8) = 1;
}
}
let s = State::new();
for _ in 0..1000 {
let b = Box::new(0);
let h = s.create_hazard();
h.protect(&*b);
s.export_garbage(vec![Garbage::new(&*b, dtor)]);
while s.try_gc().is_err() {}
assert_eq!(*b, 0);
while s.try_gc().is_err() {}
h.free();
while s.try_gc().is_err() {}
assert_eq!(*b, 1);
h.kill(); | identifier_body |
global.rs | //! The global state.
use parking_lot::Mutex;
use std::collections::HashSet;
use std::{mem, panic};
use {rand, hazard, mpsc, debug, settings};
use garbage::Garbage;
lazy_static! {
/// The global state.
///
/// This state is shared between all the threads.
static ref STATE: State = State::new();
}
/// Create a new hazard.
///
/// This creates a new hazard and registers it in the global state. Its secondary, writer part is
/// returned.
pub fn create_hazard() -> hazard::Writer {
STATE.create_hazard()
}
/// Export garbage into the global state.
///
/// This adds the garbage, which will eventually be destroyed, to the global state. Note that this
/// does not tick, and thus cannot cause garbage collection.
pub fn export_garbage(garbage: Vec<Garbage>) {
STATE.export_garbage(garbage)
}
/// Attempt to garbage collect.
///
/// If another garbage collection is currently running, the thread will do nothing, and `Err(())`
/// will be returned. Otherwise, it returns `Ok(())`.
///
/// # Panic
///
/// If a destructor panics, this will panic as well.
pub fn try_gc() -> Result<(), ()> {
STATE.try_gc()
}
/// Tick the clock.
///
/// This shall be called when new garbage is added, as it will trigger a GC by some probability.
pub fn tick() {
// Generate a random number and compare it against the probability.
if rand::random::<usize>() < settings::get().gc_probability {
// The outcome was to (attempt to) GC.
let _ = try_gc();
}
}
/// A message to the global state.
enum Message {
/// Add new garbage.
Garbage(Vec<Garbage>),
/// Add a new hazard.
NewHazard(hazard::Reader),
}
/// The global state.
///
/// The global state is shared between all threads and keeps track of the garbage and the active
/// hazards.
///
/// It is divided into two parts: The channel and the garbo. The channel buffers messages, which
/// will eventually be executed at garbo, which holds all the data structures and is protected by a
/// mutex. The garbo holds the other end to the channel.
struct State {
/// The message-passing channel.
chan: mpsc::Sender<Message>,
/// The garbo part of the state.
garbo: Mutex<Garbo>,
}
impl State {
/// Initialize a new state.
fn new() -> State {
// Create the message-passing channel.
let (send, recv) = mpsc::channel();
// Construct the state from the two halves of the channel.
State {
chan: send,
garbo: Mutex::new(Garbo {
chan: recv,
garbage: Vec::new(),
hazards: Vec::new(),
})
}
}
/// Create a new hazard.
///
/// This creates a new hazard and registers it in the global state. Its secondary, writer part
/// is returned.
fn create_hazard(&self) -> hazard::Writer {
// Create the hazard.
let (writer, reader) = hazard::create();
// Communicate the new hazard to the global state through the channel.
self.chan.send(Message::NewHazard(reader));
// Return the other half of the hazard.
writer
}
/// Export garbage into the global state.
///
/// This adds the garbage, which will eventually be destroyed, to the global state.
fn export_garbage(&self, garbage: Vec<Garbage>) {
// Send the garbage to the message-passing channel of the state.
self.chan.send(Message::Garbage(garbage));
} | /// garbage. If another thread is currently collecting garbage, `Err(())` is returned,
/// otherwise it returns `Ok(())`.
///
/// Garbage collection works by scanning the hazards and dropping all the garbage which is not
/// currently active in the hazards.
fn try_gc(&self) -> Result<(), ()> {
// Lock the "garbo" (the part of the state needed to GC).
if let Some(mut garbo) = self.garbo.try_lock() {
// Collect the garbage.
garbo.gc();
Ok(())
} else {
// Another thread is collecting.
Err(())
}
}
}
impl panic::RefUnwindSafe for State {}
/// The garbo part of the state.
///
/// This part is supposed to act like the garbage collecting part. It handles hazards, garbage, and
/// the receiving point of the message-passing channel.
struct Garbo {
/// The channel of messages.
chan: mpsc::Receiver<Message>,
/// The to-be-destroyed garbage.
garbage: Vec<Garbage>,
/// The current hazards.
hazards: Vec<hazard::Reader>,
}
impl Garbo {
/// Handle a given message.
///
/// "Handle" in this case refers to applying the operation defined by the message to the state,
/// effectually executing the instruction of the message.
fn handle(&mut self, msg: Message) {
match msg {
// Append the garbage bulk to the garbage list.
Message::Garbage(mut garbage) => self.garbage.append(&mut garbage),
// Register the new hazard into the state.
Message::NewHazard(hazard) => self.hazards.push(hazard),
}
}
/// Handle all the messages and garbage collect all unused garbage.
///
/// # Panic
///
/// If a destructor panics, this will panic as well.
fn gc(&mut self) {
// Print message in debug mode.
debug::exec(|| println!("Collecting garbage."));
// Handle all the messages sent.
for msg in self.chan.recv_all() {
self.handle(msg);
}
// Create the set which will keep the _active_ hazards.
let mut active = HashSet::with_capacity(self.hazards.len());
// Take out the hazards and go over them one-by-one.
let len = self.hazards.len(); // TODO: This should be substituted into next line.
for hazard in mem::replace(&mut self.hazards, Vec::with_capacity(len)) {
match hazard.get() {
// The hazard is dead, so the other end (the writer) is not available anymore,
// hence we can safely destroy it.
hazard::State::Dead => unsafe { hazard.destroy() },
// The hazard is free and must thus be put back to the hazard list.
hazard::State::Free => self.hazards.push(hazard),
hazard::State::Protect(ptr) => {
// This hazard is active, hence we insert the pointer it contains in our
// "active" set.
active.insert(ptr);
// Since the hazard is still alive, we must put it back to the hazard list for
// future use.
self.hazards.push(hazard);
},
}
}
// Scan the garbage for unused objects.
self.garbage.retain(|garbage| active.contains(&garbage.ptr()))
}
}
impl Drop for Garbo {
fn drop(&mut self) {
// Do a final GC.
self.gc();
}
}
#[cfg(test)]
mod tests {
use super::*;
use garbage::Garbage;
use std::{panic, ptr};
#[test]
fn dtor_runs() {
fn dtor(x: *const u8) {
unsafe {
*(x as *mut u8) = 1;
}
}
let s = State::new();
for _ in 0..1000 {
let b = Box::new(0);
let h = s.create_hazard();
h.protect(&*b);
s.export_garbage(vec![Garbage::new(&*b, dtor)]);
while s.try_gc().is_err() {}
assert_eq!(*b, 0);
while s.try_gc().is_err() {}
h.free();
while s.try_gc().is_err() {}
assert_eq!(*b, 1);
h.kill();
}
}
#[test]
fn clean_up_state() {
fn dtor(x: *const u8) {
unsafe {
*(x as *mut u8) = 1;
}
}
for _ in 0..1000 {
let b = Box::new(0);
{
let s = State::new();
s.export_garbage(vec![Garbage::new(&*b, dtor)]);
}
assert_eq!(*b, 1);
}
}
#[test]
fn panic_invalidate_state() {
fn panic(_: *const u8) {
panic!();
}
fn dtor(x: *const u8) {
unsafe {
*(x as *mut u8) = 1;
}
}
let s = State::new();
let b = Box::new(0);
let h = create_hazard();
h.protect(&*b);
s.export_garbage(vec![Garbage::new(&*b, dtor), Garbage::new(0x2 as *const u8, panic)]);
let _ = panic::catch_unwind(|| {
while s.try_gc().is_err() {}
});
assert_eq!(*b, 0);
h.free();
while s.try_gc().is_err() {}
assert_eq!(*b, 1);
}
#[test]
#[should_panic]
fn panic_in_dtor() {
fn dtor(_: *const u8) {
panic!();
}
let s = State::new();
s.export_garbage(vec![Garbage::new(ptr::null(), dtor)]);
while s.try_gc().is_err() {}
}
#[cfg(debug_assertions)]
#[test]
#[should_panic]
fn debug_more_hazards() {
let s = State::new();
let h = s.create_hazard();
h.free();
mem::forget(h);
}
} |
/// Try to collect the garbage.
///
/// This will handle all of the messages in the channel and then attempt to collect the | random_line_split
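A hedged sketch of how the module-level API above fits together, modeled directly on the `dtor_runs` test; the destructor and pointer handling follow the conventions of the tests shown here rather than a verified public interface, and the function name is illustrative.

```rust
// Illustrative only: mirrors the tests above, using the module-level functions.
fn gc_round_trip_sketch() {
    fn destructor(ptr: *const u8) {
        // A real destructor would deallocate or otherwise finalize `ptr`;
        // the tests above simply write a marker byte.
        unsafe { *(ptr as *mut u8) = 1; }
    }

    let value = Box::new(0);

    // Protect the value through a hazard so a collection pass keeps it alive.
    let hazard = create_hazard();
    hazard.protect(&*value);

    // Queue the value for destruction; `tick` may or may not trigger a GC.
    export_garbage(vec![Garbage::new(&*value, destructor)]);
    tick();

    // After the hazard is released, a later collection may run `destructor`.
    hazard.free();
    while try_gc().is_err() {}
}
```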
hash.rs | use std::collections::HashMap;
fn | (number: &str) -> &str {
match number {
"798-1364" => "We're sorry, the call cannot be completed as dialed.
Please hang up and try again.",
"645-7689" => "Hello, this is Mr. Awesome's Pizza. My name is Fred.
What can I get for you today?",
_ => "Hi! Who is this again?"
}
}
fn main() {
let mut contacts = HashMap::new();
contacts.insert("Daniel", "798-1364");
contacts.insert("Ashley", "645-7689");
contacts.insert("Katie", "435-8291");
contacts.insert("Robert", "956-1745");
// Takes a reference and returns an Option<&V>
match contacts.get(&"Daniel") {
Some(&number) => println!("Calling Daniel: {}", call(number)),
_ => println!("Don't have Daniel's number."),
}
// If the inserted value is new, `HashMap::insert()` returns `None`,
// otherwise it returns `Some(value)`
contacts.insert("Daniel", "164-6743");
match contacts.get(&"Ashley") {
Some(&number) => println!("Calling Ashley: {}", call(number)),
_ => println!("Don't have Ashley's number."),
}
contacts.remove(&("Ashley"));
// `HashMap::iter()` returns an iterator that yields
// (&'a key, &'a value) pairs in arbitrary order.
for (contact, &number) in contacts.iter() {
println!("Calling {}: {}", contact, call(number));
}
}
| call | identifier_name |
hash.rs | use std::collections::HashMap;
| "645-7689" => "Hello, this is Mr. Awesome's Pizza. My name is Fred.
What can I get for you today?",
_ => "Hi! Who is this again?"
}
}
fn main() {
let mut contacts = HashMap::new();
contacts.insert("Daniel", "798-1364");
contacts.insert("Ashley", "645-7689");
contacts.insert("Katie", "435-8291");
contacts.insert("Robert", "956-1745");
// Takes a reference and returns an Option<&V>
match contacts.get(&"Daniel") {
Some(&number) => println!("Calling Daniel: {}", call(number)),
_ => println!("Don't have Daniel's number."),
}
// If the inserted value is new, `HashMap::insert()` returns `None`,
// otherwise it returns `Some(value)`
contacts.insert("Daniel", "164-6743");
match contacts.get(&"Ashley") {
Some(&number) => println!("Calling Ashley: {}", call(number)),
_ => println!("Don't have Ashley's number."),
}
contacts.remove(&("Ashley"));
// `HashMap::iter()` returns an iterator that yields
// (&'a key, &'a value) pairs in arbitrary order.
for (contact, &number) in contacts.iter() {
println!("Calling {}: {}", contact, call(number));
}
} | fn call(number: &str) -> &str {
match number {
"798-1364" => "We're sorry, the call cannot be completed as dialed.
Please hang up and try again.", | random_line_split |
hash.rs | use std::collections::HashMap;
fn call(number: &str) -> &str {
match number {
"798-1364" => "We're sorry, the call cannot be completed as dialed.
Please hang up and try again.",
"645-7689" => "Hello, this is Mr. Awesome's Pizza. My name is Fred.
What can I get for you today?",
_ => "Hi! Who is this again?"
}
}
fn main() | _ => println!("Don't have Ashley's number."),
}
contacts.remove(&("Ashley"));
// `HashMap::iter()` returns an iterator that yields
// (&'a key, &'a value) pairs in arbitrary order.
for (contact, &number) in contacts.iter() {
println!("Calling {}: {}", contact, call(number));
}
}
| {
let mut contacts = HashMap::new();
contacts.insert("Daniel", "798-1364");
contacts.insert("Ashley", "645-7689");
contacts.insert("Katie", "435-8291");
contacts.insert("Robert", "956-1745");
// Takes a reference and returns an Option<&V>
match contacts.get(&"Daniel") {
Some(&number) => println!("Calling Daniel: {}", call(number)),
_ => println!("Don't have Daniel's number."),
}
// If the inserted value is new, `HashMap::insert()` returns `None`,
// otherwise it returns `Some(value)`
contacts.insert("Daniel", "164-6743");
match contacts.get(&"Ashley") {
Some(&number) => println!("Calling Ashley: {}", call(number)), | identifier_body |
sections.rs | use super::{ConfigParseError, Error};
use std::io::Read;
#[derive(Clone, Debug)]
pub struct Sections {
pub common_section: String,
pub page_sections: Vec<String>,
pub char_sections: Vec<String>,
pub kerning_sections: Vec<String>,
}
impl Sections {
pub fn new<R>(mut source: R) -> Result<Sections, Error>
where
R: Read,
{
// Load the entire file into a String.
let mut content = String::new();
source.read_to_string(&mut content)?;
// Expect the "info" section.
let mut lines = content.lines();
if !lines.next().map(|l| l.starts_with("info")).unwrap_or(false) {
return Err(Error::from(ConfigParseError::MissingSection(String::from(
"info",
))));
}
// Expect the "common" section.
let common_section = match lines.next() {
Some(line) if line.starts_with("common") => line.to_owned(),
_ => {
return Err(Error::from(ConfigParseError::MissingSection(String::from(
"common",
))))
}
};
// Expect the "page" sections.
let lines = lines
.skip_while(|l| !l.starts_with("page"))
.collect::<Vec<_>>();
let lines = lines.iter();
let page_sections = lines
.clone()
.take_while(|l| l.starts_with("page"))
.map(|s| s.to_string())
.collect::<Vec<_>>();
if page_sections.is_empty() { | ))));
}
let mut lines = lines.skip(page_sections.len());
// Expect the "char" sections.
let _ = lines.next().unwrap(); // char_count_section
let char_sections = lines
.clone()
.take_while(|l| l.starts_with("char"))
.map(|s| s.to_string())
.collect::<Vec<_>>();
if char_sections.is_empty() {
return Err(Error::from(ConfigParseError::MissingSection(String::from(
"char",
))));
}
let mut lines = lines.skip(char_sections.len());
// Expect the "kerning" sections.
let kerning_sections = if lines.next().is_some() {
lines
.clone()
.take_while(|l| l.starts_with("kerning"))
.map(|s| s.to_string())
.collect::<Vec<_>>()
} else {
Vec::new()
};
Ok(Sections {
common_section,
page_sections,
char_sections,
kerning_sections,
})
}
} | return Err(Error::from(ConfigParseError::MissingSection(String::from(
"page", | random_line_split |
sections.rs | use super::{ConfigParseError, Error};
use std::io::Read;
#[derive(Clone, Debug)]
pub struct Sections {
pub common_section: String,
pub page_sections: Vec<String>,
pub char_sections: Vec<String>,
pub kerning_sections: Vec<String>,
}
impl Sections {
pub fn new<R>(mut source: R) -> Result<Sections, Error>
where
R: Read,
{
// Load the entire file into a String.
let mut content = String::new();
source.read_to_string(&mut content)?;
// Expect the "info" section.
let mut lines = content.lines();
if !lines.next().map(|l| l.starts_with("info")).unwrap_or(false) {
return Err(Error::from(ConfigParseError::MissingSection(String::from(
"info",
))));
}
// Expect the "common" section.
let common_section = match lines.next() {
Some(line) if line.starts_with("common") => line.to_owned(),
_ => {
return Err(Error::from(ConfigParseError::MissingSection(String::from(
"common",
))))
}
};
// Expect the "page" sections.
let lines = lines
.skip_while(|l| !l.starts_with("page"))
.collect::<Vec<_>>();
let lines = lines.iter();
let page_sections = lines
.clone()
.take_while(|l| l.starts_with("page"))
.map(|s| s.to_string())
.collect::<Vec<_>>();
if page_sections.is_empty() {
return Err(Error::from(ConfigParseError::MissingSection(String::from(
"page",
))));
}
let mut lines = lines.skip(page_sections.len());
// Expect the "char" sections.
let _ = lines.next().unwrap(); // char_count_section
let char_sections = lines
.clone()
.take_while(|l| l.starts_with("char"))
.map(|s| s.to_string())
.collect::<Vec<_>>();
if char_sections.is_empty() {
return Err(Error::from(ConfigParseError::MissingSection(String::from(
"char",
))));
}
let mut lines = lines.skip(char_sections.len());
// Expect the "kerning" sections.
let kerning_sections = if lines.next().is_some() {
lines
.clone()
.take_while(|l| l.starts_with("kerning"))
.map(|s| s.to_string())
.collect::<Vec<_>>()
} else | ;
Ok(Sections {
common_section,
page_sections,
char_sections,
kerning_sections,
})
}
}
| {
Vec::new()
} | conditional_block |
sections.rs | use super::{ConfigParseError, Error};
use std::io::Read;
#[derive(Clone, Debug)]
pub struct | {
pub common_section: String,
pub page_sections: Vec<String>,
pub char_sections: Vec<String>,
pub kerning_sections: Vec<String>,
}
impl Sections {
pub fn new<R>(mut source: R) -> Result<Sections, Error>
where
R: Read,
{
// Load the entire file into a String.
let mut content = String::new();
source.read_to_string(&mut content)?;
// Expect the "info" section.
let mut lines = content.lines();
if !lines.next().map(|l| l.starts_with("info")).unwrap_or(false) {
return Err(Error::from(ConfigParseError::MissingSection(String::from(
"info",
))));
}
// Expect the "common" section.
let common_section = match lines.next() {
Some(line) if line.starts_with("common") => line.to_owned(),
_ => {
return Err(Error::from(ConfigParseError::MissingSection(String::from(
"common",
))))
}
};
// Expect the "page" sections.
let lines = lines
.skip_while(|l| !l.starts_with("page"))
.collect::<Vec<_>>();
let lines = lines.iter();
let page_sections = lines
.clone()
.take_while(|l| l.starts_with("page"))
.map(|s| s.to_string())
.collect::<Vec<_>>();
if page_sections.is_empty() {
return Err(Error::from(ConfigParseError::MissingSection(String::from(
"page",
))));
}
let mut lines = lines.skip(page_sections.len());
// Expect the "char" sections.
let _ = lines.next().unwrap(); // char_count_section
let char_sections = lines
.clone()
.take_while(|l| l.starts_with("char"))
.map(|s| s.to_string())
.collect::<Vec<_>>();
if char_sections.is_empty() {
return Err(Error::from(ConfigParseError::MissingSection(String::from(
"char",
))));
}
let mut lines = lines.skip(char_sections.len());
// Expect the "kerning" sections.
let kerning_sections = if lines.next().is_some() {
lines
.clone()
.take_while(|l| l.starts_with("kerning"))
.map(|s| s.to_string())
.collect::<Vec<_>>()
} else {
Vec::new()
};
Ok(Sections {
common_section,
page_sections,
char_sections,
kerning_sections,
})
}
}
| Sections | identifier_name |
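A hedged sketch of feeding `Sections::new` an in-memory font descriptor in the layout the parser above expects: an `info` line, a `common` line, `page` lines, a char-count line, `char` lines, then an optional kerning-count line followed by `kerning` lines. The field values are illustrative only; the parser only inspects line prefixes.

```rust
// Illustrative input; only the line prefixes matter to the parser above.
fn parse_sections_sketch() -> Result<(), Error> {
    let data = "info face=Example size=32
common lineHeight=36 pages=1
page id=0 file=example_0.png
chars count=2
char id=65 x=0 y=0 width=16 height=20
char id=66 x=16 y=0 width=16 height=20
kernings count=1
kerning first=65 second=66 amount=-1
";
    // `Sections::new` accepts any `Read`, so a byte slice works for a quick test.
    let sections = Sections::new(data.as_bytes())?;
    assert_eq!(sections.page_sections.len(), 1);
    assert_eq!(sections.char_sections.len(), 2);
    assert_eq!(sections.kerning_sections.len(), 1);
    Ok(())
}
```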
pack.rs | // Copyright 2017 ThetaSinner
//
// This file is part of Osmium.
// Osmium is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Osmium is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Osmium. If not, see <http://www.gnu.org/licenses/>.
// std
use std::slice;
// osmium
use http2::hpack::context::{self, ContextTrait};
use http2::hpack::flags;
use http2::hpack::number;
use http2::hpack::string;
use http2::hpack::table;
use http2::hpack::header_trait;
const NEVER_INDEXED: [String; 0] = [
// Date is a temporary example of a never indexed header
//header::HeaderName::Date
];
const LITERAL_WITHOUT_INDEXING: [&str; 1] = [
// Path is an example of a header which has common values which should be indexed i.e. '/' and '/index.html'
// but which will have many values which should not be fully indexed, just header name indexed
":path"
];
// TODO the table size update needs to come through here I think?
// how would that work.. hopefully there'll be an example in the spec
// TODO per header huffman coding setting?
// TODO comments need updating, they're still the ones I wrote while puzzling out the encoder.
pub fn pack<T>(headers: slice::Iter<T>, context: &mut context::SendContext, use_huffman_coding: bool) -> Vec<u8>
where T: header_trait::HpackHeaderTrait
{
let mut target = Vec::new();
// Check whether a decision has been made to change the dynamic table size.
if let Some(size_update) = context.get_size_update() {
// Update the size of the dynamic table used by the send context. This may cause evictions
// if the size is reduced.
// This could be done as soon as the decision to change the size is made, which might free
// up memory sooner. However, doing it here means that the change is always made at the same
// time as the signal to the remote table is created.
// TODO handle error here if the size_update is larger than the allowed size?
// TODO why is that taking usize?
context.set_max_size(size_update as usize);
// The size update signal is sent to the remote decoding table.
pack_dynamic_table_size_update(size_update, &mut target);
}
for header in headers {
let field = table::Field {
name: String::from(header.get_name()),
value: String::from(header.get_value())
};
trace!("{:?}", field);
if !header.is_allow_compression() || is_never_index_header(&field.name) {
// TODO it's really not clever to have to clone the value here to build a field for search.
// especially as find_field is never used without a Header available.
if let Some((index, _)) = context.find_field(&field) {
pack_literal_never_indexed_with_indexed_name(index, &field.value, &mut target);
}
else {
pack_literal_never_indexed(&field, use_huffman_coding, &mut target);
}
}
else {
trace!("okay, so we need to work out how to index this one");
if let Some((index, with_value)) = context.find_field(&field) {
trace!("found a field, with index {} and with value present {}", index, with_value);
// header name is indexed and value is indexed as indicated by with_value.
if with_value {
pack_indexed_header(index, &mut target);
}
else {
trace!("is indexed, but not with value");
// the value is not currently indexed, we could index and allow the value to be added to the
// dynamic table in the decoder, or we could not index and just refer to this header name.
if is_literal_without_indexing_header(&field.name) {
trace!("pack without indexing");
pack_literal_without_indexing_with_indexed_name(index, &field.value, use_huffman_coding, &mut target);
}
else {
pack_literal_with_indexing_with_indexed_name(index, &field.value, use_huffman_coding, &mut target);
context.insert(field);
}
}
}
else {
trace!("not found, start from scratch");
// header name is not currently indexed, we can index it now, or send a literal representation.
if is_literal_without_indexing_header(&field.name) {
pack_literal_without_indexing(&field, use_huffman_coding, &mut target);
}
else {
pack_literal_with_indexing(&field, use_huffman_coding, &mut target);
context.insert(field);
}
}
}
}
target
}
fn is_never_index_header(header_name: &str) -> bool {
for never_index_header_name in NEVER_INDEXED.into_iter() {
if header_name == never_index_header_name {
return true;
}
}
false
}
fn is_literal_without_indexing_header(header_name: &String) -> bool {
for literal_without_indexing_header_name in LITERAL_WITHOUT_INDEXING.into_iter() {
if header_name == literal_without_indexing_header_name {
return true;
}
}
false
}
fn pack_indexed_header(index: usize, target: &mut Vec<u8>) {
let encoded_index = number::encode(index as u32, 7);
target.push(flags::INDEXED_HEADER_FLAG | encoded_index.prefix);
if let Some(rest) = encoded_index.rest {
target.extend(rest);
}
trace!("packed indexed header with index {}, {:?}", index, target);
}
fn pack_literal_with_indexing_with_indexed_name(index: usize, header_value: &str, use_huffman_coding: bool, target: &mut Vec<u8>) {
let encoded_name_index = number::encode(index as u32, 6);
target.push(flags::LITERAL_WITH_INDEXING_FLAG | encoded_name_index.prefix);
if let Some(rest) = encoded_name_index.rest {
target.extend(rest);
}
trace!("{:?}", string::encode(String::from(header_value), use_huffman_coding));
target.extend(string::encode(String::from(header_value), use_huffman_coding));
}
fn pack_literal_with_indexing(field: &table::Field, use_huffman_coding: bool, target: &mut Vec<u8>) {
target.push(flags::LITERAL_WITH_INDEXING_FLAG);
target.extend(string::encode(String::from(field.name.clone()), use_huffman_coding));
target.extend(string::encode(String::from(field.value.clone()), use_huffman_coding));
}
fn pack_literal_without_indexing_with_indexed_name(index: usize, header_value: &str, use_huffman_coding: bool, target: &mut Vec<u8>) {
trace!("index to use {}", index);
let encoded_name_index = number::encode(index as u32, 4);
trace!("prefix {}", encoded_name_index.prefix);
target.push((!flags::LITERAL_WITH_INDEXING_FLAG) & encoded_name_index.prefix);
if let Some(rest) = encoded_name_index.rest {
target.extend(rest);
}
target.extend(string::encode(String::from(header_value.clone()), use_huffman_coding));
}
fn pack_literal_without_indexing(field: &table::Field, use_huffman_coding: bool, target: &mut Vec<u8>) {
target.push(0u8);
target.extend(string::encode(String::from(field.name.clone()), use_huffman_coding));
target.extend(string::encode(String::from(field.value.clone()), use_huffman_coding));
}
fn pack_literal_never_indexed_with_indexed_name(index: usize, header_value: &str, target: &mut Vec<u8>) {
let encoded_name_index = number::encode(index as u32, 4);
target.push(flags::LITERAL_NEVER_INDEX_FLAG | encoded_name_index.prefix);
if let Some(rest) = encoded_name_index.rest {
target.extend(rest);
}
// field should not be compressed... which means not indexed but the spec is not clear
// what should be done with regards to huffman coding.
// deliberately do not allow override of huffman coding for the value
target.extend(string::encode(String::from(header_value.clone()), false));
}
fn pack_literal_never_indexed(field: &table::Field, use_huffman_coding: bool, target: &mut Vec<u8>) {
target.push(flags::LITERAL_NEVER_INDEX_FLAG);
target.extend(string::encode(String::from(field.name.clone()), use_huffman_coding));
// deliberately do not allow override of huffman coding for the value
target.extend(string::encode(String::from(field.value.clone()), false));
}
fn pack_dynamic_table_size_update(size_update: u32, target: &mut Vec<u8>) {
let encoded_size_update = number::encode(size_update, 5);
target.push(flags::SIZE_UPDATE_FLAG | encoded_size_update.prefix);
if let Some(rest) = encoded_size_update.rest |
}
| {
target.extend(rest);
} | conditional_block |
pack.rs | // Copyright 2017 ThetaSinner
//
// This file is part of Osmium.
// Osmium is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Osmium is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Osmium. If not, see <http://www.gnu.org/licenses/>.
// std
use std::slice;
// osmium
use http2::hpack::context::{self, ContextTrait};
use http2::hpack::flags;
use http2::hpack::number;
use http2::hpack::string;
use http2::hpack::table;
use http2::hpack::header_trait;
const NEVER_INDEXED: [String; 0] = [
// Date is a temporary example of a never indexed header
//header::HeaderName::Date
];
const LITERAL_WITHOUT_INDEXING: [&str; 1] = [
// Path is an example of a header which has common values which should be indexed i.e. '/' and '/index.html'
// but which will have many values which should not be fully indexed, just header name indexed
":path"
];
// TODO the table size update needs to come through here I think?
// how would that work.. hopefully there'll be an example in the spec
// TODO per header huffman coding setting?
// TODO comments need updating, they're still the ones I wrote while puzzling out the encoder.
pub fn pack<T>(headers: slice::Iter<T>, context: &mut context::SendContext, use_huffman_coding: bool) -> Vec<u8>
where T: header_trait::HpackHeaderTrait
{
let mut target = Vec::new();
// Check whether a decision has been made to change the dynamic table size.
if let Some(size_update) = context.get_size_update() {
// Update the size of the dynamic table used by the send context. This may cause evictions
// if the size is reduced.
// This could be done as soon as the decision to change the size is made, which might free
// up memory sooner. However, doing it here means that the change is always made at the same
// time as the signal to the remote table is created.
// TODO handle error here if the size_update is larger than the allowed size?
// TODO why is that taking usize?
context.set_max_size(size_update as usize);
// The size update signal is sent to the remote decoding table.
pack_dynamic_table_size_update(size_update, &mut target);
}
for header in headers {
let field = table::Field {
name: String::from(header.get_name()),
value: String::from(header.get_value())
};
trace!("{:?}", field);
if !header.is_allow_compression() || is_never_index_header(&field.name) {
// TODO it's really not clever to have to clone the value here to build a field for search.
// especially as find_field is never used without a Header available.
if let Some((index, _)) = context.find_field(&field) {
pack_literal_never_indexed_with_indexed_name(index, &field.value, &mut target);
}
else {
pack_literal_never_indexed(&field, use_huffman_coding, &mut target);
}
}
else {
trace!("okay, so we need to work out how to index this one");
if let Some((index, with_value)) = context.find_field(&field) {
trace!("found a field, with index {} and with value present {}", index, with_value);
// header name is indexed and value is indexed as indicated by with_value.
if with_value {
pack_indexed_header(index, &mut target);
}
else {
trace!("is indexed, but not with value");
// the value is not currently indexed, we could index and allow the value to be added to the
// dynamic table in the decoder, or we could not index and just refer to this header name.
if is_literal_without_indexing_header(&field.name) {
trace!("pack without indexing");
pack_literal_without_indexing_with_indexed_name(index, &field.value, use_huffman_coding, &mut target);
}
else {
pack_literal_with_indexing_with_indexed_name(index, &field.value, use_huffman_coding, &mut target);
context.insert(field);
}
}
}
else {
trace!("not found, start from scratch");
// header name is not currently indexed, we can index it now, or send a literal representation.
if is_literal_without_indexing_header(&field.name) {
pack_literal_without_indexing(&field, use_huffman_coding, &mut target);
}
else {
pack_literal_with_indexing(&field, use_huffman_coding, &mut target);
context.insert(field);
}
}
}
}
target
}
fn is_never_index_header(header_name: &str) -> bool {
for never_index_header_name in NEVER_INDEXED.into_iter() {
if header_name == never_index_header_name {
return true;
}
}
false
}
| if header_name == literal_without_indexing_header_name {
return true;
}
}
false
}
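// Indexed Header Field representation (RFC 7541, section 6.1): a single
// prefix-encoded integer with a 7-bit prefix names a table entry carrying both
// the header name and value. As a sketch of the expected output, static table
// entry 2 (":method: GET") should encode as the single octet 0x82.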
fn pack_indexed_header(index: usize, target: &mut Vec<u8>) {
let encoded_index = number::encode(index as u32, 7);
target.push(flags::INDEXED_HEADER_FLAG | encoded_index.prefix);
if let Some(rest) = encoded_index.rest {
target.extend(rest);
}
trace!("packed indexed header with index {}, {:?}", index, target);
}
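// Literal Header Field with Incremental Indexing, indexed name (RFC 7541,
// section 6.2.1): the name is a 6-bit prefix-encoded index and the value
// follows as a string literal, Huffman-coded when requested. The decoder adds
// the field to its dynamic table, which is why the caller of this function
// also inserts the field into the local send context.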
fn pack_literal_with_indexing_with_indexed_name(index: usize, header_value: &str, use_huffman_coding: bool, target: &mut Vec<u8>) {
let encoded_name_index = number::encode(index as u32, 6);
target.push(flags::LITERAL_WITH_INDEXING_FLAG | encoded_name_index.prefix);
if let Some(rest) = encoded_name_index.rest {
target.extend(rest);
}
trace!("{:?}", string::encode(String::from(header_value), use_huffman_coding));
target.extend(string::encode(String::from(header_value), use_huffman_coding));
}
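// The same incremental-indexing representation, but with a new name: only the
// pattern flag is written (index zero), then the name and value follow as
// string literals.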
fn pack_literal_with_indexing(field: &table::Field, use_huffman_coding: bool, target: &mut Vec<u8>) {
target.push(flags::LITERAL_WITH_INDEXING_FLAG);
target.extend(string::encode(String::from(field.name.clone()), use_huffman_coding));
target.extend(string::encode(String::from(field.value.clone()), use_huffman_coding));
}
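// Literal Header Field without Indexing, indexed name (RFC 7541, section
// 6.2.2): a 4-bit prefix-encoded name index under an all-zero upper nibble.
// Masking the prefix with the complement of LITERAL_WITH_INDEXING_FLAG is
// assumed to clear the pattern bits so only the encoded index remains.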
fn pack_literal_without_indexing_with_indexed_name(index: usize, header_value: &str, use_huffman_coding: bool, target: &mut Vec<u8>) {
trace!("index to use {}", index);
let encoded_name_index = number::encode(index as u32, 4);
trace!("prefix {}", encoded_name_index.prefix);
target.push((!flags::LITERAL_WITH_INDEXING_FLAG) & encoded_name_index.prefix);
if let Some(rest) = encoded_name_index.rest {
target.extend(rest);
}
target.extend(string::encode(String::from(header_value.clone()), use_huffman_coding));
}
fn pack_literal_without_indexing(field: &table::Field, use_huffman_coding: bool, target: &mut Vec<u8>) {
target.push(0u8);
target.extend(string::encode(String::from(field.name.clone()), use_huffman_coding));
target.extend(string::encode(String::from(field.value.clone()), use_huffman_coding));
}
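// Literal Header Field Never Indexed, indexed name (RFC 7541, section 6.2.3):
// the same layout as the without-indexing form, but the flag marks the field
// as one that intermediaries must never add to a table.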
fn pack_literal_never_indexed_with_indexed_name(index: usize, header_value: &str, target: &mut Vec<u8>) {
let encoded_name_index = number::encode(index as u32, 4);
target.push(flags::LITERAL_NEVER_INDEX_FLAG | encoded_name_index.prefix);
if let Some(rest) = encoded_name_index.rest {
target.extend(rest);
}
// field should not be compressed... which means not indexed but the spec is not clear
// what should be done with regard to huffman coding.
// deliberately do not allow override of huffman coding for the value
target.extend(string::encode(String::from(header_value.clone()), false));
}
fn pack_literal_never_indexed(field: &table::Field, use_huffman_coding: bool, target: &mut Vec<u8>) {
target.push(flags::LITERAL_NEVER_INDEX_FLAG);
target.extend(string::encode(String::from(field.name.clone()), use_huffman_coding));
// deliberately do not allow override of huffman coding for the value
target.extend(string::encode(String::from(field.value.clone()), false));
}
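// Dynamic Table Size Update (RFC 7541, section 6.3): the new maximum table
// size is a prefix-encoded integer with a 5-bit prefix under the size-update
// pattern.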
fn pack_dynamic_table_size_update(size_update: u32, target: &mut Vec<u8>) {
let encoded_size_update = number::encode(size_update, 5);
target.push(flags::SIZE_UPDATE_FLAG | encoded_size_update.prefix);
if let Some(rest) = encoded_size_update.rest {
target.extend(rest);
}
} | fn is_literal_without_indexing_header(header_name: &String) -> bool {
for literal_without_indexing_header_name in LITERAL_WITHOUT_INDEXING.into_iter() { | random_line_split |
pack.rs | // Copyright 2017 ThetaSinner
//
// This file is part of Osmium.
// Osmium is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Osmium is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Osmium. If not, see <http://www.gnu.org/licenses/>.
// std
use std::slice;
// osmium
use http2::hpack::context::{self, ContextTrait};
use http2::hpack::flags;
use http2::hpack::number;
use http2::hpack::string;
use http2::hpack::table;
use http2::hpack::header_trait;
const NEVER_INDEXED: [String; 0] = [
// Date is a temporary example of a never indexed header
//header::HeaderName::Date
];
const LITERAL_WITHOUT_INDEXING: [&str; 1] = [
// Path is an example of a header with common values that should be indexed, e.g. '/' and '/index.html',
// but also many values that should not be fully indexed; for those only the header name is indexed
":path"
];
// TODO the table size update needs to come through here I think?
// how would that work.. hopefully there'll be an example in the spec
// TODO per header huffman coding setting?
// TODO comments need updating, they're still the ones I wrote while puzzling out the encoder.
pub fn pack<T>(headers: slice::Iter<T>, context: &mut context::SendContext, use_huffman_coding: bool) -> Vec<u8>
where T: header_trait::HpackHeaderTrait
{
let mut target = Vec::new();
// Check whether a decision has been made to change the dynamic table size.
if let Some(size_update) = context.get_size_update() {
// Update the size of the dynamic table used by the send context. This may cause evictions
// if the size is reduced.
// This could be done as soon as the decision to change the size is made, which might free
// up memory sooner. However, doing it here means that the change is always made at the same
// time as the signal to the remote table is created.
// TODO handle error here if the size_update is larger than the allowed size?
// TODO why is that taking usize?
context.set_max_size(size_update as usize);
// The size update signal is sent to the remote decoding table.
pack_dynamic_table_size_update(size_update, &mut target);
}
for header in headers {
let field = table::Field {
name: String::from(header.get_name()),
value: String::from(header.get_value())
};
trace!("{:?}", field);
if !header.is_allow_compression() || is_never_index_header(&field.name) {
// TODO it's really not clever to have to clone the value here to build a field for search.
// especially as find_field is never used without a Header available.
if let Some((index, _)) = context.find_field(&field) {
pack_literal_never_indexed_with_indexed_name(index, &field.value, &mut target);
}
else {
pack_literal_never_indexed(&field, use_huffman_coding, &mut target);
}
}
else {
trace!("okay, so we need to work out how to index this one");
if let Some((index, with_value)) = context.find_field(&field) {
trace!("found a field, with index {} and with value present {}", index, with_value);
// header name is indexed and value is indexed as indicated by with_value.
if with_value {
pack_indexed_header(index, &mut target);
}
else {
trace!("is indexed, but not with value");
// the value is not currently indexed, we could index and allow the value to be added to the
// dynamic table in the decoder, or we could not index and just refer to this header name.
if is_literal_without_indexing_header(&field.name) {
trace!("pack without indexing");
pack_literal_without_indexing_with_indexed_name(index, &field.value, use_huffman_coding, &mut target);
}
else {
pack_literal_with_indexing_with_indexed_name(index, &field.value, use_huffman_coding, &mut target);
context.insert(field);
}
}
}
else {
trace!("not found, start from scratch");
// header name is not currently indexed, we can index it now, or send a literal representation.
if is_literal_without_indexing_header(&field.name) {
pack_literal_without_indexing(&field, use_huffman_coding, &mut target);
}
else {
pack_literal_with_indexing(&field, use_huffman_coding, &mut target);
context.insert(field);
}
}
}
}
target
}
fn is_never_index_header(header_name: &str) -> bool {
for never_index_header_name in NEVER_INDEXED.into_iter() {
if header_name == never_index_header_name {
return true;
}
}
false
}
fn is_literal_without_indexing_header(header_name: &String) -> bool {
for literal_without_indexing_header_name in LITERAL_WITHOUT_INDEXING.into_iter() {
if header_name == literal_without_indexing_header_name {
return true;
}
}
false
}
fn pack_indexed_header(index: usize, target: &mut Vec<u8>) {
let encoded_index = number::encode(index as u32, 7);
target.push(flags::INDEXED_HEADER_FLAG | encoded_index.prefix);
if let Some(rest) = encoded_index.rest {
target.extend(rest);
}
trace!("packed indexed header with index {}, {:?}", index, target);
}
fn pack_literal_with_indexing_with_indexed_name(index: usize, header_value: &str, use_huffman_coding: bool, target: &mut Vec<u8>) {
let encoded_name_index = number::encode(index as u32, 6);
target.push(flags::LITERAL_WITH_INDEXING_FLAG | encoded_name_index.prefix);
if let Some(rest) = encoded_name_index.rest {
target.extend(rest);
}
trace!("{:?}", string::encode(String::from(header_value), use_huffman_coding));
target.extend(string::encode(String::from(header_value), use_huffman_coding));
}
fn | (field: &table::Field, use_huffman_coding: bool, target: &mut Vec<u8>) {
target.push(flags::LITERAL_WITH_INDEXING_FLAG);
target.extend(string::encode(String::from(field.name.clone()), use_huffman_coding));
target.extend(string::encode(String::from(field.value.clone()), use_huffman_coding));
}
fn pack_literal_without_indexing_with_indexed_name(index: usize, header_value: &str, use_huffman_coding: bool, target: &mut Vec<u8>) {
trace!("index to use {}", index);
let encoded_name_index = number::encode(index as u32, 4);
trace!("prefix {}", encoded_name_index.prefix);
target.push((!flags::LITERAL_WITH_INDEXING_FLAG) & encoded_name_index.prefix);
if let Some(rest) = encoded_name_index.rest {
target.extend(rest);
}
target.extend(string::encode(String::from(header_value.clone()), use_huffman_coding));
}
fn pack_literal_without_indexing(field: &table::Field, use_huffman_coding: bool, target: &mut Vec<u8>) {
target.push(0u8);
target.extend(string::encode(String::from(field.name.clone()), use_huffman_coding));
target.extend(string::encode(String::from(field.value.clone()), use_huffman_coding));
}
fn pack_literal_never_indexed_with_indexed_name(index: usize, header_value: &str, target: &mut Vec<u8>) {
let encoded_name_index = number::encode(index as u32, 4);
target.push(flags::LITERAL_NEVER_INDEX_FLAG | encoded_name_index.prefix);
if let Some(rest) = encoded_name_index.rest {
target.extend(rest);
}
// field should not be compressed... which means not indexed but the spec is not clear
// what should be done with regard to huffman coding.
// deliberately do not allow override of huffman coding for the value
target.extend(string::encode(String::from(header_value.clone()), false));
}
fn pack_literal_never_indexed(field: &table::Field, use_huffman_coding: bool, target: &mut Vec<u8>) {
target.push(flags::LITERAL_NEVER_INDEX_FLAG);
target.extend(string::encode(String::from(field.name.clone()), use_huffman_coding));
// deliberately do not allow override of huffman coding for the value
target.extend(string::encode(String::from(field.value.clone()), false));
}
fn pack_dynamic_table_size_update(size_update: u32, target: &mut Vec<u8>) {
let encoded_size_update = number::encode(size_update, 5);
target.push(flags::SIZE_UPDATE_FLAG | encoded_size_update.prefix);
if let Some(rest) = encoded_size_update.rest {
target.extend(rest);
}
}
| pack_literal_with_indexing | identifier_name |
pack.rs | // Copyright 2017 ThetaSinner
//
// This file is part of Osmium.
// Osmium is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Osmium is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Osmium. If not, see <http://www.gnu.org/licenses/>.
// std
use std::slice;
// osmium
use http2::hpack::context::{self, ContextTrait};
use http2::hpack::flags;
use http2::hpack::number;
use http2::hpack::string;
use http2::hpack::table;
use http2::hpack::header_trait;
const NEVER_INDEXED: [String; 0] = [
// Date is a temporary example of a never indexed header
//header::HeaderName::Date
];
const LITERAL_WITHOUT_INDEXING: [&str; 1] = [
// Path is an example of a header with common values that should be indexed, e.g. '/' and '/index.html',
// but also many values that should not be fully indexed; for those only the header name is indexed
":path"
];
// TODO the table size update needs to come through here I think?
// how would that work.. hopefully there'll be an example in the spec
// TODO per header huffman coding setting?
// TODO comments need updating, they're still the ones I wrote while puzzling out the encoder.
pub fn pack<T>(headers: slice::Iter<T>, context: &mut context::SendContext, use_huffman_coding: bool) -> Vec<u8>
where T: header_trait::HpackHeaderTrait
{
let mut target = Vec::new();
// Check whether a decision has been made to change the dynamic table size.
if let Some(size_update) = context.get_size_update() {
// Update the size of the dynamic table used by the send context. This may cause evictions
// if the size is reduced.
// This could be done as soon as the decision to change the size is made, which might free
// up memory sooner. However, doing it here means that the change is always made at the same
// time as the signal to the remote table is created.
// TODO handle error here if the size_update is larger than the allowed size?
// TODO why is that taking usize?
context.set_max_size(size_update as usize);
// The size update signal is sent to the remote decoding table.
pack_dynamic_table_size_update(size_update, &mut target);
}
for header in headers {
let field = table::Field {
name: String::from(header.get_name()),
value: String::from(header.get_value())
};
trace!("{:?}", field);
if !header.is_allow_compression() || is_never_index_header(&field.name) {
// TODO it's really not clever to have to clone the value here to build a field for search.
// especially as find_field is never used without a Header available.
if let Some((index, _)) = context.find_field(&field) {
pack_literal_never_indexed_with_indexed_name(index, &field.value, &mut target);
}
else {
pack_literal_never_indexed(&field, use_huffman_coding, &mut target);
}
}
else {
trace!("okay, so we need to work out how to index this one");
if let Some((index, with_value)) = context.find_field(&field) {
trace!("found a field, with index {} and with value present {}", index, with_value);
// header name is indexed and value is indexed as indicated by with_value.
if with_value {
pack_indexed_header(index, &mut target);
}
else {
trace!("is indexed, but not with value");
// the value is not currently indexed, we could index and allow the value to be added to the
// dynamic table in the decoder, or we could not index and just refer to this header name.
if is_literal_without_indexing_header(&field.name) {
trace!("pack without indexing");
pack_literal_without_indexing_with_indexed_name(index, &field.value, use_huffman_coding, &mut target);
}
else {
pack_literal_with_indexing_with_indexed_name(index, &field.value, use_huffman_coding, &mut target);
context.insert(field);
}
}
}
else {
trace!("not found, start from scratch");
// header name is not currently indexed, we can index it now, or send a literal representation.
if is_literal_without_indexing_header(&field.name) {
pack_literal_without_indexing(&field, use_huffman_coding, &mut target);
}
else {
pack_literal_with_indexing(&field, use_huffman_coding, &mut target);
context.insert(field);
}
}
}
}
target
}
fn is_never_index_header(header_name: &str) -> bool {
for never_index_header_name in NEVER_INDEXED.into_iter() {
if header_name == never_index_header_name {
return true;
}
}
false
}
fn is_literal_without_indexing_header(header_name: &String) -> bool {
for literal_without_indexing_header_name in LITERAL_WITHOUT_INDEXING.into_iter() {
if header_name == literal_without_indexing_header_name {
return true;
}
}
false
}
fn pack_indexed_header(index: usize, target: &mut Vec<u8>) {
let encoded_index = number::encode(index as u32, 7);
target.push(flags::INDEXED_HEADER_FLAG | encoded_index.prefix);
if let Some(rest) = encoded_index.rest {
target.extend(rest);
}
trace!("packed indexed header with index {}, {:?}", index, target);
}
fn pack_literal_with_indexing_with_indexed_name(index: usize, header_value: &str, use_huffman_coding: bool, target: &mut Vec<u8>) {
let encoded_name_index = number::encode(index as u32, 6);
target.push(flags::LITERAL_WITH_INDEXING_FLAG | encoded_name_index.prefix);
if let Some(rest) = encoded_name_index.rest {
target.extend(rest);
}
trace!("{:?}", string::encode(String::from(header_value), use_huffman_coding));
target.extend(string::encode(String::from(header_value), use_huffman_coding));
}
fn pack_literal_with_indexing(field: &table::Field, use_huffman_coding: bool, target: &mut Vec<u8>) {
target.push(flags::LITERAL_WITH_INDEXING_FLAG);
target.extend(string::encode(String::from(field.name.clone()), use_huffman_coding));
target.extend(string::encode(String::from(field.value.clone()), use_huffman_coding));
}
fn pack_literal_without_indexing_with_indexed_name(index: usize, header_value: &str, use_huffman_coding: bool, target: &mut Vec<u8>) |
fn pack_literal_without_indexing(field: &table::Field, use_huffman_coding: bool, target: &mut Vec<u8>) {
target.push(0u8);
target.extend(string::encode(String::from(field.name.clone()), use_huffman_coding));
target.extend(string::encode(String::from(field.value.clone()), use_huffman_coding));
}
fn pack_literal_never_indexed_with_indexed_name(index: usize, header_value: &str, target: &mut Vec<u8>) {
let encoded_name_index = number::encode(index as u32, 4);
target.push(flags::LITERAL_NEVER_INDEX_FLAG | encoded_name_index.prefix);
if let Some(rest) = encoded_name_index.rest {
target.extend(rest);
}
// field should not be compressed... which means not indexed but the spec is not clear
// what should be done with regard to huffman coding.
// deliberately do not allow override of huffman coding for the value
target.extend(string::encode(String::from(header_value.clone()), false));
}
fn pack_literal_never_indexed(field: &table::Field, use_huffman_coding: bool, target: &mut Vec<u8>) {
target.push(flags::LITERAL_NEVER_INDEX_FLAG);
target.extend(string::encode(String::from(field.name.clone()), use_huffman_coding));
// deliberately do not allow override of huffman coding for the value
target.extend(string::encode(String::from(field.value.clone()), false));
}
fn pack_dynamic_table_size_update(size_update: u32, target: &mut Vec<u8>) {
let encoded_size_update = number::encode(size_update, 5);
target.push(flags::SIZE_UPDATE_FLAG | encoded_size_update.prefix);
if let Some(rest) = encoded_size_update.rest {
target.extend(rest);
}
}
| {
trace!("index to use {}", index);
let encoded_name_index = number::encode(index as u32, 4);
trace!("prefix {}", encoded_name_index.prefix);
target.push((!flags::LITERAL_WITH_INDEXING_FLAG) & encoded_name_index.prefix);
if let Some(rest) = encoded_name_index.rest {
target.extend(rest);
}
target.extend(string::encode(String::from(header_value.clone()), use_huffman_coding));
} | identifier_body |
in6addr.rs | // Copyright © 2015-2017 winapi-rs developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed | // except according to those terms.
use shared::minwindef::{UCHAR, USHORT};
UNION!{union in6_addr_u {
[u16; 8],
Byte Byte_mut: [UCHAR; 16],
Word Word_mut: [USHORT; 8],
}}
STRUCT!{struct in6_addr {
u: in6_addr_u,
}}
pub type IN6_ADDR = in6_addr;
pub type PIN6_ADDR = *mut IN6_ADDR;
pub type LPIN6_ADDR = *mut IN6_ADDR; | random_line_split |
|
common.rs | use bindings::types::*;
/* automatically generated by rust-bindgen */
pub type mraa_boolean_t = ::libc::c_uint;
#[link(name = "mraa")]
extern "C" { | -> mraa_boolean_t;
pub fn mraa_adc_raw_bits() -> ::libc::c_uint;
pub fn mraa_adc_supported_bits() -> ::libc::c_uint;
pub fn mraa_set_log_level(level: ::libc::c_int) -> mraa_result_t;
pub fn mraa_get_platform_name() -> *mut ::libc::c_char;
pub fn mraa_set_priority(priority: ::libc::c_uint) -> ::libc::c_int;
pub fn mraa_get_version() -> *const ::libc::c_char;
pub fn mraa_result_print(result: mraa_result_t) -> ();
pub fn mraa_get_platform_type() -> mraa_platform_t;
pub fn mraa_get_pin_count() -> ::libc::c_uint;
pub fn mraa_get_pin_name(pin: ::libc::c_int) -> *mut ::libc::c_char;
} | pub fn mraa_init() -> mraa_result_t;
pub fn mraa_deinit() -> ();
pub fn mraa_pin_mode_test(pin: ::libc::c_int, mode: mraa_pinmodes_t) | random_line_split |
map_renderer.rs | /*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/state/game/map_renderer.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
A client-only state that depends on
the shared game state. This state is
used only to render the voxel map.
*/
use std::{ vec, ptr, mem, cast, cell };
use extra;
use gl2 = opengles::gl2;
use gfx;
use math;
use obj;
use obj::voxel;
use log::Log;
use console;
use super::{ State, Director, Deferred };
#[macro_escape]
#[path = "../../gfx/check.rs"]
mod check;
#[macro_escape]
#[path = "../../../shared/log/macros.rs"]
mod macros;
pub struct Map_Renderer
{
map: @mut obj::Voxel_Map,
vao: gl2::GLuint,
vox_vbo: gl2::GLuint,
offset_tex_vbo: gl2::GLuint,
offset_tex: gl2::GLuint,
ibos: ~[gl2::GLuint],
curr_ibo: u32,
visible_voxels: Option<~[u32]>,
prev_visible_voxel_count: u32,
/* states, visible */
map_stream: extra::comm::DuplexStream<(cell::Cell<~[u32]>, cell::Cell<~[u32]>), (~[u32], ~[u32])>,
wireframe: bool,
shader: @mut gfx::Shader,
proj_loc: gl2::GLint,
world_loc: gl2::GLint,
voxel_size_loc: gl2::GLint,
offsets_loc: gl2::GLint,
}
impl Map_Renderer
{
pub fn new(map: @mut obj::Voxel_Map) -> @mut Map_Renderer
{
let (local_stream, _) = extra::comm::DuplexStream();
let mr = @mut Map_Renderer
{
map: map,
vao: 0,
vox_vbo: 0,
offset_tex_vbo: 0,
offset_tex: 0,
ibos: vec::from_elem(2, 2u32),
curr_ibo: 0,
visible_voxels: Some(vec::from_elem((map.resolution * map.resolution * map.resolution) as uint, 0u32)),
prev_visible_voxel_count: 0,
map_stream: local_stream,
wireframe: false,
shader: gfx::Shader_Builder::new_with_files("data/shaders/voxel.vert", "data/shaders/voxel.frag"),
proj_loc: 0,
world_loc: 0,
voxel_size_loc: 0,
offsets_loc: 0,
};
/* Single voxel that will be instance-rendered. */
let h: f32 = mr.map.voxel_size / 2.0;
let voxel: ~[f32] = /* TRIANGLE_STRIP style. */
~[
-h,-h,h, h,-h,h,
-h,h,h, h,h,h,
h,-h,h, h,-h,-h,
h,h,h, h,h,-h,
h,-h,-h, -h,-h,-h,
h,h,-h, -h,h,-h,
-h,-h,-h, -h,-h,h,
-h,h,-h, -h,h,h,
-h,-h,-h, h,-h,-h,
-h,-h,h, h,-h,h,
-h,h,h, h,h,h,
-h,h,-h, h,h,-h,
];
let names = check!(gl2::gen_vertex_arrays(1));
log_assert!(names.len() == 1);
mr.vao = names[0];
let names = check!(gl2::gen_buffers(4));
log_assert!(names.len() == 4);
mr.vox_vbo = names[0];
mr.offset_tex_vbo = names[1];
mr.ibos[0] = names[2];
mr.ibos[1] = names[3];
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.vox_vbo));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, voxel, gl2::STATIC_DRAW));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[0]));
let ibo_buf = vec::from_elem((mr.map.resolution * mr.map.resolution * mr.map.resolution) as uint, 0);
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, ibo_buf, gl2::DYNAMIC_DRAW));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[1]));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, ibo_buf, gl2::DYNAMIC_DRAW));
check!(gl2::bind_buffer(gl2::TEXTURE_BUFFER, mr.offset_tex_vbo));
check!(gl2::buffer_data(gl2::TEXTURE_BUFFER, mr.map.voxels, gl2::STATIC_DRAW));
/* Setup vertex attribs. */
check!(gl2::bind_vertex_array(mr.vao));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.vox_vbo));
check!(gl2::vertex_attrib_pointer_f32(0, 3, false, 0, 0));
check!(gl2::enable_vertex_attrib_array(0));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[0]));
check!(gl2::vertex_attrib_i_pointer_i32(1, 1, 0, 0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_divisor(1, 1));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[1]));
check!(gl2::vertex_attrib_i_pointer_i32(1, 1, 0, 0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_divisor(1, 1));
/* Generate buffer texture. */
let name = check!(gl2::gen_textures(1));
log_assert!(name.len() == 1);
mr.offset_tex = name[0];
check!(gl2::bind_texture(gl2::TEXTURE_BUFFER, mr.offset_tex));
check!(gl2::tex_buffer(gl2::TEXTURE_BUFFER, 0x8815 /* RGB32F */, mr.offset_tex_vbo));
/* Console functions. */
struct Tmp_Deferred
{ mr: @mut Map_Renderer }
impl Deferred for Tmp_Deferred
{
fn call(&mut self)
{
console::Console::get().add_accessor("map.wireframe", self.mr as @console::Accessor);
console::Console::get().add_mutator("map.wireframe", self.mr as @mut console::Mutator);
}
}
Director::push_deferred(@mut Tmp_Deferred{ mr: mr } as @mut Deferred);
mr
}
#[fixed_stack_segment]
pub fn | (&mut self)
{
self.prev_visible_voxel_count = self.visible_voxels.get_ref().len() as u32;
let cam = gfx::Camera::get_active();
let dist = (cam.near_far.y / self.map.voxel_size) as i32; /* How far the camera can see. */
let res = self.map.resolution as f32;
let pos = math::Vec3f::new(cam.position.x / self.map.voxel_size,
cam.position.y / self.map.voxel_size,
cam.position.z / self.map.voxel_size)
+ math::Vec3f::new(res / 2.0, res / 2.0, res / 2.0);
let start = math::Vec3i::new
(
(pos.x - dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.y - dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.z - dist as f32).clamp(&0.0, &(res - 1.0)) as i32
);
let end = math::Vec3i::new
(
(pos.x + dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.y + dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.z + dist as f32).clamp(&0.0, &(res - 1.0)) as i32
);
self.visible_voxels.get_mut_ref().clear();
/* Updating visible voxels is an expensive task. To remedy this,
* the work is done on a background thread that has a shared OpenGL
* context. While that work is being done, the map renderer will
* not have visible voxels or voxel states, since they're moved
* to the task. Once the task is finished, however, the fields are
* sent back. */
let (local_stream, remote_stream) = extra::comm::DuplexStream();
self.map_stream = local_stream;
/* Send out the voxel states and visible voxels. */
self.map_stream.send((cell::Cell::new(self.map.states.take_unwrap()), cell::Cell::new(self.visible_voxels.take_unwrap())));
/* Start the new background task of culling far-away voxels. */
let resolution = self.map.resolution;
let ibo = self.ibos[self.curr_ibo];
do gfx::Worker::new_task
{
let (cell_states, cell_visible_voxels) = remote_stream.recv();
let states = cell_states.take();
let mut visible_voxels = cell_visible_voxels.take();
for z in range(start.z, end.z)
{
for y in range(start.y, end.y)
{
for x in range(start.x, end.x)
{
let index = (z * ((resolution * resolution) as i32)) + (y * (resolution as i32)) + x;
if (states[index] & voxel::Visible) != 0
{ visible_voxels.push(states[index] & !voxel::Visible); }
}
}
}
/* Upload the data to the inactive buffer. */
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, ibo));
unsafe
{
let size = visible_voxels.len() * mem::size_of::<u32>();
let mem = check!(gl2::map_buffer_range(gl2::ARRAY_BUFFER, 0, size as i64, gl2::MAP_WRITE_BIT));
log_assert!(mem != ptr::null());
ptr::copy_nonoverlapping_memory(cast::transmute(mem), vec::raw::to_ptr(visible_voxels), size);
check!(gl2::unmap_buffer(gl2::ARRAY_BUFFER));
}
/* Send the member data back. */
remote_stream.send((states, visible_voxels));
false /* Don't kill the GL worker. */
}
}
}
impl State for Map_Renderer
{
fn load(&mut self)
{
log_debug!("Loading map renderer state");
self.shader.bind();
self.proj_loc = self.shader.get_uniform_location("proj");
self.world_loc = self.shader.get_uniform_location("world");
self.voxel_size_loc = self.shader.get_uniform_location("voxel_size");
self.offsets_loc = self.shader.get_uniform_location("offsets");
self.shader.update_uniform_i32(self.offsets_loc, 0);
self.update_visibility();
}
fn unload(&mut self)
{
log_debug!("Unloading map renderer state");
/* Since the background worker is doing its thing, we'll
* need to wait for it to finish so that it doesn't try
* to update us when we're dead. */
let (states, visible_voxels) = self.map_stream.recv();
self.map.states = Some(states);
self.visible_voxels = Some(visible_voxels);
/* Cleanup GL. */
check!(gl2::delete_vertex_arrays(&[self.vao]));
check!(gl2::delete_buffers(&[self.vox_vbo, self.offset_tex_vbo,
self.ibos[0], self.ibos[1]]));
}
fn get_key(&self) -> &str
{ &"map_renderer" }
fn update(&mut self, _delta: f32) -> bool /* dt is in terms of seconds. */
{
/* Check if there is data available between the background
* thread and us. The last thing it does is send back some
* member data that we'll need to put back in place before
* doing any more work. */
if !self.map_stream.peek()
{ return false; }
/* Extract the new data. */
let (states, visible_voxels) = self.map_stream.recv();
self.map.states = Some(states);
self.visible_voxels = Some(visible_voxels);
/* TODO: Work goes here. */
/* Swap the current IBO and begin updating the old one. */
if self.curr_ibo == 0
{ self.curr_ibo = 1; }
else
{ self.curr_ibo = 0; }
self.update_visibility();
false
}
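/* Render the visible voxels with a single instanced draw: upload the camera
 * matrices and voxel size as uniforms, bind the instance buffer that the
 * worker is not currently filling, and draw the 24-vertex triangle strip of
 * one voxel once per visible instance, optionally in wireframe. */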
fn render(&mut self) -> bool
{
let camera = gfx::Camera::get_active();
self.shader.bind();
self.shader.update_uniform_mat(self.proj_loc, &camera.projection);
self.shader.update_uniform_mat(self.world_loc, &camera.view);
self.shader.update_uniform_f32(self.voxel_size_loc, self.map.voxel_size);
check!(gl2::bind_vertex_array(self.vao));
if self.curr_ibo == 0
{ check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.ibos[1])); }
else
{ check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.ibos[0])); }
check!(gl2::vertex_attrib_i_pointer_i32(1, 1, 0, 0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_divisor(1, 1));
check!(gl2::bind_texture(gl2::TEXTURE_BUFFER, self.offset_tex));
if self.wireframe
{ check!(gl2::polygon_mode(gl2::FRONT_AND_BACK, gl2::LINE)); }
check!(gl2::draw_arrays_instanced(gl2::TRIANGLE_STRIP, 0, 24, self.prev_visible_voxel_count as i32));
if self.wireframe
{ check!(gl2::polygon_mode(gl2::FRONT_AND_BACK, gl2::FILL)); }
check!(gl2::bind_vertex_array(0));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, 0));
false
}
}
impl console::Accessor for Map_Renderer
{
fn access(&self, name: &str) -> ~str
{
match name
{
"map.wireframe" =>
{ self.wireframe.to_str() }
_ => ~"ERROR"
}
}
}
impl console::Mutator for Map_Renderer
{
fn mutate(&mut self, name: &str, val: &str) -> Option<~str>
{
match name
{
"map.wireframe" =>
{
let res = console::Util::parse_bool(name, val);
match res
{
Ok(val) => { self.wireframe = val; None },
Err(msg) => { Some(msg) }
}
}
_ => Some(~"ERROR"),
}
}
}
| update_visibility | identifier_name |
map_renderer.rs | /*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/state/game/map_renderer.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
A client-only state that depends on
the shared game state. This state is
used only to render the voxel map.
*/
use std::{ vec, ptr, mem, cast, cell };
use extra;
use gl2 = opengles::gl2;
use gfx;
use math;
use obj;
use obj::voxel;
use log::Log;
use console;
use super::{ State, Director, Deferred };
#[macro_escape]
#[path = "../../gfx/check.rs"]
mod check;
#[macro_escape]
#[path = "../../../shared/log/macros.rs"]
mod macros;
pub struct Map_Renderer
{
map: @mut obj::Voxel_Map,
vao: gl2::GLuint,
vox_vbo: gl2::GLuint,
offset_tex_vbo: gl2::GLuint,
offset_tex: gl2::GLuint,
ibos: ~[gl2::GLuint],
curr_ibo: u32,
visible_voxels: Option<~[u32]>,
prev_visible_voxel_count: u32,
/* states, visible */
map_stream: extra::comm::DuplexStream<(cell::Cell<~[u32]>, cell::Cell<~[u32]>), (~[u32], ~[u32])>,
wireframe: bool,
shader: @mut gfx::Shader,
proj_loc: gl2::GLint,
world_loc: gl2::GLint,
voxel_size_loc: gl2::GLint,
offsets_loc: gl2::GLint,
}
impl Map_Renderer
{
pub fn new(map: @mut obj::Voxel_Map) -> @mut Map_Renderer
{
let (local_stream, _) = extra::comm::DuplexStream();
let mr = @mut Map_Renderer
{
map: map,
vao: 0,
vox_vbo: 0,
offset_tex_vbo: 0,
offset_tex: 0,
ibos: vec::from_elem(2, 2u32),
curr_ibo: 0,
visible_voxels: Some(vec::from_elem((map.resolution * map.resolution * map.resolution) as uint, 0u32)),
prev_visible_voxel_count: 0,
map_stream: local_stream,
wireframe: false,
shader: gfx::Shader_Builder::new_with_files("data/shaders/voxel.vert", "data/shaders/voxel.frag"),
proj_loc: 0,
world_loc: 0,
voxel_size_loc: 0,
offsets_loc: 0,
};
/* Single voxel that will be instance-rendered. */
let h: f32 = mr.map.voxel_size / 2.0;
let voxel: ~[f32] = /* TRIANGLE_STRIP style. */
~[
-h,-h,h, h,-h,h,
-h,h,h, h,h,h,
h,-h,h, h,-h,-h,
h,h,h, h,h,-h,
h,-h,-h, -h,-h,-h,
h,h,-h, -h,h,-h,
-h,-h,-h, -h,-h,h,
-h,h,-h, -h,h,h,
-h,-h,-h, h,-h,-h,
-h,-h,h, h,-h,h,
-h,h,h, h,h,h,
-h,h,-h, h,h,-h,
];
let names = check!(gl2::gen_vertex_arrays(1));
log_assert!(names.len() == 1);
mr.vao = names[0];
let names = check!(gl2::gen_buffers(4));
log_assert!(names.len() == 4);
mr.vox_vbo = names[0];
mr.offset_tex_vbo = names[1];
mr.ibos[0] = names[2];
mr.ibos[1] = names[3];
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.vox_vbo));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, voxel, gl2::STATIC_DRAW));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[0]));
let ibo_buf = vec::from_elem((mr.map.resolution * mr.map.resolution * mr.map.resolution) as uint, 0);
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, ibo_buf, gl2::DYNAMIC_DRAW));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[1]));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, ibo_buf, gl2::DYNAMIC_DRAW));
check!(gl2::bind_buffer(gl2::TEXTURE_BUFFER, mr.offset_tex_vbo));
check!(gl2::buffer_data(gl2::TEXTURE_BUFFER, mr.map.voxels, gl2::STATIC_DRAW));
/* Setup vertex attribs. */
check!(gl2::bind_vertex_array(mr.vao));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.vox_vbo));
check!(gl2::vertex_attrib_pointer_f32(0, 3, false, 0, 0));
check!(gl2::enable_vertex_attrib_array(0));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[0]));
check!(gl2::vertex_attrib_i_pointer_i32(1, 1, 0, 0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_divisor(1, 1));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[1]));
check!(gl2::vertex_attrib_i_pointer_i32(1, 1, 0, 0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_divisor(1, 1));
/* Generate buffer texture. */
let name = check!(gl2::gen_textures(1));
log_assert!(name.len() == 1);
mr.offset_tex = name[0];
check!(gl2::bind_texture(gl2::TEXTURE_BUFFER, mr.offset_tex));
check!(gl2::tex_buffer(gl2::TEXTURE_BUFFER, 0x8815 /* RGB32F */, mr.offset_tex_vbo));
/* Console functions. */
struct Tmp_Deferred
{ mr: @mut Map_Renderer }
impl Deferred for Tmp_Deferred
{
fn call(&mut self)
{
console::Console::get().add_accessor("map.wireframe", self.mr as @console::Accessor);
console::Console::get().add_mutator("map.wireframe", self.mr as @mut console::Mutator);
}
}
Director::push_deferred(@mut Tmp_Deferred{ mr: mr } as @mut Deferred);
mr
}
#[fixed_stack_segment]
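/* Rebuild the list of voxels within the camera's draw distance. The voxel
 * states and the visibility list are lent to a background GL worker through
 * the duplex stream; the worker culls far-away voxels, uploads the surviving
 * entries into the instance buffer that is not currently being drawn, and
 * sends the borrowed data back for update() to reclaim. */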
pub fn update_visibility(&mut self)
{
self.prev_visible_voxel_count = self.visible_voxels.get_ref().len() as u32;
let cam = gfx::Camera::get_active();
let dist = (cam.near_far.y / self.map.voxel_size) as i32; /* How far the camera can see. */
let res = self.map.resolution as f32;
let pos = math::Vec3f::new(cam.position.x / self.map.voxel_size,
cam.position.y / self.map.voxel_size,
cam.position.z / self.map.voxel_size)
+ math::Vec3f::new(res / 2.0, res / 2.0, res / 2.0);
let start = math::Vec3i::new
(
(pos.x - dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.y - dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.z - dist as f32).clamp(&0.0, &(res - 1.0)) as i32
);
let end = math::Vec3i::new
(
(pos.x + dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.y + dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.z + dist as f32).clamp(&0.0, &(res - 1.0)) as i32
);
self.visible_voxels.get_mut_ref().clear();
/* Updating visible voxels is an expensive task. To remedy this,
* the work is done on a background thread that has a shared OpenGL
* context. While that work is being done, the map renderer will
* not have visible voxels or voxel states, since they're moved
* to the task. Once the task is finished, however, the fields are
* sent back. */
let (local_stream, remote_stream) = extra::comm::DuplexStream();
self.map_stream = local_stream;
/* Send out the voxel states and visible voxels. */
self.map_stream.send((cell::Cell::new(self.map.states.take_unwrap()), cell::Cell::new(self.visible_voxels.take_unwrap())));
/* Start the new background task of culling far-away voxels. */
let resolution = self.map.resolution;
let ibo = self.ibos[self.curr_ibo];
do gfx::Worker::new_task
{
let (cell_states, cell_visible_voxels) = remote_stream.recv();
let states = cell_states.take();
let mut visible_voxels = cell_visible_voxels.take();
for z in range(start.z, end.z)
{
for y in range(start.y, end.y)
{
for x in range(start.x, end.x)
{
let index = (z * ((resolution * resolution) as i32)) + (y * (resolution as i32)) + x;
if (states[index] & voxel::Visible) != 0
{ visible_voxels.push(states[index] & !voxel::Visible); }
}
}
}
/* Upload the data to the inactive buffer. */
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, ibo));
unsafe
{
let size = visible_voxels.len() * mem::size_of::<u32>();
let mem = check!(gl2::map_buffer_range(gl2::ARRAY_BUFFER, 0, size as i64, gl2::MAP_WRITE_BIT));
log_assert!(mem != ptr::null());
ptr::copy_nonoverlapping_memory(cast::transmute(mem), vec::raw::to_ptr(visible_voxels), size);
check!(gl2::unmap_buffer(gl2::ARRAY_BUFFER));
}
/* Send the member data back. */
remote_stream.send((states, visible_voxels));
false /* Don't kill the GL worker. */
}
}
}
impl State for Map_Renderer
{
fn load(&mut self)
{
log_debug!("Loading map renderer state");
self.shader.bind();
self.proj_loc = self.shader.get_uniform_location("proj");
self.world_loc = self.shader.get_uniform_location("world");
self.voxel_size_loc = self.shader.get_uniform_location("voxel_size");
self.offsets_loc = self.shader.get_uniform_location("offsets");
self.shader.update_uniform_i32(self.offsets_loc, 0);
self.update_visibility();
}
fn unload(&mut self)
{
log_debug!("Unloading map renderer state");
/* Since the background worker is doing its thing, we'll
* need to wait for it to finish so that it doesn't try
* to update us when we're dead. */
let (states, visible_voxels) = self.map_stream.recv();
self.map.states = Some(states);
self.visible_voxels = Some(visible_voxels);
/* Cleanup GL. */
check!(gl2::delete_vertex_arrays(&[self.vao]));
check!(gl2::delete_buffers(&[self.vox_vbo, self.offset_tex_vbo,
self.ibos[0], self.ibos[1]]));
}
fn get_key(&self) -> &str
{ &"map_renderer" }
fn update(&mut self, _delta: f32) -> bool /* dt is in terms of seconds. */
{
/* Check if there is data available between the background
* thread and us. The last thing it does is send back some
* member data that we'll need to put back in place before
* doing any more work. */
if !self.map_stream.peek()
{ return false; }
/* Extract the new data. */
let (states, visible_voxels) = self.map_stream.recv();
self.map.states = Some(states);
self.visible_voxels = Some(visible_voxels);
/* TODO: Work goes here. */
/* Swap the current IBO and begin updating the old one. */
if self.curr_ibo == 0
{ self.curr_ibo = 1; }
else
{ self.curr_ibo = 0; }
self.update_visibility();
false
}
fn render(&mut self) -> bool
{
let camera = gfx::Camera::get_active();
self.shader.bind();
self.shader.update_uniform_mat(self.proj_loc, &camera.projection);
self.shader.update_uniform_mat(self.world_loc, &camera.view);
self.shader.update_uniform_f32(self.voxel_size_loc, self.map.voxel_size);
check!(gl2::bind_vertex_array(self.vao));
if self.curr_ibo == 0
{ check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.ibos[1])); }
else
{ check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.ibos[0])); }
check!(gl2::vertex_attrib_i_pointer_i32(1, 1, 0, 0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_divisor(1, 1));
check!(gl2::bind_texture(gl2::TEXTURE_BUFFER, self.offset_tex));
if self.wireframe
{ check!(gl2::polygon_mode(gl2::FRONT_AND_BACK, gl2::LINE)); }
check!(gl2::draw_arrays_instanced(gl2::TRIANGLE_STRIP, 0, 24, self.prev_visible_voxel_count as i32));
if self.wireframe
{ check!(gl2::polygon_mode(gl2::FRONT_AND_BACK, gl2::FILL)); }
check!(gl2::bind_vertex_array(0));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, 0));
false
}
}
impl console::Accessor for Map_Renderer
{
fn access(&self, name: &str) -> ~str
{
match name
{
"map.wireframe" =>
{ self.wireframe.to_str() }
_ => ~"ERROR"
}
}
}
impl console::Mutator for Map_Renderer
{
fn mutate(&mut self, name: &str, val: &str) -> Option<~str>
|
}
| {
match name
{
"map.wireframe" =>
{
let res = console::Util::parse_bool(name, val);
match res
{
Ok(val) => { self.wireframe = val; None },
Err(msg) => { Some(msg) }
}
}
_ => Some(~"ERROR"),
}
} | identifier_body |
map_renderer.rs | /*
Copyright 2013 Jesse 'Jeaye' Wilkerson | Author: Jesse 'Jeaye' Wilkerson
Description:
A client-only state that depends on
the shared game state. This state is
used only to render the voxel map.
*/
use std::{ vec, ptr, mem, cast, cell };
use extra;
use gl2 = opengles::gl2;
use gfx;
use math;
use obj;
use obj::voxel;
use log::Log;
use console;
use super::{ State, Director, Deferred };
#[macro_escape]
#[path = "../../gfx/check.rs"]
mod check;
#[macro_escape]
#[path = "../../../shared/log/macros.rs"]
mod macros;
pub struct Map_Renderer
{
map: @mut obj::Voxel_Map,
vao: gl2::GLuint,
vox_vbo: gl2::GLuint,
offset_tex_vbo: gl2::GLuint,
offset_tex: gl2::GLuint,
ibos: ~[gl2::GLuint],
curr_ibo: u32,
visible_voxels: Option<~[u32]>,
prev_visible_voxel_count: u32,
/* states, visible */
map_stream: extra::comm::DuplexStream<(cell::Cell<~[u32]>, cell::Cell<~[u32]>), (~[u32], ~[u32])>,
wireframe: bool,
shader: @mut gfx::Shader,
proj_loc: gl2::GLint,
world_loc: gl2::GLint,
voxel_size_loc: gl2::GLint,
offsets_loc: gl2::GLint,
}
impl Map_Renderer
{
pub fn new(map: @mut obj::Voxel_Map) -> @mut Map_Renderer
{
let (local_stream, _) = extra::comm::DuplexStream();
let mr = @mut Map_Renderer
{
map: map,
vao: 0,
vox_vbo: 0,
offset_tex_vbo: 0,
offset_tex: 0,
ibos: vec::from_elem(2, 2u32),
curr_ibo: 0,
visible_voxels: Some(vec::from_elem((map.resolution * map.resolution * map.resolution) as uint, 0u32)),
prev_visible_voxel_count: 0,
map_stream: local_stream,
wireframe: false,
shader: gfx::Shader_Builder::new_with_files("data/shaders/voxel.vert", "data/shaders/voxel.frag"),
proj_loc: 0,
world_loc: 0,
voxel_size_loc: 0,
offsets_loc: 0,
};
/* Single voxel that will be instance-rendered. */
let h: f32 = mr.map.voxel_size / 2.0;
let voxel: ~[f32] = /* TRIANGLE_STRIP style. */
~[
-h,-h,h, h,-h,h,
-h,h,h, h,h,h,
h,-h,h, h,-h,-h,
h,h,h, h,h,-h,
h,-h,-h, -h,-h,-h,
h,h,-h, -h,h,-h,
-h,-h,-h, -h,-h,h,
-h,h,-h, -h,h,h,
-h,-h,-h, h,-h,-h,
-h,-h,h, h,-h,h,
-h,h,h, h,h,h,
-h,h,-h, h,h,-h,
];
let names = check!(gl2::gen_vertex_arrays(1));
log_assert!(names.len() == 1);
mr.vao = names[0];
let names = check!(gl2::gen_buffers(4));
log_assert!(names.len() == 4);
mr.vox_vbo = names[0];
mr.offset_tex_vbo = names[1];
mr.ibos[0] = names[2];
mr.ibos[1] = names[3];
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.vox_vbo));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, voxel, gl2::STATIC_DRAW));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[0]));
let ibo_buf = vec::from_elem((mr.map.resolution * mr.map.resolution * mr.map.resolution) as uint, 0);
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, ibo_buf, gl2::DYNAMIC_DRAW));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[1]));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, ibo_buf, gl2::DYNAMIC_DRAW));
check!(gl2::bind_buffer(gl2::TEXTURE_BUFFER, mr.offset_tex_vbo));
check!(gl2::buffer_data(gl2::TEXTURE_BUFFER, mr.map.voxels, gl2::STATIC_DRAW));
/* Setup vertex attribs. */
check!(gl2::bind_vertex_array(mr.vao));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.vox_vbo));
check!(gl2::vertex_attrib_pointer_f32(0, 3, false, 0, 0));
check!(gl2::enable_vertex_attrib_array(0));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[0]));
check!(gl2::vertex_attrib_i_pointer_i32(1, 1, 0, 0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_divisor(1, 1));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[1]));
check!(gl2::vertex_attrib_i_pointer_i32(1, 1, 0, 0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_divisor(1, 1));
/* Generate buffer texture. */
let name = check!(gl2::gen_textures(1));
log_assert!(name.len() == 1);
mr.offset_tex = name[0];
check!(gl2::bind_texture(gl2::TEXTURE_BUFFER, mr.offset_tex));
check!(gl2::tex_buffer(gl2::TEXTURE_BUFFER, 0x8815 /* RGB32F */, mr.offset_tex_vbo));
/* Console functions. */
struct Tmp_Deferred
{ mr: @mut Map_Renderer }
impl Deferred for Tmp_Deferred
{
fn call(&mut self)
{
console::Console::get().add_accessor("map.wireframe", self.mr as @console::Accessor);
console::Console::get().add_mutator("map.wireframe", self.mr as @mut console::Mutator);
}
}
Director::push_deferred(@mut Tmp_Deferred{ mr: mr } as @mut Deferred);
mr
}
#[fixed_stack_segment]
pub fn update_visibility(&mut self)
{
self.prev_visible_voxel_count = self.visible_voxels.get_ref().len() as u32;
let cam = gfx::Camera::get_active();
let dist = (cam.near_far.y / self.map.voxel_size) as i32; /* How far the camera can see. */
let res = self.map.resolution as f32;
let pos = math::Vec3f::new(cam.position.x / self.map.voxel_size,
cam.position.y / self.map.voxel_size,
cam.position.z / self.map.voxel_size)
+ math::Vec3f::new(res / 2.0, res / 2.0, res / 2.0);
let start = math::Vec3i::new
(
(pos.x - dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.y - dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.z - dist as f32).clamp(&0.0, &(res - 1.0)) as i32
);
let end = math::Vec3i::new
(
(pos.x + dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.y + dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.z + dist as f32).clamp(&0.0, &(res - 1.0)) as i32
);
self.visible_voxels.get_mut_ref().clear();
/* Updating visible voxels is an expensive task. To remedy this,
* the work is done on a background thread that has a shared OpenGL
* context. While that work is being done, the map renderer will
* not have visible voxels or voxel states, since they're moved
* to the task. Once the task is finished, however, the fields are
* sent back. */
let (local_stream, remote_stream) = extra::comm::DuplexStream();
self.map_stream = local_stream;
/* Send out the voxel states and visible voxels. */
self.map_stream.send((cell::Cell::new(self.map.states.take_unwrap()), cell::Cell::new(self.visible_voxels.take_unwrap())));
/* Start the new background task of culling far-away voxels. */
let resolution = self.map.resolution;
let ibo = self.ibos[self.curr_ibo];
do gfx::Worker::new_task
{
let (cell_states, cell_visible_voxels) = remote_stream.recv();
let states = cell_states.take();
let mut visible_voxels = cell_visible_voxels.take();
for z in range(start.z, end.z)
{
for y in range(start.y, end.y)
{
for x in range(start.x, end.x)
{
let index = (z * ((resolution * resolution) as i32)) + (y * (resolution as i32)) + x;
if (states[index] & voxel::Visible) != 0
{ visible_voxels.push(states[index] & !voxel::Visible); }
}
}
}
/* Upload the data to the inactive buffer. */
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, ibo));
unsafe
{
let size = visible_voxels.len() * mem::size_of::<u32>();
let mem = check!(gl2::map_buffer_range(gl2::ARRAY_BUFFER, 0, size as i64, gl2::MAP_WRITE_BIT));
log_assert!(mem != ptr::null());
ptr::copy_nonoverlapping_memory(cast::transmute(mem), vec::raw::to_ptr(visible_voxels), size);
check!(gl2::unmap_buffer(gl2::ARRAY_BUFFER));
}
/* Send the member data back. */
remote_stream.send((states, visible_voxels));
false /* Don't kill the GL worker. */
}
}
}
impl State for Map_Renderer
{
fn load(&mut self)
{
log_debug!("Loading map renderer state");
self.shader.bind();
self.proj_loc = self.shader.get_uniform_location("proj");
self.world_loc = self.shader.get_uniform_location("world");
self.voxel_size_loc = self.shader.get_uniform_location("voxel_size");
self.offsets_loc = self.shader.get_uniform_location("offsets");
self.shader.update_uniform_i32(self.offsets_loc, 0);
self.update_visibility();
}
fn unload(&mut self)
{
log_debug!("Unloading map renderer state");
/* Since the background worker is doing its thing, we'll
* need to wait for it to finish so that it doesn't try
* to update us when we're dead. */
let (states, visible_voxels) = self.map_stream.recv();
self.map.states = Some(states);
self.visible_voxels = Some(visible_voxels);
/* Cleanup GL. */
check!(gl2::delete_vertex_arrays(&[self.vao]));
check!(gl2::delete_buffers(&[self.vox_vbo, self.offset_tex_vbo,
self.ibos[0], self.ibos[1]]));
}
fn get_key(&self) -> &str
{ &"map_renderer" }
fn update(&mut self, _delta: f32) -> bool /* dt is in terms of seconds. */
{
/* Check if there is data available between the background
* thread and us. The last thing it does is send back some
* member data that we'll need to put back in place before
* doing any more work. */
if !self.map_stream.peek()
{ return false; }
/* Extract the new data. */
let (states, visible_voxels) = self.map_stream.recv();
self.map.states = Some(states);
self.visible_voxels = Some(visible_voxels);
/* TODO: Work goes here. */
/* Swap the current IBO and begin updating the old one. */
if self.curr_ibo == 0
{ self.curr_ibo = 1; }
else
{ self.curr_ibo = 0; }
self.update_visibility();
false
}
fn render(&mut self) -> bool
{
let camera = gfx::Camera::get_active();
self.shader.bind();
self.shader.update_uniform_mat(self.proj_loc, &camera.projection);
self.shader.update_uniform_mat(self.world_loc, &camera.view);
self.shader.update_uniform_f32(self.voxel_size_loc, self.map.voxel_size);
check!(gl2::bind_vertex_array(self.vao));
if self.curr_ibo == 0
{ check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.ibos[1])); }
else
{ check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.ibos[0])); }
check!(gl2::vertex_attrib_i_pointer_i32(1, 1, 0, 0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_divisor(1, 1));
check!(gl2::bind_texture(gl2::TEXTURE_BUFFER, self.offset_tex));
if self.wireframe
{ check!(gl2::polygon_mode(gl2::FRONT_AND_BACK, gl2::LINE)); }
check!(gl2::draw_arrays_instanced(gl2::TRIANGLE_STRIP, 0, 24, self.prev_visible_voxel_count as i32));
if self.wireframe
{ check!(gl2::polygon_mode(gl2::FRONT_AND_BACK, gl2::FILL)); }
check!(gl2::bind_vertex_array(0));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, 0));
false
}
}
impl console::Accessor for Map_Renderer
{
fn access(&self, name: &str) -> ~str
{
match name
{
"map.wireframe" =>
{ self.wireframe.to_str() }
_ => ~"ERROR"
}
}
}
impl console::Mutator for Map_Renderer
{
fn mutate(&mut self, name: &str, val: &str) -> Option<~str>
{
match name
{
"map.wireframe" =>
{
let res = console::Util::parse_bool(name, val);
match res
{
Ok(val) => { self.wireframe = val; None },
Err(msg) => { Some(msg) }
}
}
_ => Some(~"ERROR"),
}
}
} | See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/state/game/map_renderer.rs | random_line_split |
map_renderer.rs | /*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/state/game/map_renderer.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
A client-only state that depends on
the shared game state. This state is
used only to render the voxel map.
*/
use std::{ vec, ptr, mem, cast, cell };
use extra;
use gl2 = opengles::gl2;
use gfx;
use math;
use obj;
use obj::voxel;
use log::Log;
use console;
use super::{ State, Director, Deferred };
#[macro_escape]
#[path = "../../gfx/check.rs"]
mod check;
#[macro_escape]
#[path = "../../../shared/log/macros.rs"]
mod macros;
pub struct Map_Renderer
{
map: @mut obj::Voxel_Map,
vao: gl2::GLuint,
vox_vbo: gl2::GLuint,
offset_tex_vbo: gl2::GLuint,
offset_tex: gl2::GLuint,
ibos: ~[gl2::GLuint],
curr_ibo: u32,
visible_voxels: Option<~[u32]>,
prev_visible_voxel_count: u32,
/* states, visible */
map_stream: extra::comm::DuplexStream<(cell::Cell<~[u32]>, cell::Cell<~[u32]>), (~[u32], ~[u32])>,
wireframe: bool,
shader: @mut gfx::Shader,
proj_loc: gl2::GLint,
world_loc: gl2::GLint,
voxel_size_loc: gl2::GLint,
offsets_loc: gl2::GLint,
}
impl Map_Renderer
{
pub fn new(map: @mut obj::Voxel_Map) -> @mut Map_Renderer
{
let (local_stream, _) = extra::comm::DuplexStream();
let mr = @mut Map_Renderer
{
map: map,
vao: 0,
vox_vbo: 0,
offset_tex_vbo: 0,
offset_tex: 0,
ibos: vec::from_elem(2, 2u32),
curr_ibo: 0,
visible_voxels: Some(vec::from_elem((map.resolution * map.resolution * map.resolution) as uint, 0u32)),
prev_visible_voxel_count: 0,
map_stream: local_stream,
wireframe: false,
shader: gfx::Shader_Builder::new_with_files("data/shaders/voxel.vert", "data/shaders/voxel.frag"),
proj_loc: 0,
world_loc: 0,
voxel_size_loc: 0,
offsets_loc: 0,
};
/* Single voxel that will be instance-rendered. */
let h: f32 = mr.map.voxel_size / 2.0;
let voxel: ~[f32] = /* TRIANGLE_STRIP style. */
~[
-h,-h,h, h,-h,h,
-h,h,h, h,h,h,
h,-h,h, h,-h,-h,
h,h,h, h,h,-h,
h,-h,-h, -h,-h,-h,
h,h,-h, -h,h,-h,
-h,-h,-h, -h,-h,h,
-h,h,-h, -h,h,h,
-h,-h,-h, h,-h,-h,
-h,-h,h, h,-h,h,
-h,h,h, h,h,h,
-h,h,-h, h,h,-h,
];
let names = check!(gl2::gen_vertex_arrays(1));
log_assert!(names.len() == 1);
mr.vao = names[0];
let names = check!(gl2::gen_buffers(4));
log_assert!(names.len() == 4);
mr.vox_vbo = names[0];
mr.offset_tex_vbo = names[1];
mr.ibos[0] = names[2];
mr.ibos[1] = names[3];
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.vox_vbo));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, voxel, gl2::STATIC_DRAW));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[0]));
let ibo_buf = vec::from_elem((mr.map.resolution * mr.map.resolution * mr.map.resolution) as uint, 0);
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, ibo_buf, gl2::DYNAMIC_DRAW));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[1]));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, ibo_buf, gl2::DYNAMIC_DRAW));
check!(gl2::bind_buffer(gl2::TEXTURE_BUFFER, mr.offset_tex_vbo));
check!(gl2::buffer_data(gl2::TEXTURE_BUFFER, mr.map.voxels, gl2::STATIC_DRAW));
/* Setup vertex attribs. */
check!(gl2::bind_vertex_array(mr.vao));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.vox_vbo));
check!(gl2::vertex_attrib_pointer_f32(0, 3, false, 0, 0));
check!(gl2::enable_vertex_attrib_array(0));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[0]));
check!(gl2::vertex_attrib_i_pointer_i32(1, 1, 0, 0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_divisor(1, 1));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, mr.ibos[1]));
check!(gl2::vertex_attrib_i_pointer_i32(1, 1, 0, 0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_divisor(1, 1));
/* Generate buffer texture. */
let name = check!(gl2::gen_textures(1));
log_assert!(name.len() == 1);
mr.offset_tex = name[0];
check!(gl2::bind_texture(gl2::TEXTURE_BUFFER, mr.offset_tex));
check!(gl2::tex_buffer(gl2::TEXTURE_BUFFER, 0x8815 /* RGB32F */, mr.offset_tex_vbo));
/* Console functions. */
struct Tmp_Deferred
{ mr: @mut Map_Renderer }
impl Deferred for Tmp_Deferred
{
fn call(&mut self)
{
console::Console::get().add_accessor("map.wireframe", self.mr as @console::Accessor);
console::Console::get().add_mutator("map.wireframe", self.mr as @mut console::Mutator);
}
}
Director::push_deferred(@mut Tmp_Deferred{ mr: mr } as @mut Deferred);
mr
}
#[fixed_stack_segment]
pub fn update_visibility(&mut self)
{
self.prev_visible_voxel_count = self.visible_voxels.get_ref().len() as u32;
let cam = gfx::Camera::get_active();
let dist = (cam.near_far.y / self.map.voxel_size) as i32; /* How far the camera can see. */
let res = self.map.resolution as f32;
let pos = math::Vec3f::new(cam.position.x / self.map.voxel_size,
cam.position.y / self.map.voxel_size,
cam.position.z / self.map.voxel_size)
+ math::Vec3f::new(res / 2.0, res / 2.0, res / 2.0);
let start = math::Vec3i::new
(
(pos.x - dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.y - dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.z - dist as f32).clamp(&0.0, &(res - 1.0)) as i32
);
let end = math::Vec3i::new
(
(pos.x + dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.y + dist as f32).clamp(&0.0, &(res - 1.0)) as i32,
(pos.z + dist as f32).clamp(&0.0, &(res - 1.0)) as i32
);
self.visible_voxels.get_mut_ref().clear();
/* Updating visible voxels is an expensive task. To remedy this,
* the work is done on a background thread that has a shared OpenGL
* context. While that work is being done, the map renderer will
* not have visible voxels or voxel states, since they're moved
* to the task. Once the task is finished, however, the fields are
* sent back. */
let (local_stream, remote_stream) = extra::comm::DuplexStream();
self.map_stream = local_stream;
/* Send out the voxel states and visible voxels. */
self.map_stream.send((cell::Cell::new(self.map.states.take_unwrap()), cell::Cell::new(self.visible_voxels.take_unwrap())));
/* Start the new background task of culling far-away voxels. */
let resolution = self.map.resolution;
let ibo = self.ibos[self.curr_ibo];
do gfx::Worker::new_task
{
let (cell_states, cell_visible_voxels) = remote_stream.recv();
let states = cell_states.take();
let mut visible_voxels = cell_visible_voxels.take();
for z in range(start.z, end.z)
{
for y in range(start.y, end.y)
{
for x in range(start.x, end.x)
{
let index = (z * ((resolution * resolution) as i32)) + (y * (resolution as i32)) + x;
if (states[index] & voxel::Visible)!= 0
|
}
}
}
/* Upload the data to the inactive buffer. */
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, ibo));
unsafe
{
let size = visible_voxels.len() * mem::size_of::<u32>();
let mem = check!(gl2::map_buffer_range(gl2::ARRAY_BUFFER, 0, size as i64, gl2::MAP_WRITE_BIT));
log_assert!(mem!= ptr::null());
ptr::copy_nonoverlapping_memory(cast::transmute(mem), vec::raw::to_ptr(visible_voxels), size);
check!(gl2::unmap_buffer(gl2::ARRAY_BUFFER));
}
/* Send the member data back. */
remote_stream.send((states, visible_voxels));
false /* Don't kill the GL worker. */
}
}
}
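// --- Editor's sketch (not part of the original source). ---
// The hand-off described in the comment inside update_visibility() above, where the
// voxel states and visible-voxel list are shipped to a worker and received back on the
// next update, maps onto std::sync::mpsc channels in modern Rust. Everything below
// (names included) is hypothetical and only illustrates the pattern.
#[allow(dead_code)]
mod visibility_handoff_sketch {
    use std::sync::mpsc;
    use std::thread;

    pub fn run() {
        // Two one-way channels play the role of the old extra::comm::DuplexStream.
        let (to_worker, worker_rx) = mpsc::channel::<Vec<u32>>();
        let (to_main, main_rx) = mpsc::channel::<Vec<u32>>();

        // Hand the visible-voxel buffer over to the culling worker...
        to_worker.send(vec![0u32; 8]).unwrap();
        let worker = thread::spawn(move || {
            let mut visible = worker_rx.recv().unwrap();
            visible.push(42); // stand-in for the actual culling work
            to_main.send(visible).unwrap();
        });

        // ...and take it back once the worker has finished, the way update() does
        // with peek()/recv() before swapping the instance buffers.
        let visible = main_rx.recv().unwrap();
        assert_eq!(visible.len(), 9);
        worker.join().unwrap();
    }
}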
impl State for Map_Renderer
{
fn load(&mut self)
{
log_debug!("Loading map renderer state");
self.shader.bind();
self.proj_loc = self.shader.get_uniform_location("proj");
self.world_loc = self.shader.get_uniform_location("world");
self.voxel_size_loc = self.shader.get_uniform_location("voxel_size");
self.offsets_loc = self.shader.get_uniform_location("offsets");
self.shader.update_uniform_i32(self.offsets_loc, 0);
self.update_visibility();
}
fn unload(&mut self)
{
log_debug!("Unloading map renderer state");
/* Since the background worker is doing its thing, we'll
* need to wait for it to finish so that it doesn't try
* to update us when we're dead. */
let (states, visible_voxels) = self.map_stream.recv();
self.map.states = Some(states);
self.visible_voxels = Some(visible_voxels);
/* Cleanup GL. */
check!(gl2::delete_vertex_arrays(&[self.vao]));
check!(gl2::delete_buffers(&[self.vox_vbo, self.offset_tex_vbo,
self.ibos[0], self.ibos[1]]));
}
fn get_key(&self) -> &str
{ &"map_renderer" }
fn update(&mut self, _delta: f32) -> bool /* dt is in terms of seconds. */
{
/* Check if there is data available between the background
* thread and us. The last thing it does is send back some
* member data that we'll need to put back in place before
* doing any more work. */
if!self.map_stream.peek()
{ return false; }
/* Extract the new data. */
let (states, visible_voxels) = self.map_stream.recv();
self.map.states = Some(states);
self.visible_voxels = Some(visible_voxels);
/* TODO: Work goes here. */
/* Swap the current IBO and begin updating the old one. */
if self.curr_ibo == 0
{ self.curr_ibo = 1; }
else
{ self.curr_ibo = 0; }
self.update_visibility();
false
}
fn render(&mut self) -> bool
{
let camera = gfx::Camera::get_active();
self.shader.bind();
self.shader.update_uniform_mat(self.proj_loc, &camera.projection);
self.shader.update_uniform_mat(self.world_loc, &camera.view);
self.shader.update_uniform_f32(self.voxel_size_loc, self.map.voxel_size);
check!(gl2::bind_vertex_array(self.vao));
if self.curr_ibo == 0
{ check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.ibos[1])); }
else
{ check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.ibos[0])); }
check!(gl2::vertex_attrib_i_pointer_i32(1, 1, 0, 0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_divisor(1, 1));
check!(gl2::bind_texture(gl2::TEXTURE_BUFFER, self.offset_tex));
if self.wireframe
{ check!(gl2::polygon_mode(gl2::FRONT_AND_BACK, gl2::LINE)); }
check!(gl2::draw_arrays_instanced(gl2::TRIANGLE_STRIP, 0, 24, self.prev_visible_voxel_count as i32));
if self.wireframe
{ check!(gl2::polygon_mode(gl2::FRONT_AND_BACK, gl2::FILL)); }
check!(gl2::bind_vertex_array(0));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, 0));
false
}
}
impl console::Accessor for Map_Renderer
{
fn access(&self, name: &str) -> ~str
{
match name
{
"map.wireframe" =>
{ self.wireframe.to_str() }
_ => ~"ERROR"
}
}
}
impl console::Mutator for Map_Renderer
{
fn mutate(&mut self, name: &str, val: &str) -> Option<~str>
{
match name
{
"map.wireframe" =>
{
let res = console::Util::parse_bool(name, val);
match res
{
Ok(val) => { self.wireframe = val; None },
Err(msg) => { Some(msg) }
}
}
_ => Some(~"ERROR"),
}
}
}
| { visible_voxels.push(states[index] & !voxel::Visible); } | conditional_block |
too-much-recursion-unwinding.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-test leaks
// error-pattern:ran out of stack
// Test that the thread panics after hitting the recursion limit
// during unwinding
fn recurse() {
println!("don't optimize me out");
recurse();
}
struct r {
recursed: *mut bool,
}
impl Drop for r {
fn drop(&mut self) {
unsafe {
if!*(self.recursed) {
*(self.recursed) = true;
recurse();
}
}
}
} |
fn r(recursed: *mut bool) -> r {
r { recursed: recursed }
}
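// --- Editor's sketch (not part of the original test). ---
// The test depends on destructors running while a panic is unwinding. The same fact
// can be observed directly in modern Rust with std::panic::catch_unwind; this helper
// name is made up and the snippet is only an illustration.
#[allow(dead_code)]
fn drop_runs_during_unwind_sketch() {
    use std::panic;
    use std::sync::atomic::{AtomicBool, Ordering};
    static DROPPED: AtomicBool = AtomicBool::new(false);
    struct Flag;
    impl Drop for Flag {
        fn drop(&mut self) {
            DROPPED.store(true, Ordering::SeqCst);
        }
    }
    let result = panic::catch_unwind(|| {
        let _flag = Flag;
        panic!("unwind"); // _flag's destructor runs while this panic unwinds
    });
    assert!(result.is_err());
    assert!(DROPPED.load(Ordering::SeqCst));
}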
fn main() {
let mut recursed = false;
let _r = r(&mut recursed);
recurse();
} | random_line_split |
|
too-much-recursion-unwinding.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-test leaks
// error-pattern:ran out of stack
// Test that the thread panics after hitting the recursion limit
// during unwinding
fn | () {
println!("don't optimize me out");
recurse();
}
struct r {
recursed: *mut bool,
}
impl Drop for r {
fn drop(&mut self) {
unsafe {
if!*(self.recursed) {
*(self.recursed) = true;
recurse();
}
}
}
}
fn r(recursed: *mut bool) -> r {
r { recursed: recursed }
}
fn main() {
let mut recursed = false;
let _r = r(&mut recursed);
recurse();
}
| recurse | identifier_name |
too-much-recursion-unwinding.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-test leaks
// error-pattern:ran out of stack
// Test that the thread panics after hitting the recursion limit
// during unwinding
fn recurse() {
println!("don't optimize me out");
recurse();
}
struct r {
recursed: *mut bool,
}
impl Drop for r {
fn drop(&mut self) {
unsafe {
if!*(self.recursed) |
}
}
}
fn r(recursed: *mut bool) -> r {
r { recursed: recursed }
}
fn main() {
let mut recursed = false;
let _r = r(&mut recursed);
recurse();
}
| {
*(self.recursed) = true;
recurse();
} | conditional_block |
committed.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The Committed trait and associated errors.
use failure::Fail;
use keychain::BlindingFactor;
use util::secp::key::SecretKey;
use util::secp::pedersen::Commitment;
use util::{secp, secp_static, static_secp_instance};
/// Errors from summing and verifying kernel excesses via committed trait.
#[derive(Debug, Clone, PartialEq, Eq, Fail, Serialize, Deserialize)]
pub enum Error {
/// Keychain related error.
#[fail(display = "Keychain error {}", _0)]
Keychain(keychain::Error),
/// Secp related error.
#[fail(display = "Secp error {}", _0)]
Secp(secp::Error),
/// Kernel sums do not equal output sums.
#[fail(display = "Kernel sum mismatch")]
KernelSumMismatch,
/// Committed overage (fee or reward) is invalid
#[fail(display = "Invalid value")]
InvalidValue,
}
impl From<secp::Error> for Error {
fn from(e: secp::Error) -> Error {
Error::Secp(e)
}
}
impl From<keychain::Error> for Error {
fn from(e: keychain::Error) -> Error {
Error::Keychain(e)
}
}
/// Implemented by types that hold inputs and outputs (and kernels)
/// containing Pedersen commitments.
/// Handles the collection of the commitments as well as their
/// summing, taking potential explicit overages of fees into account.
pub trait Committed {
/// Gather the kernel excesses and sum them.
fn sum_kernel_excesses(
&self,
offset: &BlindingFactor,
) -> Result<(Commitment, Commitment), Error> {
// then gather the kernel excess commitments
let kernel_commits = self.kernels_committed();
// sum the commitments
let kernel_sum = sum_commits(kernel_commits, vec![])?;
// sum the commitments along with the
// commit to zero built from the offset
let kernel_sum_plus_offset = {
let secp = static_secp_instance();
let secp = secp.lock();
let mut commits = vec![kernel_sum];
if *offset!= BlindingFactor::zero() {
let key = offset.secret_key(&secp)?;
let offset_commit = secp.commit(0, key)?;
commits.push(offset_commit);
}
secp.commit_sum(commits, vec![])?
};
Ok((kernel_sum, kernel_sum_plus_offset))
}
/// Gathers commitments and sum them.
fn sum_commitments(&self, overage: i64) -> Result<Commitment, Error> {
// gather the commitments
let mut input_commits = self.inputs_committed();
let mut output_commits = self.outputs_committed();
// add the overage as output commitment if positive,
// or as an input commitment if negative
if overage!= 0 {
let over_commit = {
let secp = static_secp_instance();
let secp = secp.lock();
let overage_abs = overage.checked_abs().ok_or_else(|| Error::InvalidValue)? as u64;
secp.commit_value(overage_abs).unwrap()
};
if overage < 0 | else {
output_commits.push(over_commit);
}
}
sum_commits(output_commits, input_commits)
}
/// Vector of input commitments to verify.
fn inputs_committed(&self) -> Vec<Commitment>;
/// Vector of output commitments to verify.
fn outputs_committed(&self) -> Vec<Commitment>;
/// Vector of kernel excesses to verify.
fn kernels_committed(&self) -> Vec<Commitment>;
/// Verify the sum of the kernel excesses equals the
/// sum of the outputs, taking into account both
/// the kernel_offset and overage.
fn verify_kernel_sums(
&self,
overage: i64,
kernel_offset: BlindingFactor,
) -> Result<(Commitment, Commitment), Error> {
// Sum all input|output|overage commitments.
let utxo_sum = self.sum_commitments(overage)?;
// Sum the kernel excesses accounting for the kernel offset.
let (kernel_sum, kernel_sum_plus_offset) = self.sum_kernel_excesses(&kernel_offset)?;
if utxo_sum!= kernel_sum_plus_offset {
return Err(Error::KernelSumMismatch);
}
Ok((utxo_sum, kernel_sum))
}
}
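// --- Editor's sketch (not part of the original source). ---
// Pedersen commitments are additively homomorphic, so the check in verify_kernel_sums
// above reduces, on the value side, to plain arithmetic: the values created by the
// outputs plus the overage (fee or reward) must exactly cancel the values spent by the
// inputs, leaving only blinding factors, which is what the kernel excesses plus the
// offset commit to. The toy function below mirrors that bookkeeping with i64s; it is a
// simplification for intuition only and not part of the Grin API.
#[allow(dead_code)]
fn toy_value_balance(inputs: &[i64], outputs: &[i64], overage: i64) -> bool {
    let spent: i64 = inputs.iter().sum();
    let created: i64 = outputs.iter().sum();
    // A transaction paying `overage` in fees creates exactly `overage` less value
    // than it spends, so the committed values sum to zero.
    created + overage - spent == 0
}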
/// Utility to sum positive and negative commitments, eliminating zero values
pub fn sum_commits(
mut positive: Vec<Commitment>,
mut negative: Vec<Commitment>,
) -> Result<Commitment, Error> {
let zero_commit = secp_static::commit_to_zero_value();
positive.retain(|x| *x!= zero_commit);
negative.retain(|x| *x!= zero_commit);
let secp = static_secp_instance();
let secp = secp.lock();
Ok(secp.commit_sum(positive, negative)?)
}
/// Utility function to take sets of positive and negative kernel offsets as
/// blinding factors, convert them to private keys, filter out zero values, and
/// sum them all. Useful when building blocks.
pub fn sum_kernel_offsets(
positive: Vec<BlindingFactor>,
negative: Vec<BlindingFactor>,
) -> Result<BlindingFactor, Error> {
let secp = static_secp_instance();
let secp = secp.lock();
let positive = to_secrets(positive, &secp);
let negative = to_secrets(negative, &secp);
if positive.is_empty() {
Ok(BlindingFactor::zero())
} else {
let sum = secp.blind_sum(positive, negative)?;
Ok(BlindingFactor::from_secret_key(sum))
}
}
fn to_secrets(bf: Vec<BlindingFactor>, secp: &secp::Secp256k1) -> Vec<SecretKey> {
bf.into_iter()
.filter(|x| *x!= BlindingFactor::zero())
.filter_map(|x| x.secret_key(&secp).ok())
.collect::<Vec<_>>()
}
| {
input_commits.push(over_commit);
} | conditional_block |
committed.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The Committed trait and associated errors.
use failure::Fail;
use keychain::BlindingFactor;
use util::secp::key::SecretKey;
use util::secp::pedersen::Commitment;
use util::{secp, secp_static, static_secp_instance};
/// Errors from summing and verifying kernel excesses via committed trait.
#[derive(Debug, Clone, PartialEq, Eq, Fail, Serialize, Deserialize)]
pub enum Error {
/// Keychain related error.
#[fail(display = "Keychain error {}", _0)]
Keychain(keychain::Error),
/// Secp related error.
#[fail(display = "Secp error {}", _0)]
Secp(secp::Error),
/// Kernel sums do not equal output sums.
#[fail(display = "Kernel sum mismatch")]
KernelSumMismatch,
/// Committed overage (fee or reward) is invalid
#[fail(display = "Invalid value")]
InvalidValue,
}
impl From<secp::Error> for Error {
fn from(e: secp::Error) -> Error {
Error::Secp(e)
}
}
impl From<keychain::Error> for Error {
fn from(e: keychain::Error) -> Error {
Error::Keychain(e)
}
}
/// Implemented by types that hold inputs and outputs (and kernels)
/// containing Pedersen commitments.
/// Handles the collection of the commitments as well as their
/// summing, taking potential explicit overages of fees into account.
pub trait Committed {
/// Gather the kernel excesses and sum them.
fn sum_kernel_excesses(
&self,
offset: &BlindingFactor,
) -> Result<(Commitment, Commitment), Error> {
// then gather the kernel excess commitments
let kernel_commits = self.kernels_committed();
// sum the commitments
let kernel_sum = sum_commits(kernel_commits, vec![])?;
// sum the commitments along with the
// commit to zero built from the offset
let kernel_sum_plus_offset = {
let secp = static_secp_instance();
let secp = secp.lock();
let mut commits = vec![kernel_sum];
if *offset!= BlindingFactor::zero() {
let key = offset.secret_key(&secp)?;
let offset_commit = secp.commit(0, key)?;
commits.push(offset_commit);
}
secp.commit_sum(commits, vec![])?
};
Ok((kernel_sum, kernel_sum_plus_offset))
}
/// Gathers commitments and sum them.
fn sum_commitments(&self, overage: i64) -> Result<Commitment, Error> {
// gather the commitments
let mut input_commits = self.inputs_committed();
let mut output_commits = self.outputs_committed();
// add the overage as output commitment if positive,
// or as an input commitment if negative
if overage!= 0 {
let over_commit = {
let secp = static_secp_instance();
let secp = secp.lock();
let overage_abs = overage.checked_abs().ok_or_else(|| Error::InvalidValue)? as u64;
secp.commit_value(overage_abs).unwrap()
};
if overage < 0 {
input_commits.push(over_commit);
} else {
output_commits.push(over_commit);
}
}
sum_commits(output_commits, input_commits)
}
/// Vector of input commitments to verify.
fn inputs_committed(&self) -> Vec<Commitment>;
/// Vector of output commitments to verify.
fn outputs_committed(&self) -> Vec<Commitment>;
/// Vector of kernel excesses to verify.
fn kernels_committed(&self) -> Vec<Commitment>;
/// Verify the sum of the kernel excesses equals the
/// sum of the outputs, taking into account both
/// the kernel_offset and overage.
fn verify_kernel_sums(
&self,
overage: i64,
kernel_offset: BlindingFactor,
) -> Result<(Commitment, Commitment), Error> |
}
/// Utility to sum positive and negative commitments, eliminating zero values
pub fn sum_commits(
mut positive: Vec<Commitment>,
mut negative: Vec<Commitment>,
) -> Result<Commitment, Error> {
let zero_commit = secp_static::commit_to_zero_value();
positive.retain(|x| *x!= zero_commit);
negative.retain(|x| *x!= zero_commit);
let secp = static_secp_instance();
let secp = secp.lock();
Ok(secp.commit_sum(positive, negative)?)
}
/// Utility function to take sets of positive and negative kernel offsets as
/// blinding factors, convert them to private keys, filter out zero values, and
/// sum them all. Useful when building blocks.
pub fn sum_kernel_offsets(
positive: Vec<BlindingFactor>,
negative: Vec<BlindingFactor>,
) -> Result<BlindingFactor, Error> {
let secp = static_secp_instance();
let secp = secp.lock();
let positive = to_secrets(positive, &secp);
let negative = to_secrets(negative, &secp);
if positive.is_empty() {
Ok(BlindingFactor::zero())
} else {
let sum = secp.blind_sum(positive, negative)?;
Ok(BlindingFactor::from_secret_key(sum))
}
}
fn to_secrets(bf: Vec<BlindingFactor>, secp: &secp::Secp256k1) -> Vec<SecretKey> {
bf.into_iter()
.filter(|x| *x!= BlindingFactor::zero())
.filter_map(|x| x.secret_key(&secp).ok())
.collect::<Vec<_>>()
}
| {
// Sum all input|output|overage commitments.
let utxo_sum = self.sum_commitments(overage)?;
// Sum the kernel excesses accounting for the kernel offset.
let (kernel_sum, kernel_sum_plus_offset) = self.sum_kernel_excesses(&kernel_offset)?;
if utxo_sum != kernel_sum_plus_offset {
return Err(Error::KernelSumMismatch);
}
Ok((utxo_sum, kernel_sum))
} | identifier_body |
committed.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The Committed trait and associated errors.
use failure::Fail;
use keychain::BlindingFactor;
use util::secp::key::SecretKey;
use util::secp::pedersen::Commitment;
use util::{secp, secp_static, static_secp_instance};
/// Errors from summing and verifying kernel excesses via committed trait.
#[derive(Debug, Clone, PartialEq, Eq, Fail, Serialize, Deserialize)]
pub enum Error {
/// Keychain related error.
#[fail(display = "Keychain error {}", _0)]
Keychain(keychain::Error),
/// Secp related error.
#[fail(display = "Secp error {}", _0)]
Secp(secp::Error),
/// Kernel sums do not equal output sums.
#[fail(display = "Kernel sum mismatch")]
KernelSumMismatch,
/// Committed overage (fee or reward) is invalid
#[fail(display = "Invalid value")]
InvalidValue,
}
impl From<secp::Error> for Error {
fn from(e: secp::Error) -> Error {
Error::Secp(e)
}
}
impl From<keychain::Error> for Error {
fn | (e: keychain::Error) -> Error {
Error::Keychain(e)
}
}
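// --- Editor's sketch (not part of the original source). ---
// The two From impls above are what let the rest of this file bubble secp/keychain
// failures up with the `?` operator (e.g. `offset.secret_key(&secp)?`). A minimal,
// self-contained demonstration of that mechanism with made-up types:
#[allow(dead_code)]
mod from_conversion_sketch {
    #[derive(Debug)]
    pub struct LowLevelError;
    #[derive(Debug)]
    pub enum HighLevelError {
        LowLevel(LowLevelError),
    }
    impl From<LowLevelError> for HighLevelError {
        fn from(e: LowLevelError) -> HighLevelError {
            HighLevelError::LowLevel(e)
        }
    }
    fn low_level() -> Result<u32, LowLevelError> {
        Err(LowLevelError)
    }
    pub fn high_level() -> Result<u32, HighLevelError> {
        // `?` converts the LowLevelError through From before returning it.
        Ok(low_level()? + 1)
    }
}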
/// Implemented by types that hold inputs and outputs (and kernels)
/// containing Pedersen commitments.
/// Handles the collection of the commitments as well as their
/// summing, taking potential explicit overages of fees into account.
pub trait Committed {
/// Gather the kernel excesses and sum them.
fn sum_kernel_excesses(
&self,
offset: &BlindingFactor,
) -> Result<(Commitment, Commitment), Error> {
// then gather the kernel excess commitments
let kernel_commits = self.kernels_committed();
// sum the commitments
let kernel_sum = sum_commits(kernel_commits, vec![])?;
// sum the commitments along with the
// commit to zero built from the offset
let kernel_sum_plus_offset = {
let secp = static_secp_instance();
let secp = secp.lock();
let mut commits = vec![kernel_sum];
if *offset!= BlindingFactor::zero() {
let key = offset.secret_key(&secp)?;
let offset_commit = secp.commit(0, key)?;
commits.push(offset_commit);
}
secp.commit_sum(commits, vec![])?
};
Ok((kernel_sum, kernel_sum_plus_offset))
}
/// Gathers commitments and sum them.
fn sum_commitments(&self, overage: i64) -> Result<Commitment, Error> {
// gather the commitments
let mut input_commits = self.inputs_committed();
let mut output_commits = self.outputs_committed();
// add the overage as output commitment if positive,
// or as an input commitment if negative
if overage!= 0 {
let over_commit = {
let secp = static_secp_instance();
let secp = secp.lock();
let overage_abs = overage.checked_abs().ok_or_else(|| Error::InvalidValue)? as u64;
secp.commit_value(overage_abs).unwrap()
};
if overage < 0 {
input_commits.push(over_commit);
} else {
output_commits.push(over_commit);
}
}
sum_commits(output_commits, input_commits)
}
/// Vector of input commitments to verify.
fn inputs_committed(&self) -> Vec<Commitment>;
/// Vector of output commitments to verify.
fn outputs_committed(&self) -> Vec<Commitment>;
/// Vector of kernel excesses to verify.
fn kernels_committed(&self) -> Vec<Commitment>;
/// Verify the sum of the kernel excesses equals the
/// sum of the outputs, taking into account both
/// the kernel_offset and overage.
fn verify_kernel_sums(
&self,
overage: i64,
kernel_offset: BlindingFactor,
) -> Result<(Commitment, Commitment), Error> {
// Sum all input|output|overage commitments.
let utxo_sum = self.sum_commitments(overage)?;
// Sum the kernel excesses accounting for the kernel offset.
let (kernel_sum, kernel_sum_plus_offset) = self.sum_kernel_excesses(&kernel_offset)?;
if utxo_sum!= kernel_sum_plus_offset {
return Err(Error::KernelSumMismatch);
}
Ok((utxo_sum, kernel_sum))
}
}
/// Utility to sum positive and negative commitments, eliminating zero values
pub fn sum_commits(
mut positive: Vec<Commitment>,
mut negative: Vec<Commitment>,
) -> Result<Commitment, Error> {
let zero_commit = secp_static::commit_to_zero_value();
positive.retain(|x| *x!= zero_commit);
negative.retain(|x| *x!= zero_commit);
let secp = static_secp_instance();
let secp = secp.lock();
Ok(secp.commit_sum(positive, negative)?)
}
/// Utility function to take sets of positive and negative kernel offsets as
/// blinding factors, convert them to private keys, filter out zero values, and
/// sum them all. Useful when building blocks.
pub fn sum_kernel_offsets(
positive: Vec<BlindingFactor>,
negative: Vec<BlindingFactor>,
) -> Result<BlindingFactor, Error> {
let secp = static_secp_instance();
let secp = secp.lock();
let positive = to_secrets(positive, &secp);
let negative = to_secrets(negative, &secp);
if positive.is_empty() {
Ok(BlindingFactor::zero())
} else {
let sum = secp.blind_sum(positive, negative)?;
Ok(BlindingFactor::from_secret_key(sum))
}
}
fn to_secrets(bf: Vec<BlindingFactor>, secp: &secp::Secp256k1) -> Vec<SecretKey> {
bf.into_iter()
.filter(|x| *x!= BlindingFactor::zero())
.filter_map(|x| x.secret_key(&secp).ok())
.collect::<Vec<_>>()
}
| from | identifier_name |
committed.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. | // distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The Committed trait and associated errors.
use failure::Fail;
use keychain::BlindingFactor;
use util::secp::key::SecretKey;
use util::secp::pedersen::Commitment;
use util::{secp, secp_static, static_secp_instance};
/// Errors from summing and verifying kernel excesses via committed trait.
#[derive(Debug, Clone, PartialEq, Eq, Fail, Serialize, Deserialize)]
pub enum Error {
/// Keychain related error.
#[fail(display = "Keychain error {}", _0)]
Keychain(keychain::Error),
/// Secp related error.
#[fail(display = "Secp error {}", _0)]
Secp(secp::Error),
/// Kernel sums do not equal output sums.
#[fail(display = "Kernel sum mismatch")]
KernelSumMismatch,
/// Committed overage (fee or reward) is invalid
#[fail(display = "Invalid value")]
InvalidValue,
}
impl From<secp::Error> for Error {
fn from(e: secp::Error) -> Error {
Error::Secp(e)
}
}
impl From<keychain::Error> for Error {
fn from(e: keychain::Error) -> Error {
Error::Keychain(e)
}
}
/// Implemented by types that hold inputs and outputs (and kernels)
/// containing Pedersen commitments.
/// Handles the collection of the commitments as well as their
/// summing, taking potential explicit overages of fees into account.
pub trait Committed {
/// Gather the kernel excesses and sum them.
fn sum_kernel_excesses(
&self,
offset: &BlindingFactor,
) -> Result<(Commitment, Commitment), Error> {
// then gather the kernel excess commitments
let kernel_commits = self.kernels_committed();
// sum the commitments
let kernel_sum = sum_commits(kernel_commits, vec![])?;
// sum the commitments along with the
// commit to zero built from the offset
let kernel_sum_plus_offset = {
let secp = static_secp_instance();
let secp = secp.lock();
let mut commits = vec![kernel_sum];
if *offset!= BlindingFactor::zero() {
let key = offset.secret_key(&secp)?;
let offset_commit = secp.commit(0, key)?;
commits.push(offset_commit);
}
secp.commit_sum(commits, vec![])?
};
Ok((kernel_sum, kernel_sum_plus_offset))
}
/// Gathers commitments and sum them.
fn sum_commitments(&self, overage: i64) -> Result<Commitment, Error> {
// gather the commitments
let mut input_commits = self.inputs_committed();
let mut output_commits = self.outputs_committed();
// add the overage as output commitment if positive,
// or as an input commitment if negative
if overage!= 0 {
let over_commit = {
let secp = static_secp_instance();
let secp = secp.lock();
let overage_abs = overage.checked_abs().ok_or_else(|| Error::InvalidValue)? as u64;
secp.commit_value(overage_abs).unwrap()
};
if overage < 0 {
input_commits.push(over_commit);
} else {
output_commits.push(over_commit);
}
}
sum_commits(output_commits, input_commits)
}
/// Vector of input commitments to verify.
fn inputs_committed(&self) -> Vec<Commitment>;
/// Vector of output commitments to verify.
fn outputs_committed(&self) -> Vec<Commitment>;
/// Vector of kernel excesses to verify.
fn kernels_committed(&self) -> Vec<Commitment>;
/// Verify the sum of the kernel excesses equals the
/// sum of the outputs, taking into account both
/// the kernel_offset and overage.
fn verify_kernel_sums(
&self,
overage: i64,
kernel_offset: BlindingFactor,
) -> Result<(Commitment, Commitment), Error> {
// Sum all input|output|overage commitments.
let utxo_sum = self.sum_commitments(overage)?;
// Sum the kernel excesses accounting for the kernel offset.
let (kernel_sum, kernel_sum_plus_offset) = self.sum_kernel_excesses(&kernel_offset)?;
if utxo_sum!= kernel_sum_plus_offset {
return Err(Error::KernelSumMismatch);
}
Ok((utxo_sum, kernel_sum))
}
}
/// Utility to sum positive and negative commitments, eliminating zero values
pub fn sum_commits(
mut positive: Vec<Commitment>,
mut negative: Vec<Commitment>,
) -> Result<Commitment, Error> {
let zero_commit = secp_static::commit_to_zero_value();
positive.retain(|x| *x!= zero_commit);
negative.retain(|x| *x!= zero_commit);
let secp = static_secp_instance();
let secp = secp.lock();
Ok(secp.commit_sum(positive, negative)?)
}
/// Utility function to take sets of positive and negative kernel offsets as
/// blinding factors, convert them to private keys, filter out zero values, and
/// sum them all. Useful when building blocks.
pub fn sum_kernel_offsets(
positive: Vec<BlindingFactor>,
negative: Vec<BlindingFactor>,
) -> Result<BlindingFactor, Error> {
let secp = static_secp_instance();
let secp = secp.lock();
let positive = to_secrets(positive, &secp);
let negative = to_secrets(negative, &secp);
if positive.is_empty() {
Ok(BlindingFactor::zero())
} else {
let sum = secp.blind_sum(positive, negative)?;
Ok(BlindingFactor::from_secret_key(sum))
}
}
fn to_secrets(bf: Vec<BlindingFactor>, secp: &secp::Secp256k1) -> Vec<SecretKey> {
bf.into_iter()
.filter(|x| *x!= BlindingFactor::zero())
.filter_map(|x| x.secret_key(&secp).ok())
.collect::<Vec<_>>()
} | // You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software | random_line_split |
error.rs | use std::old_io::IoError;
use std::error::Error;
use std::fmt;
pub type ProtobufResult<T> = Result<T, ProtobufError>;
#[derive(Debug,Eq,PartialEq)]
pub enum ProtobufError {
IoError(IoError),
WireError(String),
}
impl fmt::Display for ProtobufError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(self, f)
}
}
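// --- Editor's sketch (not part of the original source). ---
// How calling code is expected to branch on the two variants; the helper name is made
// up and the snippet assumes the surrounding definitions in this file.
#[allow(dead_code)]
fn describe_error(err: &ProtobufError) -> String {
    match *err {
        ProtobufError::IoError(ref e) => format!("I/O failure: {:?}", e),
        ProtobufError::WireError(ref msg) => format!("malformed wire data: {}", msg),
    }
}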
impl Error for ProtobufError {
fn description(&self) -> &str {
match self {
// not sure that cause should be included in message
&ProtobufError::IoError(ref e) => e.description(),
&ProtobufError::WireError(ref e) => e.as_slice(),
}
}
fn | (&self) -> Option<&Error> {
match self {
&ProtobufError::IoError(ref e) => Some(e as &Error),
&ProtobufError::WireError(..) => None,
}
}
}
| cause | identifier_name |
error.rs | use std::old_io::IoError;
use std::error::Error;
use std::fmt;
pub type ProtobufResult<T> = Result<T, ProtobufError>;
#[derive(Debug,Eq,PartialEq)]
pub enum ProtobufError { | }
impl fmt::Display for ProtobufError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(self, f)
}
}
impl Error for ProtobufError {
fn description(&self) -> &str {
match self {
// not sure that cause should be included in message
&ProtobufError::IoError(ref e) => e.description(),
&ProtobufError::WireError(ref e) => e.as_slice(),
}
}
fn cause(&self) -> Option<&Error> {
match self {
&ProtobufError::IoError(ref e) => Some(e as &Error),
&ProtobufError::WireError(..) => None,
}
}
} | IoError(IoError),
WireError(String), | random_line_split |
error.rs | use std::old_io::IoError;
use std::error::Error;
use std::fmt;
pub type ProtobufResult<T> = Result<T, ProtobufError>;
#[derive(Debug,Eq,PartialEq)]
pub enum ProtobufError {
IoError(IoError),
WireError(String),
}
impl fmt::Display for ProtobufError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
impl Error for ProtobufError {
fn description(&self) -> &str {
match self {
// not sure that cause should be included in message
&ProtobufError::IoError(ref e) => e.description(),
&ProtobufError::WireError(ref e) => e.as_slice(),
}
}
fn cause(&self) -> Option<&Error> {
match self {
&ProtobufError::IoError(ref e) => Some(e as &Error),
&ProtobufError::WireError(..) => None,
}
}
}
| {
fmt::Debug::fmt(self, f)
} | identifier_body |
types.rs | use byteorder::{BigEndian, WriteBytesExt};
use protobuf::Message;
use std::io::Write;
use crate::protocol;
#[derive(Debug, PartialEq, Eq)]
pub enum MercuryMethod {
Get,
Sub,
Unsub,
Send,
}
#[derive(Debug)]
pub struct MercuryRequest {
pub method: MercuryMethod,
pub uri: String,
pub content_type: Option<String>,
pub payload: Vec<Vec<u8>>,
}
#[derive(Debug, Clone)]
pub struct MercuryResponse {
pub uri: String,
pub status_code: i32,
pub payload: Vec<Vec<u8>>,
}
#[derive(Debug, Hash, PartialEq, Eq, Copy, Clone)]
pub struct MercuryError;
impl ToString for MercuryMethod {
fn to_string(&self) -> String {
match *self {
MercuryMethod::Get => "GET",
MercuryMethod::Sub => "SUB",
MercuryMethod::Unsub => "UNSUB",
MercuryMethod::Send => "SEND",
}
.to_owned()
}
}
impl MercuryMethod {
pub fn | (&self) -> u8 {
match *self {
MercuryMethod::Get | MercuryMethod::Send => 0xb2,
MercuryMethod::Sub => 0xb3,
MercuryMethod::Unsub => 0xb4,
}
}
}
impl MercuryRequest {
pub fn encode(&self, seq: &[u8]) -> Vec<u8> {
let mut packet = Vec::new();
packet.write_u16::<BigEndian>(seq.len() as u16).unwrap();
packet.write_all(seq).unwrap();
packet.write_u8(1).unwrap(); // Flags: FINAL
packet
.write_u16::<BigEndian>(1 + self.payload.len() as u16)
.unwrap(); // Part count
let mut header = protocol::mercury::Header::new();
header.set_uri(self.uri.clone());
header.set_method(self.method.to_string());
if let Some(ref content_type) = self.content_type {
header.set_content_type(content_type.clone());
}
packet
.write_u16::<BigEndian>(header.compute_size() as u16)
.unwrap();
header.write_to_writer(&mut packet).unwrap();
for p in &self.payload {
packet.write_u16::<BigEndian>(p.len() as u16).unwrap();
packet.write(p).unwrap();
}
packet
}
}
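// --- Editor's sketch (not part of the original source). ---
// encode() above builds the packet as a sequence of length-prefixed fields: a u16
// big-endian length followed by the bytes themselves (sequence id, protobuf header,
// then each payload part), with a flags byte and a part count in between. The helper
// below isolates that framing primitive; the name `write_framed` is made up.
#[allow(dead_code)]
fn write_framed(out: &mut Vec<u8>, bytes: &[u8]) {
    use byteorder::{BigEndian, WriteBytesExt};
    use std::io::Write;
    out.write_u16::<BigEndian>(bytes.len() as u16).unwrap();
    out.write_all(bytes).unwrap();
}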
| command | identifier_name |
types.rs | use byteorder::{BigEndian, WriteBytesExt};
use protobuf::Message;
use std::io::Write;
use crate::protocol;
#[derive(Debug, PartialEq, Eq)]
pub enum MercuryMethod {
Get,
Sub,
Unsub,
Send,
}
#[derive(Debug)]
pub struct MercuryRequest {
pub method: MercuryMethod,
pub uri: String,
pub content_type: Option<String>,
pub payload: Vec<Vec<u8>>,
}
#[derive(Debug, Clone)]
pub struct MercuryResponse {
pub uri: String,
pub status_code: i32,
pub payload: Vec<Vec<u8>>,
}
#[derive(Debug, Hash, PartialEq, Eq, Copy, Clone)]
pub struct MercuryError;
impl ToString for MercuryMethod {
fn to_string(&self) -> String {
match *self {
MercuryMethod::Get => "GET",
MercuryMethod::Sub => "SUB",
MercuryMethod::Unsub => "UNSUB",
MercuryMethod::Send => "SEND",
}
.to_owned()
}
}
impl MercuryMethod {
pub fn command(&self) -> u8 {
match *self {
MercuryMethod::Get | MercuryMethod::Send => 0xb2,
MercuryMethod::Sub => 0xb3,
MercuryMethod::Unsub => 0xb4,
}
}
}
impl MercuryRequest {
pub fn encode(&self, seq: &[u8]) -> Vec<u8> {
let mut packet = Vec::new();
packet.write_u16::<BigEndian>(seq.len() as u16).unwrap();
packet.write_all(seq).unwrap();
packet.write_u8(1).unwrap(); // Flags: FINAL
packet
.write_u16::<BigEndian>(1 + self.payload.len() as u16)
.unwrap(); // Part count
let mut header = protocol::mercury::Header::new();
header.set_uri(self.uri.clone());
header.set_method(self.method.to_string());
if let Some(ref content_type) = self.content_type {
header.set_content_type(content_type.clone());
}
packet
.write_u16::<BigEndian>(header.compute_size() as u16)
.unwrap(); | header.write_to_writer(&mut packet).unwrap();
for p in &self.payload {
packet.write_u16::<BigEndian>(p.len() as u16).unwrap();
packet.write(p).unwrap();
}
packet
}
} | random_line_split |
|
tests.rs | use super::*;
#[test]
fn test_struct_info_roundtrip() {
let s = ItemEnum::Struct(Struct {
struct_type: StructType::Plain,
generics: Generics { params: vec![], where_predicates: vec![] },
fields_stripped: false,
fields: vec![],
impls: vec![],
});
let struct_json = serde_json::to_string(&s).unwrap();
let de_s = serde_json::from_str(&struct_json).unwrap();
assert_eq!(s, de_s);
}
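// --- Editor's sketch (not part of the original tests). ---
// Both round-trip tests in this file follow the same serialize -> deserialize ->
// compare shape; a generic helper like this one (name made up) expresses it once.
#[allow(dead_code)]
fn assert_json_roundtrip<T>(value: &T)
where
    T: serde::Serialize + serde::de::DeserializeOwned + PartialEq + std::fmt::Debug,
{
    let json = serde_json::to_string(value).unwrap();
    let back: T = serde_json::from_str(&json).unwrap();
    assert_eq!(*value, back);
}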
#[test]
fn test_union_info_roundtrip() {
let u = ItemEnum::Union(Union {
generics: Generics { params: vec![], where_predicates: vec![] },
fields_stripped: false,
fields: vec![], | impls: vec![],
});
let union_json = serde_json::to_string(&u).unwrap();
let de_u = serde_json::from_str(&union_json).unwrap();
assert_eq!(u, de_u);
} | random_line_split |
|
tests.rs | use super::*;
#[test]
fn test_struct_info_roundtrip() {
let s = ItemEnum::Struct(Struct {
struct_type: StructType::Plain,
generics: Generics { params: vec![], where_predicates: vec![] },
fields_stripped: false,
fields: vec![],
impls: vec![],
});
let struct_json = serde_json::to_string(&s).unwrap();
let de_s = serde_json::from_str(&struct_json).unwrap();
assert_eq!(s, de_s);
}
#[test]
fn test_union_info_roundtrip() | {
let u = ItemEnum::Union(Union {
generics: Generics { params: vec![], where_predicates: vec![] },
fields_stripped: false,
fields: vec![],
impls: vec![],
});
let union_json = serde_json::to_string(&u).unwrap();
let de_u = serde_json::from_str(&union_json).unwrap();
assert_eq!(u, de_u);
} | identifier_body |
|
tests.rs | use super::*;
#[test]
fn test_struct_info_roundtrip() {
let s = ItemEnum::Struct(Struct {
struct_type: StructType::Plain,
generics: Generics { params: vec![], where_predicates: vec![] },
fields_stripped: false,
fields: vec![],
impls: vec![],
});
let struct_json = serde_json::to_string(&s).unwrap();
let de_s = serde_json::from_str(&struct_json).unwrap();
assert_eq!(s, de_s);
}
#[test]
fn | () {
let u = ItemEnum::Union(Union {
generics: Generics { params: vec![], where_predicates: vec![] },
fields_stripped: false,
fields: vec![],
impls: vec![],
});
let union_json = serde_json::to_string(&u).unwrap();
let de_u = serde_json::from_str(&union_json).unwrap();
assert_eq!(u, de_u);
}
| test_union_info_roundtrip | identifier_name |
rec-align-u64.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #2303
#![feature(intrinsics)]
use std::mem;
mod rusti {
extern "rust-intrinsic" {
pub fn pref_align_of<T>() -> uint;
pub fn min_align_of<T>() -> uint;
}
}
// This is the type with the questionable alignment
#[derive(Debug)]
struct Inner {
c64: u64
}
// This is the type that contains the type with the
// questionable alignment, for testing
#[derive(Debug)]
struct Outer {
c8: u8,
t: Inner
}
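// --- Editor's sketch (not part of the original test). ---
// What the m::align()/m::size() tables below encode: Outer is laid out as c8 (1 byte),
// padding up to Inner's alignment, then the 8-byte u64. With an 8-byte-aligned u64
// that is 1 + 7 + 8 = 16 bytes; with a 4-byte-aligned u64 (as on 32-bit Linux x86) it
// is 1 + 3 + 8 = 12 bytes. A modern-Rust check of the same arithmetic:
#[allow(dead_code)]
fn alignment_sketch() {
    use std::mem::{align_of, size_of};
    // Prints 8 and 16 on x86_64 Linux, 4 and 12 on 32-bit x86 Linux.
    println!("align of Inner = {}", align_of::<Inner>());
    println!("size of Outer  = {}", size_of::<Outer>());
    // A struct's size is always a multiple of its alignment.
    assert_eq!(size_of::<Outer>() % align_of::<Outer>(), 0);
}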
#[cfg(any(target_os = "linux",
target_os = "macos",
target_os = "freebsd",
target_os = "dragonfly"))]
mod m {
#[cfg(target_arch = "x86")]
pub mod m {
pub fn align() -> uint { 4u }
pub fn size() -> uint { 12u }
}
#[cfg(any(target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64"))]
pub mod m {
pub fn align() -> uint { 8u }
pub fn size() -> uint { 16u }
}
}
#[cfg(target_os = "windows")]
mod m {
#[cfg(target_arch = "x86")]
pub mod m {
pub fn align() -> uint { 8u }
pub fn size() -> uint { 16u }
}
#[cfg(target_arch = "x86_64")]
pub mod m {
pub fn align() -> uint { 8u } | #[cfg(target_os = "android")]
mod m {
#[cfg(target_arch = "arm")]
pub mod m {
pub fn align() -> uint { 8u }
pub fn size() -> uint { 16u }
}
}
pub fn main() {
unsafe {
let x = Outer {c8: 22u8, t: Inner {c64: 44u64}};
let y = format!("{:?}", x);
println!("align inner = {:?}", rusti::min_align_of::<Inner>());
println!("size outer = {:?}", mem::size_of::<Outer>());
println!("y = {:?}", y);
// per clang/gcc the alignment of `Inner` is 4 on x86.
assert_eq!(rusti::min_align_of::<Inner>(), m::m::align());
// per clang/gcc the size of `Outer` should be 12
// because `Inner`s alignment was 4.
assert_eq!(mem::size_of::<Outer>(), m::m::size());
assert_eq!(y, "Outer { c8: 22, t: Inner { c64: 44 } }".to_string());
}
} | pub fn size() -> uint { 16u }
}
}
| random_line_split |
rec-align-u64.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #2303
#![feature(intrinsics)]
use std::mem;
mod rusti {
extern "rust-intrinsic" {
pub fn pref_align_of<T>() -> uint;
pub fn min_align_of<T>() -> uint;
}
}
// This is the type with the questionable alignment
#[derive(Debug)]
struct Inner {
c64: u64
}
// This is the type that contains the type with the
// questionable alignment, for testing
#[derive(Debug)]
struct Outer {
c8: u8,
t: Inner
}
#[cfg(any(target_os = "linux",
target_os = "macos",
target_os = "freebsd",
target_os = "dragonfly"))]
mod m {
#[cfg(target_arch = "x86")]
pub mod m {
pub fn align() -> uint { 4u }
pub fn size() -> uint { 12u }
}
#[cfg(any(target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64"))]
pub mod m {
pub fn align() -> uint { 8u }
pub fn size() -> uint { 16u }
}
}
#[cfg(target_os = "windows")]
mod m {
#[cfg(target_arch = "x86")]
pub mod m {
pub fn align() -> uint { 8u }
pub fn | () -> uint { 16u }
}
#[cfg(target_arch = "x86_64")]
pub mod m {
pub fn align() -> uint { 8u }
pub fn size() -> uint { 16u }
}
}
#[cfg(target_os = "android")]
mod m {
#[cfg(target_arch = "arm")]
pub mod m {
pub fn align() -> uint { 8u }
pub fn size() -> uint { 16u }
}
}
pub fn main() {
unsafe {
let x = Outer {c8: 22u8, t: Inner {c64: 44u64}};
let y = format!("{:?}", x);
println!("align inner = {:?}", rusti::min_align_of::<Inner>());
println!("size outer = {:?}", mem::size_of::<Outer>());
println!("y = {:?}", y);
// per clang/gcc the alignment of `Inner` is 4 on x86.
assert_eq!(rusti::min_align_of::<Inner>(), m::m::align());
// per clang/gcc the size of `Outer` should be 12
// because `Inner`s alignment was 4.
assert_eq!(mem::size_of::<Outer>(), m::m::size());
assert_eq!(y, "Outer { c8: 22, t: Inner { c64: 44 } }".to_string());
}
}
| size | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Calculate [specified][specified] and [computed values][computed] from a
//! tree of DOM nodes and a set of stylesheets.
//!
//! [computed]: https://drafts.csswg.org/css-cascade/#computed
//! [specified]: https://drafts.csswg.org/css-cascade/#specified
//!
//! In particular, this crate contains the definitions of supported properties,
//! the code to parse them into specified values and calculate the computed
//! values based on the specified values, as well as the code to serialize both
//! specified and computed values.
//!
//! The main entry point is [`recalc_style_at`][recalc_style_at].
//!
//! [recalc_style_at]: traversal/fn.recalc_style_at.html
//!
//! Major dependencies are the [cssparser][cssparser] and [selectors][selectors]
//! crates.
//!
//! [cssparser]: ../cssparser/index.html
//! [selectors]: ../selectors/index.html
#![deny(missing_docs)]
extern crate app_units;
extern crate arrayvec;
extern crate atomic_refcell;
#[macro_use]
extern crate bitflags;
#[allow(unused_extern_crates)] extern crate byteorder;
#[cfg(feature = "gecko")] #[macro_use] #[no_link] extern crate cfg_if;
#[macro_use] extern crate cssparser;
#[macro_use] extern crate debug_unreachable;
extern crate euclid;
extern crate fallible;
extern crate fnv;
#[cfg(feature = "gecko")] #[macro_use] pub mod gecko_string_cache;
extern crate hashglobe;
extern crate itertools;
extern crate itoa;
#[cfg(feature = "servo")] #[macro_use] extern crate html5ever;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[macro_use] extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate matches;
#[cfg(feature = "gecko")]
pub extern crate nsstring;
#[cfg(feature = "gecko")] extern crate num_cpus;
extern crate num_integer;
extern crate num_traits;
extern crate ordered_float;
extern crate owning_ref;
extern crate parking_lot;
extern crate precomputed_hash;
extern crate rayon;
extern crate selectors;
#[cfg(feature = "servo")] #[macro_use] extern crate serde;
pub extern crate servo_arc;
#[cfg(feature = "servo")] #[macro_use] extern crate servo_atoms;
#[cfg(feature = "servo")] extern crate servo_config;
#[cfg(feature = "servo")] extern crate servo_url;
extern crate smallbitvec;
extern crate smallvec;
#[cfg(feature = "servo")] extern crate string_cache;
#[macro_use]
extern crate style_derive;
extern crate style_traits;
extern crate time;
extern crate uluru;
extern crate unicode_bidi;
#[allow(unused_extern_crates)]
extern crate unicode_segmentation;
extern crate void;
#[macro_use]
mod macros;
#[cfg(feature = "servo")] pub mod animation;
pub mod applicable_declarations;
#[allow(missing_docs)] // TODO.
#[cfg(feature = "servo")] pub mod attr;
pub mod author_styles;
pub mod bezier;
pub mod bloom;
pub mod context;
pub mod counter_style;
pub mod custom_properties;
pub mod data;
pub mod dom;
pub mod dom_apis;
pub mod driver;
pub mod element_state;
#[cfg(feature = "servo")] mod encoding_support;
pub mod error_reporting; | #[cfg(feature = "gecko")] #[allow(unsafe_code)] pub mod gecko;
#[cfg(feature = "gecko")] #[allow(unsafe_code)] pub mod gecko_bindings;
pub mod hash;
pub mod invalidation;
#[allow(missing_docs)] // TODO.
pub mod logical_geometry;
pub mod matching;
pub mod media_queries;
pub mod parallel;
pub mod parser;
pub mod rule_cache;
pub mod rule_tree;
pub mod scoped_tls;
pub mod selector_map;
pub mod selector_parser;
pub mod shared_lock;
pub mod sharing;
pub mod str;
pub mod style_adjuster;
pub mod style_resolver;
pub mod stylesheet_set;
pub mod stylesheets;
pub mod stylist;
pub mod thread_state;
pub mod timer;
pub mod traversal;
pub mod traversal_flags;
#[macro_use]
#[allow(non_camel_case_types)]
pub mod values;
#[cfg(feature = "gecko")] pub use gecko_string_cache as string_cache;
#[cfg(feature = "gecko")] pub use gecko_string_cache::Atom;
#[cfg(feature = "gecko")] pub use gecko_string_cache::Namespace;
#[cfg(feature = "gecko")] pub use gecko_string_cache::Atom as Prefix;
#[cfg(feature = "gecko")] pub use gecko_string_cache::Atom as LocalName;
#[cfg(feature = "servo")] pub use servo_atoms::Atom;
#[cfg(feature = "servo")] pub use html5ever::Prefix;
#[cfg(feature = "servo")] pub use html5ever::LocalName;
#[cfg(feature = "servo")] pub use html5ever::Namespace;
/// The CSS properties supported by the style system.
/// Generated from the properties.mako.rs template by build.rs
#[macro_use]
#[allow(unsafe_code)]
#[deny(missing_docs)]
pub mod properties {
include!(concat!(env!("OUT_DIR"), "/properties.rs"));
}
// uses a macro from properties
#[cfg(feature = "servo")] #[allow(unsafe_code)] pub mod servo;
#[cfg(feature = "gecko")]
#[allow(unsafe_code, missing_docs)]
pub mod gecko_properties {
include!(concat!(env!("OUT_DIR"), "/gecko_properties.rs"));
}
macro_rules! reexport_computed_values {
( $( { $name: ident, $boxed: expr } )+ ) => {
/// Types for [computed values][computed].
///
/// [computed]: https://drafts.csswg.org/css-cascade/#computed
pub mod computed_values {
$(
pub use properties::longhands::$name::computed_value as $name;
)+
// Don't use a side-specific name needlessly:
pub use properties::longhands::border_top_style::computed_value as border_style;
}
}
}
longhand_properties_idents!(reexport_computed_values);
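// --- Editor's sketch (not part of the original source). ---
// longhand_properties_idents!(reexport_computed_values) is the "callback macro" idiom:
// the generated macro owns the list of longhand idents and invokes whatever macro it
// is handed with that list. A self-contained miniature of the same idiom, with
// made-up names and a deliberately tiny list:
macro_rules! color_idents {
    ($callback:ident) => {
        $callback! { { red, false } { green, false } { blue, true } }
    };
}
macro_rules! collect_color_names {
    ( $( { $name: ident, $boxed: expr } )+ ) => {
        #[allow(dead_code)]
        const COLOR_NAMES: &[&str] = &[$(stringify!($name)),+];
    };
}
color_idents!(collect_color_names); // expands to a 3-element COLOR_NAMES constant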
#[cfg(feature = "gecko")] use gecko_string_cache::WeakAtom;
#[cfg(feature = "servo")] use servo_atoms::Atom as WeakAtom;
/// Extension methods for selectors::attr::CaseSensitivity
pub trait CaseSensitivityExt {
/// Return whether two atoms compare equal according to this case sensitivity.
fn eq_atom(self, a: &WeakAtom, b: &WeakAtom) -> bool;
}
impl CaseSensitivityExt for selectors::attr::CaseSensitivity {
fn eq_atom(self, a: &WeakAtom, b: &WeakAtom) -> bool {
match self {
selectors::attr::CaseSensitivity::CaseSensitive => a == b,
selectors::attr::CaseSensitivity::AsciiCaseInsensitive => a.eq_ignore_ascii_case(b),
}
}
} | pub mod font_face;
pub mod font_metrics; | random_line_split |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Calculate [specified][specified] and [computed values][computed] from a
//! tree of DOM nodes and a set of stylesheets.
//!
//! [computed]: https://drafts.csswg.org/css-cascade/#computed
//! [specified]: https://drafts.csswg.org/css-cascade/#specified
//!
//! In particular, this crate contains the definitions of supported properties,
//! the code to parse them into specified values and calculate the computed
//! values based on the specified values, as well as the code to serialize both
//! specified and computed values.
//!
//! The main entry point is [`recalc_style_at`][recalc_style_at].
//!
//! [recalc_style_at]: traversal/fn.recalc_style_at.html
//!
//! Major dependencies are the [cssparser][cssparser] and [selectors][selectors]
//! crates.
//!
//! [cssparser]: ../cssparser/index.html
//! [selectors]: ../selectors/index.html
#![deny(missing_docs)]
extern crate app_units;
extern crate arrayvec;
extern crate atomic_refcell;
#[macro_use]
extern crate bitflags;
#[allow(unused_extern_crates)] extern crate byteorder;
#[cfg(feature = "gecko")] #[macro_use] #[no_link] extern crate cfg_if;
#[macro_use] extern crate cssparser;
#[macro_use] extern crate debug_unreachable;
extern crate euclid;
extern crate fallible;
extern crate fnv;
#[cfg(feature = "gecko")] #[macro_use] pub mod gecko_string_cache;
extern crate hashglobe;
extern crate itertools;
extern crate itoa;
#[cfg(feature = "servo")] #[macro_use] extern crate html5ever;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[macro_use] extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate matches;
#[cfg(feature = "gecko")]
pub extern crate nsstring;
#[cfg(feature = "gecko")] extern crate num_cpus;
extern crate num_integer;
extern crate num_traits;
extern crate ordered_float;
extern crate owning_ref;
extern crate parking_lot;
extern crate precomputed_hash;
extern crate rayon;
extern crate selectors;
#[cfg(feature = "servo")] #[macro_use] extern crate serde;
pub extern crate servo_arc;
#[cfg(feature = "servo")] #[macro_use] extern crate servo_atoms;
#[cfg(feature = "servo")] extern crate servo_config;
#[cfg(feature = "servo")] extern crate servo_url;
extern crate smallbitvec;
extern crate smallvec;
#[cfg(feature = "servo")] extern crate string_cache;
#[macro_use]
extern crate style_derive;
extern crate style_traits;
extern crate time;
extern crate uluru;
extern crate unicode_bidi;
#[allow(unused_extern_crates)]
extern crate unicode_segmentation;
extern crate void;
#[macro_use]
mod macros;
#[cfg(feature = "servo")] pub mod animation;
pub mod applicable_declarations;
#[allow(missing_docs)] // TODO.
#[cfg(feature = "servo")] pub mod attr;
pub mod author_styles;
pub mod bezier;
pub mod bloom;
pub mod context;
pub mod counter_style;
pub mod custom_properties;
pub mod data;
pub mod dom;
pub mod dom_apis;
pub mod driver;
pub mod element_state;
#[cfg(feature = "servo")] mod encoding_support;
pub mod error_reporting;
pub mod font_face;
pub mod font_metrics;
#[cfg(feature = "gecko")] #[allow(unsafe_code)] pub mod gecko;
#[cfg(feature = "gecko")] #[allow(unsafe_code)] pub mod gecko_bindings;
pub mod hash;
pub mod invalidation;
#[allow(missing_docs)] // TODO.
pub mod logical_geometry;
pub mod matching;
pub mod media_queries;
pub mod parallel;
pub mod parser;
pub mod rule_cache;
pub mod rule_tree;
pub mod scoped_tls;
pub mod selector_map;
pub mod selector_parser;
pub mod shared_lock;
pub mod sharing;
pub mod str;
pub mod style_adjuster;
pub mod style_resolver;
pub mod stylesheet_set;
pub mod stylesheets;
pub mod stylist;
pub mod thread_state;
pub mod timer;
pub mod traversal;
pub mod traversal_flags;
#[macro_use]
#[allow(non_camel_case_types)]
pub mod values;
#[cfg(feature = "gecko")] pub use gecko_string_cache as string_cache;
#[cfg(feature = "gecko")] pub use gecko_string_cache::Atom;
#[cfg(feature = "gecko")] pub use gecko_string_cache::Namespace;
#[cfg(feature = "gecko")] pub use gecko_string_cache::Atom as Prefix;
#[cfg(feature = "gecko")] pub use gecko_string_cache::Atom as LocalName;
#[cfg(feature = "servo")] pub use servo_atoms::Atom;
#[cfg(feature = "servo")] pub use html5ever::Prefix;
#[cfg(feature = "servo")] pub use html5ever::LocalName;
#[cfg(feature = "servo")] pub use html5ever::Namespace;
/// The CSS properties supported by the style system.
/// Generated from the properties.mako.rs template by build.rs
#[macro_use]
#[allow(unsafe_code)]
#[deny(missing_docs)]
pub mod properties {
include!(concat!(env!("OUT_DIR"), "/properties.rs"));
}
// uses a macro from properties
#[cfg(feature = "servo")] #[allow(unsafe_code)] pub mod servo;
#[cfg(feature = "gecko")]
#[allow(unsafe_code, missing_docs)]
pub mod gecko_properties {
include!(concat!(env!("OUT_DIR"), "/gecko_properties.rs"));
}
macro_rules! reexport_computed_values {
( $( { $name: ident, $boxed: expr } )+ ) => {
/// Types for [computed values][computed].
///
/// [computed]: https://drafts.csswg.org/css-cascade/#computed
pub mod computed_values {
$(
pub use properties::longhands::$name::computed_value as $name;
)+
// Don't use a side-specific name needlessly:
pub use properties::longhands::border_top_style::computed_value as border_style;
}
}
}
longhand_properties_idents!(reexport_computed_values);
#[cfg(feature = "gecko")] use gecko_string_cache::WeakAtom;
#[cfg(feature = "servo")] use servo_atoms::Atom as WeakAtom;
/// Extension methods for selectors::attr::CaseSensitivity
pub trait CaseSensitivityExt {
/// Return whether two atoms compare equal according to this case sensitivity.
fn eq_atom(self, a: &WeakAtom, b: &WeakAtom) -> bool;
}
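// --- Editor's sketch (not part of the original source). ---
// CaseSensitivityExt is the usual extension-trait pattern: a locally defined trait
// adds a method to a type owned by another crate (selectors). The miniature below
// shows the same shape with made-up names on `str`:
#[allow(dead_code)]
mod extension_trait_sketch {
    pub trait CasedCompareExt {
        fn eq_cased(&self, other: &str, case_sensitive: bool) -> bool;
    }
    impl CasedCompareExt for str {
        fn eq_cased(&self, other: &str, case_sensitive: bool) -> bool {
            if case_sensitive {
                self == other
            } else {
                self.eq_ignore_ascii_case(other)
            }
        }
    }
}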
impl CaseSensitivityExt for selectors::attr::CaseSensitivity {
fn eq_atom(self, a: &WeakAtom, b: &WeakAtom) -> bool |
}
| {
match self {
selectors::attr::CaseSensitivity::CaseSensitive => a == b,
selectors::attr::CaseSensitivity::AsciiCaseInsensitive => a.eq_ignore_ascii_case(b),
}
} | identifier_body |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Calculate [specified][specified] and [computed values][computed] from a
//! tree of DOM nodes and a set of stylesheets.
//!
//! [computed]: https://drafts.csswg.org/css-cascade/#computed
//! [specified]: https://drafts.csswg.org/css-cascade/#specified
//!
//! In particular, this crate contains the definitions of supported properties,
//! the code to parse them into specified values and calculate the computed
//! values based on the specified values, as well as the code to serialize both
//! specified and computed values.
//!
//! The main entry point is [`recalc_style_at`][recalc_style_at].
//!
//! [recalc_style_at]: traversal/fn.recalc_style_at.html
//!
//! Major dependencies are the [cssparser][cssparser] and [selectors][selectors]
//! crates.
//!
//! [cssparser]: ../cssparser/index.html
//! [selectors]: ../selectors/index.html
#![deny(missing_docs)]
extern crate app_units;
extern crate arrayvec;
extern crate atomic_refcell;
#[macro_use]
extern crate bitflags;
#[allow(unused_extern_crates)] extern crate byteorder;
#[cfg(feature = "gecko")] #[macro_use] #[no_link] extern crate cfg_if;
#[macro_use] extern crate cssparser;
#[macro_use] extern crate debug_unreachable;
extern crate euclid;
extern crate fallible;
extern crate fnv;
#[cfg(feature = "gecko")] #[macro_use] pub mod gecko_string_cache;
extern crate hashglobe;
extern crate itertools;
extern crate itoa;
#[cfg(feature = "servo")] #[macro_use] extern crate html5ever;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[macro_use] extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate matches;
#[cfg(feature = "gecko")]
pub extern crate nsstring;
#[cfg(feature = "gecko")] extern crate num_cpus;
extern crate num_integer;
extern crate num_traits;
extern crate ordered_float;
extern crate owning_ref;
extern crate parking_lot;
extern crate precomputed_hash;
extern crate rayon;
extern crate selectors;
#[cfg(feature = "servo")] #[macro_use] extern crate serde;
pub extern crate servo_arc;
#[cfg(feature = "servo")] #[macro_use] extern crate servo_atoms;
#[cfg(feature = "servo")] extern crate servo_config;
#[cfg(feature = "servo")] extern crate servo_url;
extern crate smallbitvec;
extern crate smallvec;
#[cfg(feature = "servo")] extern crate string_cache;
#[macro_use]
extern crate style_derive;
extern crate style_traits;
extern crate time;
extern crate uluru;
extern crate unicode_bidi;
#[allow(unused_extern_crates)]
extern crate unicode_segmentation;
extern crate void;
#[macro_use]
mod macros;
#[cfg(feature = "servo")] pub mod animation;
pub mod applicable_declarations;
#[allow(missing_docs)] // TODO.
#[cfg(feature = "servo")] pub mod attr;
pub mod author_styles;
pub mod bezier;
pub mod bloom;
pub mod context;
pub mod counter_style;
pub mod custom_properties;
pub mod data;
pub mod dom;
pub mod dom_apis;
pub mod driver;
pub mod element_state;
#[cfg(feature = "servo")] mod encoding_support;
pub mod error_reporting;
pub mod font_face;
pub mod font_metrics;
#[cfg(feature = "gecko")] #[allow(unsafe_code)] pub mod gecko;
#[cfg(feature = "gecko")] #[allow(unsafe_code)] pub mod gecko_bindings;
pub mod hash;
pub mod invalidation;
#[allow(missing_docs)] // TODO.
pub mod logical_geometry;
pub mod matching;
pub mod media_queries;
pub mod parallel;
pub mod parser;
pub mod rule_cache;
pub mod rule_tree;
pub mod scoped_tls;
pub mod selector_map;
pub mod selector_parser;
pub mod shared_lock;
pub mod sharing;
pub mod str;
pub mod style_adjuster;
pub mod style_resolver;
pub mod stylesheet_set;
pub mod stylesheets;
pub mod stylist;
pub mod thread_state;
pub mod timer;
pub mod traversal;
pub mod traversal_flags;
#[macro_use]
#[allow(non_camel_case_types)]
pub mod values;
#[cfg(feature = "gecko")] pub use gecko_string_cache as string_cache;
#[cfg(feature = "gecko")] pub use gecko_string_cache::Atom;
#[cfg(feature = "gecko")] pub use gecko_string_cache::Namespace;
#[cfg(feature = "gecko")] pub use gecko_string_cache::Atom as Prefix;
#[cfg(feature = "gecko")] pub use gecko_string_cache::Atom as LocalName;
#[cfg(feature = "servo")] pub use servo_atoms::Atom;
#[cfg(feature = "servo")] pub use html5ever::Prefix;
#[cfg(feature = "servo")] pub use html5ever::LocalName;
#[cfg(feature = "servo")] pub use html5ever::Namespace;
/// The CSS properties supported by the style system.
/// Generated from the properties.mako.rs template by build.rs
#[macro_use]
#[allow(unsafe_code)]
#[deny(missing_docs)]
pub mod properties {
include!(concat!(env!("OUT_DIR"), "/properties.rs"));
}
// uses a macro from properties
#[cfg(feature = "servo")] #[allow(unsafe_code)] pub mod servo;
#[cfg(feature = "gecko")]
#[allow(unsafe_code, missing_docs)]
pub mod gecko_properties {
include!(concat!(env!("OUT_DIR"), "/gecko_properties.rs"));
}
macro_rules! reexport_computed_values {
( $( { $name: ident, $boxed: expr } )+ ) => {
/// Types for [computed values][computed].
///
/// [computed]: https://drafts.csswg.org/css-cascade/#computed
pub mod computed_values {
$(
pub use properties::longhands::$name::computed_value as $name;
)+
// Don't use a side-specific name needlessly:
pub use properties::longhands::border_top_style::computed_value as border_style;
}
}
}
longhand_properties_idents!(reexport_computed_values);
#[cfg(feature = "gecko")] use gecko_string_cache::WeakAtom;
#[cfg(feature = "servo")] use servo_atoms::Atom as WeakAtom;
/// Extension methods for selectors::attr::CaseSensitivity
pub trait CaseSensitivityExt {
/// Return whether two atoms compare equal according to this case sensitivity.
fn eq_atom(self, a: &WeakAtom, b: &WeakAtom) -> bool;
}
impl CaseSensitivityExt for selectors::attr::CaseSensitivity {
fn | (self, a: &WeakAtom, b: &WeakAtom) -> bool {
match self {
selectors::attr::CaseSensitivity::CaseSensitive => a == b,
selectors::attr::CaseSensitivity::AsciiCaseInsensitive => a.eq_ignore_ascii_case(b),
}
}
}
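// Editor's note: the function below is an illustrative addition, not part of the
// original crate. It shows how the extension trait above is meant to be used;
// the function name is hypothetical and the two atoms are assumed to come from
// elsewhere (for example, attribute selector matching).
#[allow(dead_code)]
fn atoms_match(sensitivity: selectors::attr::CaseSensitivity, lhs: &WeakAtom, rhs: &WeakAtom) -> bool {
    // Picks exact or ASCII case-insensitive comparison as appropriate.
    sensitivity.eq_atom(lhs, rhs)
}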
| eq_atom | identifier_name |
day_14.rs | use tdd_kata::string_calc_kata::iter_1::day_14::evaluate;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_eval_simple_num() {
assert_eq!(evaluate("1"), Ok(1.0));
}
#[test]
fn test_eval_three_digit_num() {
assert_eq!(evaluate("256"), Ok(256.0));
}
#[test]
fn | () {
assert_eq!(evaluate("125.256"), Ok(125.256));
}
#[test]
fn test_eval_add() {
assert_eq!(evaluate("1+2"), Ok(3.0));
}
#[test]
fn test_eval_sub() {
assert_eq!(evaluate("3-1"), Ok(2.0));
}
#[test]
fn test_eval_few_operations() {
assert_eq!(evaluate("2+3-1+4"), Ok(8.0));
}
#[test]
fn test_eval_mul() {
assert_eq!(evaluate("2×5"), Ok(10.0));
}
#[test]
fn test_eval_div() {
assert_eq!(evaluate("10÷2"), Ok(5.0));
}
#[test]
fn test_eval_operations_with_diff_priority() {
assert_eq!(evaluate("20+2×5-100÷4"), Ok(5.0));
}
#[test]
fn test_eval_operations_with_parentheses() {
assert_eq!(evaluate("2+(2-3+5×2)-8"), Ok(3.0));
}
#[test]
fn test_eval_operations_with_two_levels_of_parentheses() {
assert_eq!(evaluate("2+(2-3+5×2)-((1+1)×4)"), Ok(3.0));
}
}
| test_eval_real_num | identifier_name |
day_14.rs | use tdd_kata::string_calc_kata::iter_1::day_14::evaluate;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_eval_simple_num() {
assert_eq!(evaluate("1"), Ok(1.0));
}
#[test]
fn test_eval_three_digit_num() {
assert_eq!(evaluate("256"), Ok(256.0));
}
#[test]
fn test_eval_real_num() {
assert_eq!(evaluate("125.256"), Ok(125.256));
}
#[test]
fn test_eval_add() {
assert_eq!(evaluate("1+2"), Ok(3.0));
}
#[test]
fn test_eval_sub() {
assert_eq!(evaluate("3-1"), Ok(2.0));
}
#[test]
fn test_eval_few_operations() {
assert_eq!(evaluate("2+3-1+4"), Ok(8.0));
}
#[test]
fn test_eval_mul() |
#[test]
fn test_eval_div() {
assert_eq!(evaluate("10÷2"), Ok(5.0));
}
#[test]
fn test_eval_operations_with_diff_priority() {
assert_eq!(evaluate("20+2×5-100÷4"), Ok(5.0));
}
#[test]
fn test_eval_operations_with_parentheses() {
assert_eq!(evaluate("2+(2-3+5×2)-8"), Ok(3.0));
}
#[test]
fn test_eval_operations_with_two_levels_of_parentheses() {
assert_eq!(evaluate("2+(2-3+5×2)-((1+1)×4)"), Ok(3.0));
}
}
| {
assert_eq!(evaluate("2×5"), Ok(10.0));
}
| identifier_body |
day_14.rs | use tdd_kata::string_calc_kata::iter_1::day_14::evaluate;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_eval_simple_num() {
assert_eq!(evaluate("1"), Ok(1.0));
}
#[test]
fn test_eval_three_digit_num() {
assert_eq!(evaluate("256"), Ok(256.0));
}
#[test]
fn test_eval_real_num() {
assert_eq!(evaluate("125.256"), Ok(125.256));
}
#[test]
fn test_eval_add() {
assert_eq!(evaluate("1+2"), Ok(3.0));
}
#[test]
fn test_eval_sub() {
assert_eq!(evaluate("3-1"), Ok(2.0));
}
#[test]
fn test_eval_few_operations() {
assert_eq!(evaluate("2+3-1+4"), Ok(8.0));
}
#[test]
fn test_eval_mul() {
assert_eq!(evaluate("2×5"), Ok(10.0));
}
#[test]
fn test_eval_div() {
assert_eq!(evaluate("10÷2"), Ok(5.0));
}
#[test]
fn test_eval_operations_with_diff_priority() {
assert_eq!(evaluate("20+2×5-100÷4"), Ok(5.0));
} |
#[test]
fn test_eval_operations_with_parentheses() {
assert_eq!(evaluate("2+(2-3+5×2)-8"), Ok(3.0));
}
#[test]
fn test_eval_operations_with_two_levels_of_parentheses() {
assert_eq!(evaluate("2+(2-3+5×2)-((1+1)×4)"), Ok(3.0));
}
} | random_line_split |
|
extern-crate-only-used-in-link.rs | // This test is just a little cursed. | // aux-crate:priv:empty=empty.rs
// aux-build:empty2.rs
// aux-crate:priv:empty2=empty2.rs
// build-aux-docs
// compile-flags:-Z unstable-options --edition 2018
// @has extern_crate_only_used_in_link/index.html
// @has - '//a[@href="../issue_66159_1/struct.Something.html"]' 'issue_66159_1::Something'
//! [issue_66159_1::Something]
// @has - '//a[@href="../empty/index.html"]' 'empty'
//! [`empty`]
// @has - '//a[@href="../empty2/index.html"]' 'empty2'
//! [empty2<x>] | // aux-build:issue-66159-1.rs
// aux-crate:priv:issue_66159_1=issue-66159-1.rs
// aux-build:empty.rs | random_line_split |
ecdsa_common.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
use tink_core::TinkError;
use tink_proto::{EcdsaSignatureEncoding, EllipticCurveType, HashType};
/// Supported signature encodings. This is a precise subset of the protobuf enum,
/// allowing exact `match`es.
#[derive(Clone, Debug)]
pub enum SignatureEncoding {
Der,
IeeeP1363,
}
/// Validate ECDSA parameters.
/// The hash's strength must not be weaker than the curve's strength.
/// DER and IEEE_P1363 encodings are supported.
pub fn validate_ecdsa_params(
hash_alg: tink_proto::HashType,
curve: tink_proto::EllipticCurveType,
encoding: tink_proto::EcdsaSignatureEncoding,
) -> Result<SignatureEncoding, TinkError> {
let encoding = match encoding {
EcdsaSignatureEncoding::IeeeP1363 => SignatureEncoding::IeeeP1363,
EcdsaSignatureEncoding::Der => SignatureEncoding::Der,
_ => return Err("ecdsa: unsupported encoding".into()),
};
match curve {
EllipticCurveType::NistP256 => {
if hash_alg != HashType::Sha256 {
return Err("invalid hash type, expect SHA-256".into());
}
}
EllipticCurveType::NistP384 => |
EllipticCurveType::NistP521 => {
if hash_alg != HashType::Sha512 {
return Err("invalid hash type, expect SHA-512".into());
}
}
_ => return Err(format!("unsupported curve: {:?}", curve).into()),
}
Ok(encoding)
}
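// Editor's note: the tests below are an illustrative addition, not part of the
// original file. They exercise `validate_ecdsa_params` with combinations that the
// match above explicitly accepts or rejects; the module and test names are
// hypothetical.
#[cfg(test)]
mod validate_ecdsa_params_example {
    use super::*;
    #[test]
    fn p256_with_sha256_and_der_is_accepted() {
        let result = validate_ecdsa_params(
            HashType::Sha256,
            EllipticCurveType::NistP256,
            EcdsaSignatureEncoding::Der,
        );
        assert!(result.is_ok());
    }
    #[test]
    fn p256_with_sha512_is_rejected() {
        let result = validate_ecdsa_params(
            HashType::Sha512,
            EllipticCurveType::NistP256,
            EcdsaSignatureEncoding::Der,
        );
        assert!(result.is_err());
    }
}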
| {
if hash_alg != HashType::Sha384 && hash_alg != HashType::Sha512 {
return Err("invalid hash type, expect SHA-384 or SHA-512".into());
}
} | conditional_block |
ecdsa_common.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
use tink_core::TinkError;
use tink_proto::{EcdsaSignatureEncoding, EllipticCurveType, HashType};
/// Supported signature encodings. This is a precise subset of the protobuf enum,
/// allowing exact `match`es.
#[derive(Clone, Debug)]
pub enum | {
Der,
IeeeP1363,
}
/// Validate ECDSA parameters.
/// The hash's strength must not be weaker than the curve's strength.
/// DER and IEEE_P1363 encodings are supported.
pub fn validate_ecdsa_params(
hash_alg: tink_proto::HashType,
curve: tink_proto::EllipticCurveType,
encoding: tink_proto::EcdsaSignatureEncoding,
) -> Result<SignatureEncoding, TinkError> {
let encoding = match encoding {
EcdsaSignatureEncoding::IeeeP1363 => SignatureEncoding::IeeeP1363,
EcdsaSignatureEncoding::Der => SignatureEncoding::Der,
_ => return Err("ecdsa: unsupported encoding".into()),
};
match curve {
EllipticCurveType::NistP256 => {
if hash_alg != HashType::Sha256 {
return Err("invalid hash type, expect SHA-256".into());
}
}
EllipticCurveType::NistP384 => {
if hash_alg != HashType::Sha384 && hash_alg != HashType::Sha512 {
return Err("invalid hash type, expect SHA-384 or SHA-512".into());
}
}
EllipticCurveType::NistP521 => {
if hash_alg != HashType::Sha512 {
return Err("invalid hash type, expect SHA-512".into());
}
}
_ => return Err(format!("unsupported curve: {:?}", curve).into()),
}
Ok(encoding)
}
| SignatureEncoding | identifier_name |
ecdsa_common.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
use tink_core::TinkError;
use tink_proto::{EcdsaSignatureEncoding, EllipticCurveType, HashType};
/// Supported signature encodings. This is a precise subset of the protobuf enum,
/// allowing exact `match`es.
#[derive(Clone, Debug)]
pub enum SignatureEncoding {
Der,
IeeeP1363,
}
/// Validate ECDSA parameters.
/// The hash's strength must not be weaker than the curve's strength.
/// DER and IEEE_P1363 encodings are supported.
pub fn validate_ecdsa_params(
hash_alg: tink_proto::HashType,
curve: tink_proto::EllipticCurveType,
encoding: tink_proto::EcdsaSignatureEncoding,
) -> Result<SignatureEncoding, TinkError> | }
}
_ => return Err(format!("unsupported curve: {:?}", curve).into()),
}
Ok(encoding)
}
| {
let encoding = match encoding {
EcdsaSignatureEncoding::IeeeP1363 => SignatureEncoding::IeeeP1363,
EcdsaSignatureEncoding::Der => SignatureEncoding::Der,
_ => return Err("ecdsa: unsupported encoding".into()),
};
match curve {
EllipticCurveType::NistP256 => {
if hash_alg != HashType::Sha256 {
return Err("invalid hash type, expect SHA-256".into());
}
}
EllipticCurveType::NistP384 => {
if hash_alg != HashType::Sha384 && hash_alg != HashType::Sha512 {
return Err("invalid hash type, expect SHA-384 or SHA-512".into());
}
}
EllipticCurveType::NistP521 => {
if hash_alg != HashType::Sha512 {
return Err("invalid hash type, expect SHA-512".into()); | identifier_body |
ecdsa_common.rs | // Copyright 2020 The Tink-Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//////////////////////////////////////////////////////////////////////////////// |
use tink_core::TinkError;
use tink_proto::{EcdsaSignatureEncoding, EllipticCurveType, HashType};
/// Supported signature encodings. This is a precise subset of the protobuf enum,
/// allowing exact `match`es.
#[derive(Clone, Debug)]
pub enum SignatureEncoding {
Der,
IeeeP1363,
}
/// Validate ECDSA parameters.
/// The hash's strength must not be weaker than the curve's strength.
/// DER and IEEE_P1363 encodings are supported.
pub fn validate_ecdsa_params(
hash_alg: tink_proto::HashType,
curve: tink_proto::EllipticCurveType,
encoding: tink_proto::EcdsaSignatureEncoding,
) -> Result<SignatureEncoding, TinkError> {
let encoding = match encoding {
EcdsaSignatureEncoding::IeeeP1363 => SignatureEncoding::IeeeP1363,
EcdsaSignatureEncoding::Der => SignatureEncoding::Der,
_ => return Err("ecdsa: unsupported encoding".into()),
};
match curve {
EllipticCurveType::NistP256 => {
if hash_alg != HashType::Sha256 {
return Err("invalid hash type, expect SHA-256".into());
}
}
EllipticCurveType::NistP384 => {
if hash_alg != HashType::Sha384 && hash_alg != HashType::Sha512 {
return Err("invalid hash type, expect SHA-384 or SHA-512".into());
}
}
EllipticCurveType::NistP521 => {
if hash_alg != HashType::Sha512 {
return Err("invalid hash type, expect SHA-512".into());
}
}
_ => return Err(format!("unsupported curve: {:?}", curve).into()),
}
Ok(encoding)
} | random_line_split |
|
common.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::Mode::*;
use std::fmt;
use std::str::FromStr;
use std::path::PathBuf;
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum Mode {
CompileFail,
ParseFail,
RunFail,
RunPass,
RunPassValgrind,
Pretty,
DebugInfoGdb,
DebugInfoLldb,
Codegen,
Rustdoc,
}
impl FromStr for Mode {
type Err = ();
fn from_str(s: &str) -> Result<Mode, ()> {
match s {
"compile-fail" => Ok(CompileFail),
"parse-fail" => Ok(ParseFail),
"run-fail" => Ok(RunFail),
"run-pass" => Ok(RunPass),
"run-pass-valgrind" => Ok(RunPassValgrind),
"pretty" => Ok(Pretty),
"debuginfo-lldb" => Ok(DebugInfoLldb),
"debuginfo-gdb" => Ok(DebugInfoGdb),
"codegen" => Ok(Codegen),
"rustdoc" => Ok(Rustdoc),
_ => Err(()),
}
}
}
impl fmt::Display for Mode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
#[derive(Clone)]
pub struct Config {
// The library paths required for running the compiler
pub compile_lib_path: String,
// The library paths required for running compiled programs
pub run_lib_path: String,
// The rustc executable
pub rustc_path: PathBuf,
// The rustdoc executable
pub rustdoc_path: PathBuf,
// The python executable
pub python: String,
// The llvm binaries path
pub llvm_bin_path: Option<PathBuf>,
// The valgrind path
pub valgrind_path: Option<String>,
// Whether to fail if we can't run run-pass-valgrind tests under valgrind
// (or, alternatively, to silently run them like regular run-pass tests).
pub force_valgrind: bool,
// The directory containing the tests to run
pub src_base: PathBuf,
// The directory where programs should be built
pub build_base: PathBuf,
// Directory for auxiliary libraries
pub aux_base: PathBuf,
// The name of the stage being built (stage1, etc)
pub stage_id: String,
// The test mode, compile-fail, run-fail, run-pass
pub mode: Mode,
// Run ignored tests
pub run_ignored: bool,
// Only run tests that match this filter
pub filter: Option<String>,
// Write out a parseable log of tests that were run
pub logfile: Option<PathBuf>,
// A command line to prefix program execution with,
// for running under valgrind
pub runtool: Option<String>,
// Flags to pass to the compiler when building for the host
pub host_rustcflags: Option<String>,
// Flags to pass to the compiler when building for the target
pub target_rustcflags: Option<String>,
// Run tests using the JIT
pub jit: bool,
// Target system to be tested
pub target: String,
// Host triple for the compiler being invoked
pub host: String,
// Version of GDB
pub gdb_version: Option<String>,
// Version of LLDB
pub lldb_version: Option<String>,
// Path to the android tools
pub android_cross_path: PathBuf,
// Extra parameter to run adb on arm-linux-androideabi
pub adb_path: String,
// Extra parameter to run test suite on arm-linux-androideabi
pub adb_test_dir: String,
// status whether android device available or not
pub adb_device_status: bool,
// the path containing LLDB's Python module
pub lldb_python_dir: Option<String>,
// Explain what's going on
pub verbose: bool
}
| {
fmt::Display::fmt(match *self {
CompileFail => "compile-fail",
ParseFail => "parse-fail",
RunFail => "run-fail",
RunPass => "run-pass",
RunPassValgrind => "run-pass-valgrind",
Pretty => "pretty",
DebugInfoGdb => "debuginfo-gdb",
DebugInfoLldb => "debuginfo-lldb",
Codegen => "codegen",
Rustdoc => "rustdoc",
}, f)
} | identifier_body |
common.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::Mode::*;
use std::fmt;
use std::str::FromStr;
use std::path::PathBuf;
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum Mode {
CompileFail,
ParseFail,
RunFail,
RunPass,
RunPassValgrind,
Pretty,
DebugInfoGdb,
DebugInfoLldb,
Codegen,
Rustdoc,
}
impl FromStr for Mode {
type Err = ();
fn from_str(s: &str) -> Result<Mode, ()> {
match s {
"compile-fail" => Ok(CompileFail),
"parse-fail" => Ok(ParseFail),
"run-fail" => Ok(RunFail),
"run-pass" => Ok(RunPass),
"run-pass-valgrind" => Ok(RunPassValgrind),
"pretty" => Ok(Pretty),
"debuginfo-lldb" => Ok(DebugInfoLldb),
"debuginfo-gdb" => Ok(DebugInfoGdb),
"codegen" => Ok(Codegen),
"rustdoc" => Ok(Rustdoc),
_ => Err(()),
}
}
}
impl fmt::Display for Mode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(match *self {
CompileFail => "compile-fail",
ParseFail => "parse-fail",
RunFail => "run-fail",
RunPass => "run-pass",
RunPassValgrind => "run-pass-valgrind",
Pretty => "pretty",
DebugInfoGdb => "debuginfo-gdb",
DebugInfoLldb => "debuginfo-lldb",
Codegen => "codegen",
Rustdoc => "rustdoc",
}, f)
}
}
#[derive(Clone)]
pub struct Config {
// The library paths required for running the compiler
pub compile_lib_path: String,
// The library paths required for running compiled programs
pub run_lib_path: String,
// The rustc executable
pub rustc_path: PathBuf,
// The rustdoc executable
pub rustdoc_path: PathBuf,
| pub python: String,
// The llvm binaries path
pub llvm_bin_path: Option<PathBuf>,
// The valgrind path
pub valgrind_path: Option<String>,
// Whether to fail if we can't run run-pass-valgrind tests under valgrind
// (or, alternatively, to silently run them like regular run-pass tests).
pub force_valgrind: bool,
// The directory containing the tests to run
pub src_base: PathBuf,
// The directory where programs should be built
pub build_base: PathBuf,
// Directory for auxiliary libraries
pub aux_base: PathBuf,
// The name of the stage being built (stage1, etc)
pub stage_id: String,
// The test mode, compile-fail, run-fail, run-pass
pub mode: Mode,
// Run ignored tests
pub run_ignored: bool,
// Only run tests that match this filter
pub filter: Option<String>,
// Write out a parseable log of tests that were run
pub logfile: Option<PathBuf>,
// A command line to prefix program execution with,
// for running under valgrind
pub runtool: Option<String>,
// Flags to pass to the compiler when building for the host
pub host_rustcflags: Option<String>,
// Flags to pass to the compiler when building for the target
pub target_rustcflags: Option<String>,
// Run tests using the JIT
pub jit: bool,
// Target system to be tested
pub target: String,
// Host triple for the compiler being invoked
pub host: String,
// Version of GDB
pub gdb_version: Option<String>,
// Version of LLDB
pub lldb_version: Option<String>,
// Path to the android tools
pub android_cross_path: PathBuf,
// Extra parameter to run adb on arm-linux-androideabi
pub adb_path: String,
// Extra parameter to run test suite on arm-linux-androideabi
pub adb_test_dir: String,
// status whether android device available or not
pub adb_device_status: bool,
// the path containing LLDB's Python module
pub lldb_python_dir: Option<String>,
// Explain what's going on
pub verbose: bool
} | // The python executable | random_line_split |
common.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::Mode::*;
use std::fmt;
use std::str::FromStr;
use std::path::PathBuf;
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum Mode {
CompileFail,
ParseFail,
RunFail,
RunPass,
RunPassValgrind,
Pretty,
DebugInfoGdb,
DebugInfoLldb,
Codegen,
Rustdoc,
}
impl FromStr for Mode {
type Err = ();
fn from_str(s: &str) -> Result<Mode, ()> {
match s {
"compile-fail" => Ok(CompileFail),
"parse-fail" => Ok(ParseFail),
"run-fail" => Ok(RunFail),
"run-pass" => Ok(RunPass),
"run-pass-valgrind" => Ok(RunPassValgrind),
"pretty" => Ok(Pretty),
"debuginfo-lldb" => Ok(DebugInfoLldb),
"debuginfo-gdb" => Ok(DebugInfoGdb),
"codegen" => Ok(Codegen),
"rustdoc" => Ok(Rustdoc),
_ => Err(()),
}
}
}
impl fmt::Display for Mode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(match *self {
CompileFail => "compile-fail",
ParseFail => "parse-fail",
RunFail => "run-fail",
RunPass => "run-pass",
RunPassValgrind => "run-pass-valgrind",
Pretty => "pretty",
DebugInfoGdb => "debuginfo-gdb",
DebugInfoLldb => "debuginfo-lldb",
Codegen => "codegen",
Rustdoc => "rustdoc",
}, f)
}
}
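// Editor's note: the test below is an illustrative addition, not part of the
// original file. It checks that the `FromStr` and `Display` impls above agree on
// one mode; the module and test names are hypothetical.
#[cfg(test)]
mod mode_string_roundtrip_example {
    use super::*;
    #[test]
    fn parse_then_display_roundtrips() {
        let mode: Mode = "run-pass".parse().unwrap();
        assert_eq!(mode, RunPass);
        assert_eq!(format!("{}", mode), "run-pass");
    }
}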
#[derive(Clone)]
pub struct | {
// The library paths required for running the compiler
pub compile_lib_path: String,
// The library paths required for running compiled programs
pub run_lib_path: String,
// The rustc executable
pub rustc_path: PathBuf,
// The rustdoc executable
pub rustdoc_path: PathBuf,
// The python executable
pub python: String,
// The llvm binaries path
pub llvm_bin_path: Option<PathBuf>,
// The valgrind path
pub valgrind_path: Option<String>,
// Whether to fail if we can't run run-pass-valgrind tests under valgrind
// (or, alternatively, to silently run them like regular run-pass tests).
pub force_valgrind: bool,
// The directory containing the tests to run
pub src_base: PathBuf,
// The directory where programs should be built
pub build_base: PathBuf,
// Directory for auxiliary libraries
pub aux_base: PathBuf,
// The name of the stage being built (stage1, etc)
pub stage_id: String,
// The test mode, compile-fail, run-fail, run-pass
pub mode: Mode,
// Run ignored tests
pub run_ignored: bool,
// Only run tests that match this filter
pub filter: Option<String>,
// Write out a parseable log of tests that were run
pub logfile: Option<PathBuf>,
// A command line to prefix program execution with,
// for running under valgrind
pub runtool: Option<String>,
// Flags to pass to the compiler when building for the host
pub host_rustcflags: Option<String>,
// Flags to pass to the compiler when building for the target
pub target_rustcflags: Option<String>,
// Run tests using the JIT
pub jit: bool,
// Target system to be tested
pub target: String,
// Host triple for the compiler being invoked
pub host: String,
// Version of GDB
pub gdb_version: Option<String>,
// Version of LLDB
pub lldb_version: Option<String>,
// Path to the android tools
pub android_cross_path: PathBuf,
// Extra parameter to run adb on arm-linux-androideabi
pub adb_path: String,
// Extra parameter to run test suite on arm-linux-androideabi
pub adb_test_dir: String,
// status whether android device available or not
pub adb_device_status: bool,
// the path containing LLDB's Python module
pub lldb_python_dir: Option<String>,
// Explain what's going on
pub verbose: bool
}
| Config | identifier_name |
main.rs | //
// main.rs
//
// Copyright 2015-2019 Laurent Wandrebeck <[email protected]>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
// MA 02110-1301, USA.
//
//! # RuSh
//!
//! `RuSh` is a shell written in Rust
//! RuSh aims to be (maybe one day) a POSIX Shell, Bash compatible (or close to) with candies.
//! Source code is GPL3. Please note that this is a personal project (read not funded), in order to learn Rust language.
//! That does not mean feedback or patches are not welcome.
//! Right now, RuSh is definitely not useable. A couple little things have been done, but 99% (at least) have to be written.
extern crate chrono;
extern crate libc;
extern crate pest;
extern crate pest_consume;
extern crate rand;
extern crate rush;
extern crate rustyline;
extern crate term;
// pub for use is there so doc is generated.
pub use rush::arrays::{Array, Index};
pub use rush::opt::Opt;
pub use rush::parse::parse;
pub use rush::prompt::Prompt;
pub use rush::rush::RuSh;
pub use rush::variables::{Access, Value, Variable, Variables};
/// This is the main function. Initializes RuSh structure and starts the shell.
fn main() {
let mut rush = RuSh::default();
//rush.prompt = Prompt::get(&mut rush.shell_vars, "PS1");
rush.prompt = Prompt::get(&mut rush, "PS1");
//let mut stdin = io::stdin();
let mut rl = rustyline::Editor::<()>::new();
// take care of SECOND env var
//~ let child = thread::spawn(move || {
//~ loop {
//~ thread::sleep(time::Duration::new(1, 0));
//~ match shell.shell_vars.get("SECONDS") {
//~ Some(val) => { let mut s = val.geti(); s += 1; shell.shell_vars.set("SECONDS".to_string(), Variable { value: Value::I(s), rw: true }); },
//~ None => { shell.shell_vars.set("SECONDS".to_string(), Variable { value: Value::I(1), rw: true }); }
//~ }
//~ }
//~ });
// main loop. display prompt, wait for input, parse, etc.
loop {
let line = rl.readline(&rush.prompt.prompt);
// (very) Basic parsing for now. To be moved in parser.rs later on.
match line {
Ok(input) => {
// TODO fix history management
// rl.add_history_entry(&input);
parse(&mut rush, &input);
rush.cmd_nb += 1;
}
Err(_) => break,
}
// Use correct variable to define next prompt display. | 2 => rush.prompt = Prompt::get(&mut rush, "PS2"),
3 => rush.prompt = Prompt::get(&mut rush, "PS3"),
4 => rush.prompt = Prompt::get(&mut rush, "PS4"),
_ => panic!("wrong line_case value."),
}
}
} | match rush.line_case {
1 => rush.prompt = Prompt::get(&mut rush, "PS1"), | random_line_split |
main.rs | //
// main.rs
//
// Copyright 2015-2019 Laurent Wandrebeck <[email protected]>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
// MA 02110-1301, USA.
//
//! # RuSh
//!
//! `RuSh` is a shell written in Rust
//! RuSh aims to be (maybe one day) a POSIX Shell, Bash compatible (or close to) with candies.
//! Source code is GPL3. Please note that this is a personal project (read not funded), in order to learn Rust language.
//! That does not mean feedback or patches are not welcome.
//! Right now, RuSh is definitely not useable. A couple little things have been done, but 99% (at least) have to be written.
extern crate chrono;
extern crate libc;
extern crate pest;
extern crate pest_consume;
extern crate rand;
extern crate rush;
extern crate rustyline;
extern crate term;
// pub for use is there so doc is generated.
pub use rush::arrays::{Array, Index};
pub use rush::opt::Opt;
pub use rush::parse::parse;
pub use rush::prompt::Prompt;
pub use rush::rush::RuSh;
pub use rush::variables::{Access, Value, Variable, Variables};
/// This is the main function. Initializes RuSh structure and starts the shell.
fn main() | match line {
Ok(input) => {
// TODO fix history management
// rl.add_history_entry(&input);
parse(&mut rush, &input);
rush.cmd_nb += 1;
}
Err(_) => break,
}
// Use correct variable to define next prompt display.
match rush.line_case {
1 => rush.prompt = Prompt::get(&mut rush, "PS1"),
2 => rush.prompt = Prompt::get(&mut rush, "PS2"),
3 => rush.prompt = Prompt::get(&mut rush, "PS3"),
4 => rush.prompt = Prompt::get(&mut rush, "PS4"),
_ => panic!("wrong line_case value."),
}
}
}
| {
let mut rush = RuSh::default();
//rush.prompt = Prompt::get(&mut rush.shell_vars, "PS1");
rush.prompt = Prompt::get(&mut rush, "PS1");
//let mut stdin = io::stdin();
let mut rl = rustyline::Editor::<()>::new();
// take care of SECOND env var
//~ let child = thread::spawn(move || {
//~ loop {
//~ thread::sleep(time::Duration::new(1, 0));
//~ match shell.shell_vars.get("SECONDS") {
//~ Some(val) => { let mut s = val.geti(); s += 1; shell.shell_vars.set("SECONDS".to_string(), Variable { value: Value::I(s), rw: true }); },
//~ None => { shell.shell_vars.set("SECONDS".to_string(), Variable { value: Value::I(1), rw: true }); }
//~ }
//~ }
//~ });
// main loop. display prompt, wait for input, parse, etc.
loop {
let line = rl.readline(&rush.prompt.prompt);
// (very) Basic parsing for now. To be moved in parser.rs later on. | identifier_body |
main.rs | //
// main.rs
//
// Copyright 2015-2019 Laurent Wandrebeck <[email protected]>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
// MA 02110-1301, USA.
//
//! # RuSh
//!
//! `RuSh` is a shell written in Rust
//! RuSh aims to be (maybe one day) a POSIX Shell, Bash compatible (or close to) with candies.
//! Source code is GPL3. Please note that this is a personal project (read not funded), in order to learn Rust language.
//! That does not mean feedback or patches are not welcome.
//! Right now, RuSh is definitely not useable. A couple little things have been done, but 99% (at least) have to be written.
extern crate chrono;
extern crate libc;
extern crate pest;
extern crate pest_consume;
extern crate rand;
extern crate rush;
extern crate rustyline;
extern crate term;
// pub for use is there so doc is generated.
pub use rush::arrays::{Array, Index};
pub use rush::opt::Opt;
pub use rush::parse::parse;
pub use rush::prompt::Prompt;
pub use rush::rush::RuSh;
pub use rush::variables::{Access, Value, Variable, Variables};
/// This is the main function. Initializes RuSh structure and starts the shell.
fn | () {
let mut rush = RuSh::default();
//rush.prompt = Prompt::get(&mut rush.shell_vars, "PS1");
rush.prompt = Prompt::get(&mut rush, "PS1");
//let mut stdin = io::stdin();
let mut rl = rustyline::Editor::<()>::new();
// take care of SECOND env var
//~ let child = thread::spawn(move || {
//~ loop {
//~ thread::sleep(time::Duration::new(1, 0));
//~ match shell.shell_vars.get("SECONDS") {
//~ Some(val) => { let mut s = val.geti(); s += 1; shell.shell_vars.set("SECONDS".to_string(), Variable { value: Value::I(s), rw: true }); },
//~ None => { shell.shell_vars.set("SECONDS".to_string(), Variable { value: Value::I(1), rw: true }); }
//~ }
//~ }
//~ });
// main loop. display prompt, wait for input, parse, etc.
loop {
let line = rl.readline(&rush.prompt.prompt);
// (very) Basic parsing for now. To be moved in parser.rs later on.
match line {
Ok(input) => {
// TODO fix history management
// rl.add_history_entry(&input);
parse(&mut rush, &input);
rush.cmd_nb += 1;
}
Err(_) => break,
}
// Use correct variable to define next prompt display.
match rush.line_case {
1 => rush.prompt = Prompt::get(&mut rush, "PS1"),
2 => rush.prompt = Prompt::get(&mut rush, "PS2"),
3 => rush.prompt = Prompt::get(&mut rush, "PS3"),
4 => rush.prompt = Prompt::get(&mut rush, "PS4"),
_ => panic!("wrong line_case value."),
}
}
}
| main | identifier_name |
fulfill.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::mem_categorization::Typer;
use middle::ty;
use middle::typeck::infer::InferCtxt;
use util::ppaux::Repr;
use super::CodeAmbiguity;
use super::Obligation;
use super::FulfillmentError;
use super::CodeSelectionError;
use super::select::SelectionContext;
/// The fulfillment context is used to drive trait resolution. It
/// consists of a list of obligations that must be (eventually)
/// satisfied. The job is to track which are satisfied, which yielded
/// errors, and which are still pending. At any point, users can call
/// `select_where_possible`, and the fulfilment context will try to do
/// selection, retaining only those obligations that remain
/// ambiguous. This may be helpful in pushing type inference
/// along. Once all type inference constraints have been generated, the
/// method `select_all_or_error` can be used to report any remaining
/// ambiguous cases as errors.
pub struct FulfillmentContext<'tcx> {
// A list of all obligations that have been registered with this
// fulfillment context.
trait_obligations: Vec<Obligation<'tcx>>,
// Remembers the count of trait obligations that we have already
// attempted to select. This is used to avoid repeating work
// when `select_new_obligations` is called.
attempted_mark: uint,
}
impl<'tcx> FulfillmentContext<'tcx> {
pub fn new() -> FulfillmentContext<'tcx> {
FulfillmentContext {
trait_obligations: Vec::new(),
attempted_mark: 0,
}
}
pub fn register_obligation(&mut self,
tcx: &ty::ctxt<'tcx>,
obligation: Obligation<'tcx>)
{
debug!("register_obligation({})", obligation.repr(tcx));
assert!(!obligation.trait_ref.has_escaping_regions());
self.trait_obligations.push(obligation);
}
pub fn select_all_or_error<'a>(&mut self, | typer: &Typer<'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
try!(self.select_where_possible(infcx, param_env, typer));
// Anything left is ambiguous.
let errors: Vec<FulfillmentError> =
self.trait_obligations
.iter()
.map(|o| FulfillmentError::new((*o).clone(), CodeAmbiguity))
.collect();
if errors.is_empty() {
Ok(())
} else {
Err(errors)
}
}
/// Attempts to select obligations that were registered since the call to a selection routine.
/// This is used by the type checker to eagerly attempt to resolve obligations in hopes of
/// gaining type information. It'd be equally valid to use `select_where_possible` but it
/// results in `O(n^2)` performance (#18208).
pub fn select_new_obligations<'a>(&mut self,
infcx: &InferCtxt<'a,'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
typer: &Typer<'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
let mut selcx = SelectionContext::new(infcx, param_env, typer);
self.select(&mut selcx, true)
}
pub fn select_where_possible<'a>(&mut self,
infcx: &InferCtxt<'a,'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
typer: &Typer<'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
let mut selcx = SelectionContext::new(infcx, param_env, typer);
self.select(&mut selcx, false)
}
pub fn pending_trait_obligations(&self) -> &[Obligation<'tcx>] {
self.trait_obligations[]
}
/// Attempts to select obligations using `selcx`. If `only_new_obligations` is true, then it
/// only attempts to select obligations that haven't been seen before.
fn select<'a>(&mut self,
selcx: &mut SelectionContext<'a, 'tcx>,
only_new_obligations: bool)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
debug!("select({} obligations, only_new_obligations={}) start",
self.trait_obligations.len(),
only_new_obligations);
let tcx = selcx.tcx();
let mut errors = Vec::new();
loop {
let count = self.trait_obligations.len();
debug!("select_where_possible({} obligations) iteration",
count);
let mut selections = Vec::new();
// If we are only attempting obligations we haven't seen yet,
// then set `skip` to the number of obligations we've already
// seen.
let mut skip = if only_new_obligations {
self.attempted_mark
} else {
0
};
// First pass: walk each obligation, retaining
// only those that we cannot yet process.
self.trait_obligations.retain(|obligation| {
// Hack: Retain does not pass in the index, but we want
// to avoid processing the first `start_count` entries.
if skip > 0 {
skip -= 1;
true
} else {
match selcx.select(obligation) {
Ok(None) => {
true
}
Ok(Some(s)) => {
selections.push(s);
false
}
Err(selection_err) => {
debug!("obligation: {} error: {}",
obligation.repr(tcx),
selection_err.repr(tcx));
errors.push(FulfillmentError::new(
(*obligation).clone(),
CodeSelectionError(selection_err)));
false
}
}
}
});
self.attempted_mark = self.trait_obligations.len();
if self.trait_obligations.len() == count {
// Nothing changed.
break;
}
// Now go through all the successful ones,
// registering any nested obligations for the future.
for selection in selections.into_iter() {
selection.map_move_nested(
|o| self.register_obligation(tcx, o));
}
}
debug!("select({} obligations, {} errors) done",
self.trait_obligations.len(),
errors.len());
if errors.len() == 0 {
Ok(())
} else {
Err(errors)
}
}
} | infcx: &InferCtxt<'a,'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>, | random_line_split |
fulfill.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::mem_categorization::Typer;
use middle::ty;
use middle::typeck::infer::InferCtxt;
use util::ppaux::Repr;
use super::CodeAmbiguity;
use super::Obligation;
use super::FulfillmentError;
use super::CodeSelectionError;
use super::select::SelectionContext;
/// The fulfillment context is used to drive trait resolution. It
/// consists of a list of obligations that must be (eventually)
/// satisfied. The job is to track which are satisfied, which yielded
/// errors, and which are still pending. At any point, users can call
/// `select_where_possible`, and the fulfilment context will try to do
/// selection, retaining only those obligations that remain
/// ambiguous. This may be helpful in pushing type inference
/// along. Once all type inference constraints have been generated, the
/// method `select_all_or_error` can be used to report any remaining
/// ambiguous cases as errors.
pub struct FulfillmentContext<'tcx> {
// A list of all obligations that have been registered with this
// fulfillment context.
trait_obligations: Vec<Obligation<'tcx>>,
// Remembers the count of trait obligations that we have already
// attempted to select. This is used to avoid repeating work
// when `select_new_obligations` is called.
attempted_mark: uint,
}
impl<'tcx> FulfillmentContext<'tcx> {
pub fn new() -> FulfillmentContext<'tcx> |
pub fn register_obligation(&mut self,
tcx: &ty::ctxt<'tcx>,
obligation: Obligation<'tcx>)
{
debug!("register_obligation({})", obligation.repr(tcx));
assert!(!obligation.trait_ref.has_escaping_regions());
self.trait_obligations.push(obligation);
}
pub fn select_all_or_error<'a>(&mut self,
infcx: &InferCtxt<'a,'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
typer: &Typer<'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
try!(self.select_where_possible(infcx, param_env, typer));
// Anything left is ambiguous.
let errors: Vec<FulfillmentError> =
self.trait_obligations
.iter()
.map(|o| FulfillmentError::new((*o).clone(), CodeAmbiguity))
.collect();
if errors.is_empty() {
Ok(())
} else {
Err(errors)
}
}
/// Attempts to select obligations that were registered since the call to a selection routine.
/// This is used by the type checker to eagerly attempt to resolve obligations in hopes of
/// gaining type information. It'd be equally valid to use `select_where_possible` but it
/// results in `O(n^2)` performance (#18208).
pub fn select_new_obligations<'a>(&mut self,
infcx: &InferCtxt<'a,'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
typer: &Typer<'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
let mut selcx = SelectionContext::new(infcx, param_env, typer);
self.select(&mut selcx, true)
}
pub fn select_where_possible<'a>(&mut self,
infcx: &InferCtxt<'a,'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
typer: &Typer<'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
let mut selcx = SelectionContext::new(infcx, param_env, typer);
self.select(&mut selcx, false)
}
pub fn pending_trait_obligations(&self) -> &[Obligation<'tcx>] {
self.trait_obligations[]
}
/// Attempts to select obligations using `selcx`. If `only_new_obligations` is true, then it
/// only attempts to select obligations that haven't been seen before.
fn select<'a>(&mut self,
selcx: &mut SelectionContext<'a, 'tcx>,
only_new_obligations: bool)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
debug!("select({} obligations, only_new_obligations={}) start",
self.trait_obligations.len(),
only_new_obligations);
let tcx = selcx.tcx();
let mut errors = Vec::new();
loop {
let count = self.trait_obligations.len();
debug!("select_where_possible({} obligations) iteration",
count);
let mut selections = Vec::new();
// If we are only attempting obligations we haven't seen yet,
// then set `skip` to the number of obligations we've already
// seen.
let mut skip = if only_new_obligations {
self.attempted_mark
} else {
0
};
// First pass: walk each obligation, retaining
// only those that we cannot yet process.
self.trait_obligations.retain(|obligation| {
// Hack: Retain does not pass in the index, but we want
// to avoid processing the first `start_count` entries.
if skip > 0 {
skip -= 1;
true
} else {
match selcx.select(obligation) {
Ok(None) => {
true
}
Ok(Some(s)) => {
selections.push(s);
false
}
Err(selection_err) => {
debug!("obligation: {} error: {}",
obligation.repr(tcx),
selection_err.repr(tcx));
errors.push(FulfillmentError::new(
(*obligation).clone(),
CodeSelectionError(selection_err)));
false
}
}
}
});
self.attempted_mark = self.trait_obligations.len();
if self.trait_obligations.len() == count {
// Nothing changed.
break;
}
// Now go through all the successful ones,
// registering any nested obligations for the future.
for selection in selections.into_iter() {
selection.map_move_nested(
|o| self.register_obligation(tcx, o));
}
}
debug!("select({} obligations, {} errors) done",
self.trait_obligations.len(),
errors.len());
if errors.len() == 0 {
Ok(())
} else {
Err(errors)
}
}
}
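// Editor's note: the function below is an illustrative addition, not part of the
// original file. It sketches the call sequence described in the doc comment on
// `FulfillmentContext` (register obligations, drive selection, then demand that
// everything resolved). The name and parameters are hypothetical; the inference
// context, parameter environment and typer are assumed to be supplied by the
// caller, as in the methods above.
#[allow(dead_code)]
fn fulfill_all_example<'a, 'tcx>(tcx: &ty::ctxt<'tcx>,
                                 infcx: &InferCtxt<'a, 'tcx>,
                                 param_env: &ty::ParameterEnvironment<'tcx>,
                                 typer: &Typer<'tcx>,
                                 obligations: Vec<Obligation<'tcx>>)
                                 -> Result<(), Vec<FulfillmentError<'tcx>>> {
    let mut fulfill_cx = FulfillmentContext::new();
    for obligation in obligations.into_iter() {
        fulfill_cx.register_obligation(tcx, obligation);
    }
    // Make what progress we can first, then require everything to be resolved.
    try!(fulfill_cx.select_where_possible(infcx, param_env, typer));
    fulfill_cx.select_all_or_error(infcx, param_env, typer)
}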
| {
FulfillmentContext {
trait_obligations: Vec::new(),
attempted_mark: 0,
}
} | identifier_body |
fulfill.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::mem_categorization::Typer;
use middle::ty;
use middle::typeck::infer::InferCtxt;
use util::ppaux::Repr;
use super::CodeAmbiguity;
use super::Obligation;
use super::FulfillmentError;
use super::CodeSelectionError;
use super::select::SelectionContext;
/// The fulfillment context is used to drive trait resolution. It
/// consists of a list of obligations that must be (eventually)
/// satisfied. The job is to track which are satisfied, which yielded
/// errors, and which are still pending. At any point, users can call
/// `select_where_possible`, and the fulfilment context will try to do
/// selection, retaining only those obligations that remain
/// ambiguous. This may be helpful in pushing type inference
/// along. Once all type inference constraints have been generated, the
/// method `select_all_or_error` can be used to report any remaining
/// ambiguous cases as errors.
pub struct FulfillmentContext<'tcx> {
// A list of all obligations that have been registered with this
// fulfillment context.
trait_obligations: Vec<Obligation<'tcx>>,
// Remembers the count of trait obligations that we have already
// attempted to select. This is used to avoid repeating work
// when `select_new_obligations` is called.
attempted_mark: uint,
}
impl<'tcx> FulfillmentContext<'tcx> {
pub fn new() -> FulfillmentContext<'tcx> {
FulfillmentContext {
trait_obligations: Vec::new(),
attempted_mark: 0,
}
}
pub fn register_obligation(&mut self,
tcx: &ty::ctxt<'tcx>,
obligation: Obligation<'tcx>)
{
debug!("register_obligation({})", obligation.repr(tcx));
assert!(!obligation.trait_ref.has_escaping_regions());
self.trait_obligations.push(obligation);
}
pub fn select_all_or_error<'a>(&mut self,
infcx: &InferCtxt<'a,'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
typer: &Typer<'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
try!(self.select_where_possible(infcx, param_env, typer));
// Anything left is ambiguous.
let errors: Vec<FulfillmentError> =
self.trait_obligations
.iter()
.map(|o| FulfillmentError::new((*o).clone(), CodeAmbiguity))
.collect();
if errors.is_empty() {
Ok(())
} else {
Err(errors)
}
}
/// Attempts to select obligations that were registered since the call to a selection routine.
/// This is used by the type checker to eagerly attempt to resolve obligations in hopes of
/// gaining type information. It'd be equally valid to use `select_where_possible` but it
/// results in `O(n^2)` performance (#18208).
pub fn select_new_obligations<'a>(&mut self,
infcx: &InferCtxt<'a,'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
typer: &Typer<'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
let mut selcx = SelectionContext::new(infcx, param_env, typer);
self.select(&mut selcx, true)
}
pub fn | <'a>(&mut self,
infcx: &InferCtxt<'a,'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>,
typer: &Typer<'tcx>)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
let mut selcx = SelectionContext::new(infcx, param_env, typer);
self.select(&mut selcx, false)
}
pub fn pending_trait_obligations(&self) -> &[Obligation<'tcx>] {
self.trait_obligations[]
}
/// Attempts to select obligations using `selcx`. If `only_new_obligations` is true, then it
/// only attempts to select obligations that haven't been seen before.
fn select<'a>(&mut self,
selcx: &mut SelectionContext<'a, 'tcx>,
only_new_obligations: bool)
-> Result<(),Vec<FulfillmentError<'tcx>>>
{
debug!("select({} obligations, only_new_obligations={}) start",
self.trait_obligations.len(),
only_new_obligations);
let tcx = selcx.tcx();
let mut errors = Vec::new();
loop {
let count = self.trait_obligations.len();
debug!("select_where_possible({} obligations) iteration",
count);
let mut selections = Vec::new();
// If we are only attempting obligations we haven't seen yet,
// then set `skip` to the number of obligations we've already
// seen.
let mut skip = if only_new_obligations {
self.attempted_mark
} else {
0
};
// First pass: walk each obligation, retaining
// only those that we cannot yet process.
self.trait_obligations.retain(|obligation| {
// Hack: Retain does not pass in the index, but we want
// to avoid processing the first `start_count` entries.
if skip > 0 {
skip -= 1;
true
} else {
match selcx.select(obligation) {
Ok(None) => {
true
}
Ok(Some(s)) => {
selections.push(s);
false
}
Err(selection_err) => {
debug!("obligation: {} error: {}",
obligation.repr(tcx),
selection_err.repr(tcx));
errors.push(FulfillmentError::new(
(*obligation).clone(),
CodeSelectionError(selection_err)));
false
}
}
}
});
self.attempted_mark = self.trait_obligations.len();
if self.trait_obligations.len() == count {
// Nothing changed.
break;
}
// Now go through all the successful ones,
// registering any nested obligations for the future.
for selection in selections.into_iter() {
selection.map_move_nested(
|o| self.register_obligation(tcx, o));
}
}
debug!("select({} obligations, {} errors) done",
self.trait_obligations.len(),
errors.len());
if errors.len() == 0 {
Ok(())
} else {
Err(errors)
}
}
}
| select_where_possible | identifier_name |
callback.rs | // Copyright (c) 2016 Yusuke Sasaki
//
// This software is released under the MIT License.
// See http://opensource.org/licenses/mit-license.php or <LICENSE>.
extern crate gurobi;
use gurobi::*;
use std::io::{BufWriter, Write};
use std::fs::OpenOptions;
fn main() | // Ignore polling callback
}
// Currently performing presolve
PreSolve { coldel, rowdel,.. } => {
println!("@PreSolve");
if coldel > 0 || rowdel > 0 {
println!("**** {} columns and {} rows are removed. ****",
coldel,
rowdel);
}
}
// Currently in simplex
Simplex { ispert, itrcnt, objval, priminf, dualinf } => {
if itrcnt - lastiter >= 100.0 {
lastiter = itrcnt;
let ch = match ispert {
0 => ' ',
1 => 'S',
_ => 'P'
};
println!("@Simplex: itrcnt={}, objval={}{}, priminf={}, dualinf={}.",
itrcnt,
objval,
ch,
priminf,
dualinf);
}
}
// Currently in MIP
MIP { solcnt, cutcnt, objbst, objbnd, nodcnt, nodleft: actnodes, itrcnt } => {
if nodcnt - lastnode >= 100.0 {
lastnode = nodcnt;
println!("@MIP: nodcnt={}, actnodes={}, itrcnt={}, objbst={}, objbnd={}, solcnt={}, cutcnt={}.",
nodcnt,
actnodes,
itrcnt,
objbst,
objbnd,
solcnt,
cutcnt);
}
if (objbst - objbnd).abs() < 0.1 * (1.0 + objbst.abs()) {
println!("Stop early - 10% gap achived");
ctx.terminate();
}
if nodcnt >= 10000.0 && solcnt != 0.0 {
println!("Stop early - 10000 nodes explored");
ctx.terminate();
}
}
// Found a new MIP incumbent
MIPSol { solcnt, obj, nodcnt,.. } => {
println!("@MIPSol: ");
let x = try!(ctx.get_solution(vars.as_slice()));
println!("**** New solution at node {}, obj {}, sol {}, x[0] = {} ****",
nodcnt,
obj,
solcnt,
x[0]);
}
// Currently exploring a MIP node
MIPNode {.. } => {
println!("@MIPNode");
println!("**** NEW NODE! ****");
let x = try!(ctx.get_node_rel(vars.as_slice()));
println!(" relaxed solution = {:?}", x);
try!(ctx.set_solution(vars.as_slice(), x.as_slice()));
}
// Currently in barrier
Barrier { itrcnt, primobj, dualobj, priminf, dualinf, compl } => {
println!("@Barrier: itrcnt={}, primobj={}, dualobj={}, priminf={}, dualinf={}, compl={}.",
itrcnt,
primobj,
dualobj,
priminf,
dualinf,
compl);
}
// Printing a log message
Message(message) => {
writer.write_all(message.as_bytes()).unwrap();
writer.write_all(&[b'\n']).unwrap();
}
}
Ok(())
}
};
model.optimize_with_callback(callback).unwrap();
println!("\nOptimization complete");
if model.get(attr::SolCount).unwrap() == 0 {
println!("No solution found. optimization status = {:?}",
model.status());
} else {
println!("Solution found. objective = {}",
model.get(attr::ObjVal).unwrap());
for v in model.get_vars() {
let vname = v.get(&model, attr::VarName).unwrap();
let value = v.get(&model, attr::X).unwrap();
if value > 1e-25 {
println!(" {}: {}", vname, value);
}
}
}
}
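// Editor's note: the helper below is an illustrative addition, not part of the
// original example. It restates the early-stop test used in the MIP branch of
// the callback above as a standalone function; the name is hypothetical.
#[allow(dead_code)]
fn within_relative_gap(objbst: f64, objbnd: f64, gap: f64) -> bool {
    // With gap = 0.1 this mirrors the 10% criterion used in the callback.
    (objbst - objbnd).abs() < gap * (1.0 + objbst.abs())
}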
| {
let mut env = Env::new("callback.log").unwrap();
env.set(param::OutputFlag, 0).unwrap();
env.set(param::Heuristics, 0.0).unwrap();
let mut model = Model::read_from(&std::env::args().nth(1).unwrap(), &env).unwrap();
let callback = {
let mut lastiter = -INFINITY;
let mut lastnode = -INFINITY;
let vars: Vec<_> = model.get_vars().cloned().collect();
let file = OpenOptions::new().write(true).create(true).open("cb.log").unwrap();
let mut writer = BufWriter::new(file);
move |ctx: Callback| {
use gurobi::Where::*;
match ctx.get_where() {
// Periodic polling callback
Polling => { | identifier_body |
callback.rs | // Copyright (c) 2016 Yusuke Sasaki
//
// This software is released under the MIT License.
// See http://opensource.org/licenses/mit-license.php or <LICENSE>.
extern crate gurobi;
use gurobi::*;
use std::io::{BufWriter, Write};
use std::fs::OpenOptions;
fn main() {
let mut env = Env::new("callback.log").unwrap();
env.set(param::OutputFlag, 0).unwrap();
env.set(param::Heuristics, 0.0).unwrap();
let mut model = Model::read_from(&std::env::args().nth(1).unwrap(), &env).unwrap();
let callback = {
let mut lastiter = -INFINITY;
let mut lastnode = -INFINITY;
let vars: Vec<_> = model.get_vars().cloned().collect();
let file = OpenOptions::new().write(true).create(true).open("cb.log").unwrap();
let mut writer = BufWriter::new(file);
move |ctx: Callback| {
use gurobi::Where::*;
match ctx.get_where() {
// Periodic polling callback
Polling => {
// Ignore polling callback
}
// Currently performing presolve
PreSolve { coldel, rowdel, .. } => {
println!("@PreSolve");
if coldel > 0 || rowdel > 0 {
println!("**** {} columns and {} rows are removed. ****",
coldel,
rowdel);
}
}
// Currently in simplex
Simplex { ispert, itrcnt, objval, priminf, dualinf } => {
if itrcnt - lastiter >= 100.0 {
lastiter = itrcnt;
let ch = match ispert {
0 => ' ',
1 => 'S',
_ => 'P'
};
println!("@Simplex: itrcnt={}, objval={}{}, priminf={}, dualinf={}.",
itrcnt,
objval,
ch,
priminf,
dualinf);
}
}
// Currently in MIP
MIP { solcnt, cutcnt, objbst, objbnd, nodcnt, nodleft: actnodes, itrcnt } => | ctx.terminate();
}
}
// Found a new MIP incumbent
MIPSol { solcnt, obj, nodcnt, .. } => {
println!("@MIPSol: ");
let x = try!(ctx.get_solution(vars.as_slice()));
println!("**** New solution at node {}, obj {}, sol {}, x[0] = {} ****",
nodcnt,
obj,
solcnt,
x[0]);
}
// Currently exploring a MIP node
MIPNode { .. } => {
println!("@MIPNode");
println!("**** NEW NODE! ****");
let x = try!(ctx.get_node_rel(vars.as_slice()));
println!(" relaxed solution = {:?}", x);
try!(ctx.set_solution(vars.as_slice(), x.as_slice()));
}
// Currently in barrier
Barrier { itrcnt, primobj, dualobj, priminf, dualinf, compl } => {
println!("@Barrier: itrcnt={}, primobj={}, dualobj={}, priminf={}, dualinf={}, compl={}.",
itrcnt,
primobj,
dualobj,
priminf,
dualinf,
compl);
}
// Printing a log message
Message(message) => {
writer.write_all(message.as_bytes()).unwrap();
writer.write_all(&[b'\n']).unwrap();
}
}
Ok(())
}
};
model.optimize_with_callback(callback).unwrap();
println!("\nOptimization complete");
if model.get(attr::SolCount).unwrap() == 0 {
println!("No solution found. optimization status = {:?}",
model.status());
} else {
println!("Solution found. objective = {}",
model.get(attr::ObjVal).unwrap());
for v in model.get_vars() {
let vname = v.get(&model, attr::VarName).unwrap();
let value = v.get(&model, attr::X).unwrap();
if value > 1e-25 {
println!(" {}: {}", vname, value);
}
}
}
}
| {
if nodcnt - lastnode >= 100.0 {
lastnode = nodcnt;
println!("@MIP: nodcnt={}, actnodes={}, itrcnt={}, objbst={}, objbnd={}, solcnt={}, cutcnt={}.",
nodcnt,
actnodes,
itrcnt,
objbst,
objbnd,
solcnt,
cutcnt);
}
if (objbst - objbnd).abs() < 0.1 * (1.0 + objbst.abs()) {
println!("Stop early - 10% gap achived");
ctx.terminate();
}
if nodcnt >= 10000.0 && solcnt != 0.0 {
println!("Stop early - 10000 nodes explored"); | conditional_block |
callback.rs | // Copyright (c) 2016 Yusuke Sasaki
//
// This software is released under the MIT License.
// See http://opensource.org/licenses/mit-license.php or <LICENSE>.
extern crate gurobi;
use gurobi::*;
use std::io::{BufWriter, Write};
use std::fs::OpenOptions;
fn main() {
let mut env = Env::new("callback.log").unwrap();
env.set(param::OutputFlag, 0).unwrap();
env.set(param::Heuristics, 0.0).unwrap();
let mut model = Model::read_from(&std::env::args().nth(1).unwrap(), &env).unwrap();
let callback = {
let mut lastiter = -INFINITY;
let mut lastnode = -INFINITY;
let vars: Vec<_> = model.get_vars().cloned().collect();
let file = OpenOptions::new().write(true).create(true).open("cb.log").unwrap();
let mut writer = BufWriter::new(file);
move |ctx: Callback| {
use gurobi::Where::*;
match ctx.get_where() {
// Periodic polling callback
Polling => {
// Ignore polling callback
}
// Currently performing presolve
PreSolve { coldel, rowdel, .. } => {
println!("@PreSolve");
if coldel > 0 || rowdel > 0 {
println!("**** {} columns and {} rows are removed. ****",
coldel,
rowdel);
}
}
// Currently in simplex
Simplex { ispert, itrcnt, objval, priminf, dualinf } => {
if itrcnt - lastiter >= 100.0 {
lastiter = itrcnt;
let ch = match ispert {
0 => ' ',
1 => 'S',
_ => 'P'
};
println!("@Simplex: itrcnt={}, objval={}{}, priminf={}, dualinf={}.",
itrcnt,
objval,
ch,
priminf,
dualinf);
}
}
// Currently in MIP
MIP { solcnt, cutcnt, objbst, objbnd, nodcnt, nodleft: actnodes, itrcnt } => {
if nodcnt - lastnode >= 100.0 {
lastnode = nodcnt;
println!("@MIP: nodcnt={}, actnodes={}, itrcnt={}, objbst={}, objbnd={}, solcnt={}, cutcnt={}.",
nodcnt,
actnodes,
itrcnt,
objbst,
objbnd,
solcnt,
cutcnt);
}
if (objbst - objbnd).abs() < 0.1 * (1.0 + objbst.abs()) {
println!("Stop early - 10% gap achived");
ctx.terminate();
}
if nodcnt >= 10000.0 && solcnt != 0.0 {
println!("Stop early - 10000 nodes explored");
ctx.terminate();
}
}
// Found a new MIP incumbent
MIPSol { solcnt, obj, nodcnt, .. } => {
println!("@MIPSol: ");
let x = try!(ctx.get_solution(vars.as_slice()));
println!("**** New solution at node {}, obj {}, sol {}, x[0] = {} ****",
nodcnt,
obj,
solcnt,
x[0]);
}
// Currently exploring a MIP node
MIPNode { .. } => {
println!("@MIPNode");
println!("**** NEW NODE! ****");
let x = try!(ctx.get_node_rel(vars.as_slice()));
println!(" relaxed solution = {:?}", x);
try!(ctx.set_solution(vars.as_slice(), x.as_slice()));
}
// Currently in barrier
Barrier { itrcnt, primobj, dualobj, priminf, dualinf, compl } => {
println!("@Barrier: itrcnt={}, primobj={}, dualobj={}, priminf={}, dualinf={}, compl={}.",
itrcnt,
primobj,
dualobj,
priminf,
dualinf,
compl); | writer.write_all(message.as_bytes()).unwrap();
writer.write_all(&[b'\n']).unwrap();
}
}
Ok(())
}
};
model.optimize_with_callback(callback).unwrap();
println!("\nOptimization complete");
if model.get(attr::SolCount).unwrap() == 0 {
println!("No solution found. optimization status = {:?}",
model.status());
} else {
println!("Solution found. objective = {}",
model.get(attr::ObjVal).unwrap());
for v in model.get_vars() {
let vname = v.get(&model, attr::VarName).unwrap();
let value = v.get(&model, attr::X).unwrap();
if value > 1e-25 {
println!(" {}: {}", vname, value);
}
}
}
} | }
// Printing a log message
Message(message) => { | random_line_split |
callback.rs | // Copyright (c) 2016 Yusuke Sasaki
//
// This software is released under the MIT License.
// See http://opensource.org/licenses/mit-license.php or <LICENSE>.
extern crate gurobi;
use gurobi::*;
use std::io::{BufWriter, Write};
use std::fs::OpenOptions;
fn | () {
let mut env = Env::new("callback.log").unwrap();
env.set(param::OutputFlag, 0).unwrap();
env.set(param::Heuristics, 0.0).unwrap();
let mut model = Model::read_from(&std::env::args().nth(1).unwrap(), &env).unwrap();
let callback = {
let mut lastiter = -INFINITY;
let mut lastnode = -INFINITY;
let vars: Vec<_> = model.get_vars().cloned().collect();
let file = OpenOptions::new().write(true).create(true).open("cb.log").unwrap();
let mut writer = BufWriter::new(file);
move |ctx: Callback| {
use gurobi::Where::*;
match ctx.get_where() {
// Periodic polling callback
Polling => {
// Ignore polling callback
}
// Currently performing presolve
PreSolve { coldel, rowdel, .. } => {
println!("@PreSolve");
if coldel > 0 || rowdel > 0 {
println!("**** {} columns and {} rows are removed. ****",
coldel,
rowdel);
}
}
// Currently in simplex
Simplex { ispert, itrcnt, objval, priminf, dualinf } => {
if itrcnt - lastiter >= 100.0 {
lastiter = itrcnt;
let ch = match ispert {
0 => ' ',
1 => 'S',
_ => 'P'
};
println!("@Simplex: itrcnt={}, objval={}{}, priminf={}, dualinf={}.",
itrcnt,
objval,
ch,
priminf,
dualinf);
}
}
// Currently in MIP
MIP { solcnt, cutcnt, objbst, objbnd, nodcnt, nodleft: actnodes, itrcnt } => {
if nodcnt - lastnode >= 100.0 {
lastnode = nodcnt;
println!("@MIP: nodcnt={}, actnodes={}, itrcnt={}, objbst={}, objbnd={}, solcnt={}, cutcnt={}.",
nodcnt,
actnodes,
itrcnt,
objbst,
objbnd,
solcnt,
cutcnt);
}
if (objbst - objbnd).abs() < 0.1 * (1.0 + objbst.abs()) {
println!("Stop early - 10% gap achived");
ctx.terminate();
}
if nodcnt >= 10000.0 && solcnt != 0.0 {
println!("Stop early - 10000 nodes explored");
ctx.terminate();
}
}
// Found a new MIP incumbent
MIPSol { solcnt, obj, nodcnt, .. } => {
println!("@MIPSol: ");
let x = try!(ctx.get_solution(vars.as_slice()));
println!("**** New solution at node {}, obj {}, sol {}, x[0] = {} ****",
nodcnt,
obj,
solcnt,
x[0]);
}
// Currently exploring a MIP node
MIPNode { .. } => {
println!("@MIPNode");
println!("**** NEW NODE! ****");
let x = try!(ctx.get_node_rel(vars.as_slice()));
println!(" relaxed solution = {:?}", x);
try!(ctx.set_solution(vars.as_slice(), x.as_slice()));
}
// Currently in barrier
Barrier { itrcnt, primobj, dualobj, priminf, dualinf, compl } => {
println!("@Barrier: itrcnt={}, primobj={}, dualobj={}, priminf={}, dualinf={}, compl={}.",
itrcnt,
primobj,
dualobj,
priminf,
dualinf,
compl);
}
// Printing a log message
Message(message) => {
writer.write_all(message.as_bytes()).unwrap();
writer.write_all(&[b'\n']).unwrap();
}
}
Ok(())
}
};
model.optimize_with_callback(callback).unwrap();
println!("\nOptimization complete");
if model.get(attr::SolCount).unwrap() == 0 {
println!("No solution found. optimization status = {:?}",
model.status());
} else {
println!("Solution found. objective = {}",
model.get(attr::ObjVal).unwrap());
for v in model.get_vars() {
let vname = v.get(&model, attr::VarName).unwrap();
let value = v.get(&model, attr::X).unwrap();
if value > 1e-25 {
println!(" {}: {}", vname, value);
}
}
}
}
| main | identifier_name |
performanceentry.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::PerformanceEntryBinding;
use dom::bindings::codegen::Bindings::PerformanceEntryBinding::PerformanceEntryMethods;
use dom::bindings::num::Finite;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::root::DomRoot;
use dom::bindings::str::DOMString;
use dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
#[dom_struct]
pub struct PerformanceEntry {
reflector_: Reflector,
name: DOMString,
entry_type: DOMString,
start_time: f64,
duration: f64,
}
impl PerformanceEntry {
pub fn new_inherited(name: DOMString,
entry_type: DOMString,
start_time: f64,
duration: f64) -> PerformanceEntry {
PerformanceEntry {
reflector_: Reflector::new(),
name,
entry_type,
start_time,
duration,
}
}
#[allow(unrooted_must_root)]
pub fn new(global: &GlobalScope,
name: DOMString,
entry_type: DOMString,
start_time: f64,
duration: f64) -> DomRoot<PerformanceEntry> {
let entry = PerformanceEntry::new_inherited(name, entry_type, start_time, duration);
reflect_dom_object(Box::new(entry), global, PerformanceEntryBinding::Wrap)
}
pub fn entry_type(&self) -> &DOMString {
&self.entry_type
}
pub fn name(&self) -> &DOMString {
&self.name
}
pub fn start_time(&self) -> f64 {
self.start_time
}
}
impl PerformanceEntryMethods for PerformanceEntry {
// https://w3c.github.io/performance-timeline/#dom-performanceentry-name
fn Name(&self) -> DOMString {
DOMString::from(self.name.clone())
}
// https://w3c.github.io/performance-timeline/#dom-performanceentry-entrytype
fn EntryType(&self) -> DOMString {
DOMString::from(self.entry_type.clone())
}
// https://w3c.github.io/performance-timeline/#dom-performanceentry-starttime
fn StartTime(&self) -> Finite<f64> {
Finite::wrap(self.start_time)
} |
// https://w3c.github.io/performance-timeline/#dom-performanceentry-duration
fn Duration(&self) -> Finite<f64> {
Finite::wrap(self.duration)
}
} | random_line_split |
|
performanceentry.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::PerformanceEntryBinding;
use dom::bindings::codegen::Bindings::PerformanceEntryBinding::PerformanceEntryMethods;
use dom::bindings::num::Finite;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::root::DomRoot;
use dom::bindings::str::DOMString;
use dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
#[dom_struct]
pub struct PerformanceEntry {
reflector_: Reflector,
name: DOMString,
entry_type: DOMString,
start_time: f64,
duration: f64,
}
impl PerformanceEntry {
pub fn new_inherited(name: DOMString,
entry_type: DOMString,
start_time: f64,
duration: f64) -> PerformanceEntry {
PerformanceEntry {
reflector_: Reflector::new(),
name,
entry_type,
start_time,
duration,
}
}
#[allow(unrooted_must_root)]
pub fn new(global: &GlobalScope,
name: DOMString,
entry_type: DOMString,
start_time: f64,
duration: f64) -> DomRoot<PerformanceEntry> {
let entry = PerformanceEntry::new_inherited(name, entry_type, start_time, duration);
reflect_dom_object(Box::new(entry), global, PerformanceEntryBinding::Wrap)
}
pub fn entry_type(&self) -> &DOMString {
&self.entry_type
}
pub fn name(&self) -> &DOMString {
&self.name
}
pub fn start_time(&self) -> f64 {
self.start_time
}
}
impl PerformanceEntryMethods for PerformanceEntry {
// https://w3c.github.io/performance-timeline/#dom-performanceentry-name
fn Name(&self) -> DOMString {
DOMString::from(self.name.clone())
}
// https://w3c.github.io/performance-timeline/#dom-performanceentry-entrytype
fn EntryType(&self) -> DOMString {
DOMString::from(self.entry_type.clone())
}
// https://w3c.github.io/performance-timeline/#dom-performanceentry-starttime
fn StartTime(&self) -> Finite<f64> {
Finite::wrap(self.start_time)
}
// https://w3c.github.io/performance-timeline/#dom-performanceentry-duration
fn | (&self) -> Finite<f64> {
Finite::wrap(self.duration)
}
}
| Duration | identifier_name |
performanceentry.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::PerformanceEntryBinding;
use dom::bindings::codegen::Bindings::PerformanceEntryBinding::PerformanceEntryMethods;
use dom::bindings::num::Finite;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::root::DomRoot;
use dom::bindings::str::DOMString;
use dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
#[dom_struct]
pub struct PerformanceEntry {
reflector_: Reflector,
name: DOMString,
entry_type: DOMString,
start_time: f64,
duration: f64,
}
impl PerformanceEntry {
pub fn new_inherited(name: DOMString,
entry_type: DOMString,
start_time: f64,
duration: f64) -> PerformanceEntry {
PerformanceEntry {
reflector_: Reflector::new(),
name,
entry_type,
start_time,
duration,
}
}
#[allow(unrooted_must_root)]
pub fn new(global: &GlobalScope,
name: DOMString,
entry_type: DOMString,
start_time: f64,
duration: f64) -> DomRoot<PerformanceEntry> |
pub fn entry_type(&self) -> &DOMString {
&self.entry_type
}
pub fn name(&self) -> &DOMString {
&self.name
}
pub fn start_time(&self) -> f64 {
self.start_time
}
}
impl PerformanceEntryMethods for PerformanceEntry {
// https://w3c.github.io/performance-timeline/#dom-performanceentry-name
fn Name(&self) -> DOMString {
DOMString::from(self.name.clone())
}
// https://w3c.github.io/performance-timeline/#dom-performanceentry-entrytype
fn EntryType(&self) -> DOMString {
DOMString::from(self.entry_type.clone())
}
// https://w3c.github.io/performance-timeline/#dom-performanceentry-starttime
fn StartTime(&self) -> Finite<f64> {
Finite::wrap(self.start_time)
}
// https://w3c.github.io/performance-timeline/#dom-performanceentry-duration
fn Duration(&self) -> Finite<f64> {
Finite::wrap(self.duration)
}
}
| {
let entry = PerformanceEntry::new_inherited(name, entry_type, start_time, duration);
reflect_dom_object(Box::new(entry), global, PerformanceEntryBinding::Wrap)
} | identifier_body |
is_negative_zero.rs | use malachite_base::num::basic::floats::PrimitiveFloat;
use malachite_base::num::float::NiceFloat;
use malachite_base_test_util::generators::primitive_float_gen;
fn is_negative_zero_helper<T: PrimitiveFloat>() {
let test = |n: T, out| {
assert_eq!(n.is_negative_zero(), out);
};
test(T::ZERO, false);
test(T::NEGATIVE_ZERO, true);
test(T::NAN, false); | test(T::from(1.234), false);
test(T::from(-1.234), false);
}
#[test]
fn test_is_negative_zero() {
apply_fn_to_primitive_floats!(is_negative_zero_helper);
}
fn is_negative_zero_properties_helper<T: PrimitiveFloat>() {
primitive_float_gen::<T>().test_properties(|x| {
assert_eq!(
x.is_negative_zero(),
NiceFloat(x) != NiceFloat(x.abs_negative_zero())
);
});
}
#[test]
fn is_negative_zero_properties() {
apply_fn_to_primitive_floats!(is_negative_zero_properties_helper);
} | test(T::POSITIVE_INFINITY, false);
test(T::NEGATIVE_INFINITY, false);
test(T::ONE, false);
test(T::NEGATIVE_ONE, false); | random_line_split |
is_negative_zero.rs | use malachite_base::num::basic::floats::PrimitiveFloat;
use malachite_base::num::float::NiceFloat;
use malachite_base_test_util::generators::primitive_float_gen;
fn is_negative_zero_helper<T: PrimitiveFloat>() {
let test = |n: T, out| {
assert_eq!(n.is_negative_zero(), out);
};
test(T::ZERO, false);
test(T::NEGATIVE_ZERO, true);
test(T::NAN, false);
test(T::POSITIVE_INFINITY, false);
test(T::NEGATIVE_INFINITY, false);
test(T::ONE, false);
test(T::NEGATIVE_ONE, false);
test(T::from(1.234), false);
test(T::from(-1.234), false);
}
#[test]
fn test_is_negative_zero() {
apply_fn_to_primitive_floats!(is_negative_zero_helper);
}
fn is_negative_zero_properties_helper<T: PrimitiveFloat>() |
#[test]
fn is_negative_zero_properties() {
apply_fn_to_primitive_floats!(is_negative_zero_properties_helper);
}
| {
primitive_float_gen::<T>().test_properties(|x| {
assert_eq!(
x.is_negative_zero(),
NiceFloat(x) != NiceFloat(x.abs_negative_zero())
);
});
} | identifier_body |
is_negative_zero.rs | use malachite_base::num::basic::floats::PrimitiveFloat;
use malachite_base::num::float::NiceFloat;
use malachite_base_test_util::generators::primitive_float_gen;
fn is_negative_zero_helper<T: PrimitiveFloat>() {
let test = |n: T, out| {
assert_eq!(n.is_negative_zero(), out);
};
test(T::ZERO, false);
test(T::NEGATIVE_ZERO, true);
test(T::NAN, false);
test(T::POSITIVE_INFINITY, false);
test(T::NEGATIVE_INFINITY, false);
test(T::ONE, false);
test(T::NEGATIVE_ONE, false);
test(T::from(1.234), false);
test(T::from(-1.234), false);
}
#[test]
fn | () {
apply_fn_to_primitive_floats!(is_negative_zero_helper);
}
fn is_negative_zero_properties_helper<T: PrimitiveFloat>() {
primitive_float_gen::<T>().test_properties(|x| {
assert_eq!(
x.is_negative_zero(),
NiceFloat(x) != NiceFloat(x.abs_negative_zero())
);
});
}
#[test]
fn is_negative_zero_properties() {
apply_fn_to_primitive_floats!(is_negative_zero_properties_helper);
}
| test_is_negative_zero | identifier_name |
borrowck-borrowed-uniq-rvalue.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//buggy.rs
#![feature(box_syntax)]
extern crate collections;
use std::collections::HashMap;
fn | () {
let mut buggy_map: HashMap<usize, &usize> = HashMap::new();
buggy_map.insert(42, &*box 1); //~ ERROR borrowed value does not live long enough
// but it is ok if we use a temporary
let tmp = box 2;
buggy_map.insert(43, &*tmp);
}
| main | identifier_name |
borrowck-borrowed-uniq-rvalue.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//buggy.rs
#![feature(box_syntax)]
extern crate collections;
use std::collections::HashMap;
fn main() | {
let mut buggy_map: HashMap<usize, &usize> = HashMap::new();
buggy_map.insert(42, &*box 1); //~ ERROR borrowed value does not live long enough
// but it is ok if we use a temporary
let tmp = box 2;
buggy_map.insert(43, &*tmp);
} | identifier_body |
|
borrowck-borrowed-uniq-rvalue.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//buggy.rs
#![feature(box_syntax)]
extern crate collections; |
fn main() {
let mut buggy_map: HashMap<usize, &usize> = HashMap::new();
buggy_map.insert(42, &*box 1); //~ ERROR borrowed value does not live long enough
// but it is ok if we use a temporary
let tmp = box 2;
buggy_map.insert(43, &*tmp);
} | use std::collections::HashMap; | random_line_split |
playlist_track.rs | use std::collections::BTreeMap;
use uuid::Uuid;
use chrono::{NaiveDateTime, Utc};
use postgres;
use error::Error;
use super::{conn, Model};
use model::track::{Track, PROPS as TRACK_PROPS};
use model::playlist::Playlist;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PlaylistTrack {
pub playlist_id: Uuid,
pub track_id: Uuid,
pub created_at: NaiveDateTime,
pub updated_at: NaiveDateTime,
pub track: Track,
}
static PROPS: [&'static str; 4] = ["playlist_id",
"track_id",
"created_at",
"updated_at"];
impl PlaylistTrack {
fn props_str(prefix: &str) -> String |
fn row_to_item(row: postgres::rows::Row) -> Self {
let offset = TRACK_PROPS.len();
PlaylistTrack {
playlist_id: row.get(offset),
track_id: row.get(offset + 1),
created_at: row.get(offset + 2),
updated_at: row.get(offset + 3),
track: Track::row_to_item(row),
}
}
pub fn find_by_playlist_ids(playlist_ids: Vec<Uuid>) -> Result<BTreeMap<Uuid, Vec<PlaylistTrack>>, Error> {
let conn = conn().unwrap();
let sql = format!("SELECT {}, {} FROM tracks
LEFT OUTER JOIN playlist_tracks
ON playlist_tracks.track_id = tracks.id
WHERE playlist_tracks.playlist_id = ANY($1)
ORDER BY playlist_tracks.created_at DESC LIMIT {}",
Track::props_str("tracks."),
PlaylistTrack::props_str("playlist_tracks."),
playlist_ids.len() * 200);
let stmt = conn.prepare(&sql).map_err(|e| {
println!("{:?}", e);
e
})?;
let rows = stmt.query(&[&playlist_ids]).map_err(|e| {
println!("{:?}", e);
e
})?;
let mut items: BTreeMap<Uuid, Vec<PlaylistTrack>> = BTreeMap::new();
for id in playlist_ids.iter() {
items.insert(*id, vec![]);
}
for row in rows.iter() {
let id: Uuid = row.get(TRACK_PROPS.len());
if let Some(playlist_tracks) = items.get_mut(&id) {
let pt = PlaylistTrack::row_to_item(row);
playlist_tracks.push(pt);
}
};
Ok(items)
}
pub fn upsert(playlist: &Playlist, track: &Track) -> Result<PlaylistTrack, Error> {
let conn = conn()?;
let stmt = conn.prepare("INSERT INTO playlist_tracks
(track_id, playlist_id, created_at, updated_at)
VALUES ($1, $2, $3, $4)
ON CONFLICT (track_id, playlist_id)
DO UPDATE SET updated_at=$4
RETURNING playlist_tracks.created_at, playlist_tracks.updated_at")?;
let now = Utc::now().naive_utc();
let rows = stmt.query(&[&track.id, &playlist.id, &now, &now])?;
let row = rows.iter().next().ok_or(Error::Unexpected)?;
Ok(PlaylistTrack {
playlist_id: playlist.id,
track_id: track.id,
created_at: row.get(0),
updated_at: row.get(1),
track: track.clone(),
})
}
}
| {
PROPS
.iter()
.map(|&p| format!("{}{}", prefix, p))
.collect::<Vec<String>>().join(",")
} | identifier_body |
playlist_track.rs | use std::collections::BTreeMap;
use uuid::Uuid;
use chrono::{NaiveDateTime, Utc};
use postgres;
use error::Error;
use super::{conn, Model};
use model::track::{Track, PROPS as TRACK_PROPS};
use model::playlist::Playlist;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct | {
pub playlist_id: Uuid,
pub track_id: Uuid,
pub created_at: NaiveDateTime,
pub updated_at: NaiveDateTime,
pub track: Track,
}
static PROPS: [&'static str; 4] = ["playlist_id",
"track_id",
"created_at",
"updated_at"];
impl PlaylistTrack {
fn props_str(prefix: &str) -> String {
PROPS
.iter()
.map(|&p| format!("{}{}", prefix, p))
.collect::<Vec<String>>().join(",")
}
fn row_to_item(row: postgres::rows::Row) -> Self {
let offset = TRACK_PROPS.len();
PlaylistTrack {
playlist_id: row.get(offset),
track_id: row.get(offset + 1),
created_at: row.get(offset + 2),
updated_at: row.get(offset + 3),
track: Track::row_to_item(row),
}
}
pub fn find_by_playlist_ids(playlist_ids: Vec<Uuid>) -> Result<BTreeMap<Uuid, Vec<PlaylistTrack>>, Error> {
let conn = conn().unwrap();
let sql = format!("SELECT {}, {} FROM tracks
LEFT OUTER JOIN playlist_tracks
ON playlist_tracks.track_id = tracks.id
WHERE playlist_tracks.playlist_id = ANY($1)
ORDER BY playlist_tracks.created_at DESC LIMIT {}",
Track::props_str("tracks."),
PlaylistTrack::props_str("playlist_tracks."),
playlist_ids.len() * 200);
let stmt = conn.prepare(&sql).map_err(|e| {
println!("{:?}", e);
e
})?;
let rows = stmt.query(&[&playlist_ids]).map_err(|e| {
println!("{:?}", e);
e
})?;
let mut items: BTreeMap<Uuid, Vec<PlaylistTrack>> = BTreeMap::new();
for id in playlist_ids.iter() {
items.insert(*id, vec![]);
}
for row in rows.iter() {
let id: Uuid = row.get(TRACK_PROPS.len());
if let Some(playlist_tracks) = items.get_mut(&id) {
let pt = PlaylistTrack::row_to_item(row);
playlist_tracks.push(pt);
}
};
Ok(items)
}
pub fn upsert(playlist: &Playlist, track: &Track) -> Result<PlaylistTrack, Error> {
let conn = conn()?;
let stmt = conn.prepare("INSERT INTO playlist_tracks
(track_id, playlist_id, created_at, updated_at)
VALUES ($1, $2, $3, $4)
ON CONFLICT (track_id, playlist_id)
DO UPDATE SET updated_at=$4
RETURNING playlist_tracks.created_at, playlist_tracks.updated_at")?;
let now = Utc::now().naive_utc();
let rows = stmt.query(&[&track.id, &playlist.id, &now, &now])?;
let row = rows.iter().next().ok_or(Error::Unexpected)?;
Ok(PlaylistTrack {
playlist_id: playlist.id,
track_id: track.id,
created_at: row.get(0),
updated_at: row.get(1),
track: track.clone(),
})
}
}
| PlaylistTrack | identifier_name |
playlist_track.rs | use std::collections::BTreeMap;
use uuid::Uuid;
use chrono::{NaiveDateTime, Utc};
use postgres;
use error::Error;
use super::{conn, Model};
use model::track::{Track, PROPS as TRACK_PROPS};
use model::playlist::Playlist;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PlaylistTrack {
pub playlist_id: Uuid,
pub track_id: Uuid,
pub created_at: NaiveDateTime,
pub updated_at: NaiveDateTime,
pub track: Track,
}
static PROPS: [&'static str; 4] = ["playlist_id",
"track_id",
"created_at",
"updated_at"];
impl PlaylistTrack {
fn props_str(prefix: &str) -> String {
PROPS
.iter()
.map(|&p| format!("{}{}", prefix, p))
.collect::<Vec<String>>().join(",")
}
fn row_to_item(row: postgres::rows::Row) -> Self {
let offset = TRACK_PROPS.len();
PlaylistTrack {
playlist_id: row.get(offset),
track_id: row.get(offset + 1),
created_at: row.get(offset + 2),
updated_at: row.get(offset + 3),
track: Track::row_to_item(row),
}
}
pub fn find_by_playlist_ids(playlist_ids: Vec<Uuid>) -> Result<BTreeMap<Uuid, Vec<PlaylistTrack>>, Error> {
let conn = conn().unwrap();
let sql = format!("SELECT {}, {} FROM tracks
LEFT OUTER JOIN playlist_tracks
ON playlist_tracks.track_id = tracks.id
WHERE playlist_tracks.playlist_id = ANY($1)
ORDER BY playlist_tracks.created_at DESC LIMIT {}",
Track::props_str("tracks."),
PlaylistTrack::props_str("playlist_tracks."),
playlist_ids.len() * 200);
let stmt = conn.prepare(&sql).map_err(|e| {
println!("{:?}", e);
e
})?;
let rows = stmt.query(&[&playlist_ids]).map_err(|e| {
println!("{:?}", e);
e
})?;
let mut items: BTreeMap<Uuid, Vec<PlaylistTrack>> = BTreeMap::new();
for id in playlist_ids.iter() {
items.insert(*id, vec![]);
}
for row in rows.iter() {
let id: Uuid = row.get(TRACK_PROPS.len());
if let Some(playlist_tracks) = items.get_mut(&id) |
};
Ok(items)
}
pub fn upsert(playlist: &Playlist, track: &Track) -> Result<PlaylistTrack, Error> {
let conn = conn()?;
let stmt = conn.prepare("INSERT INTO playlist_tracks
(track_id, playlist_id, created_at, updated_at)
VALUES ($1, $2, $3, $4)
ON CONFLICT (track_id, playlist_id)
DO UPDATE SET updated_at=$4
RETURNING playlist_tracks.created_at, playlist_tracks.updated_at")?;
let now = Utc::now().naive_utc();
let rows = stmt.query(&[&track.id, &playlist.id, &now, &now])?;
let row = rows.iter().next().ok_or(Error::Unexpected)?;
Ok(PlaylistTrack {
playlist_id: playlist.id,
track_id: track.id,
created_at: row.get(0),
updated_at: row.get(1),
track: track.clone(),
})
}
}
| {
let pt = PlaylistTrack::row_to_item(row);
playlist_tracks.push(pt);
} | conditional_block |
playlist_track.rs | use std::collections::BTreeMap;
use uuid::Uuid;
use chrono::{NaiveDateTime, Utc};
use postgres;
use error::Error;
use super::{conn, Model};
use model::track::{Track, PROPS as TRACK_PROPS};
use model::playlist::Playlist;
| #[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PlaylistTrack {
pub playlist_id: Uuid,
pub track_id: Uuid,
pub created_at: NaiveDateTime,
pub updated_at: NaiveDateTime,
pub track: Track,
}
static PROPS: [&'static str; 4] = ["playlist_id",
"track_id",
"created_at",
"updated_at"];
impl PlaylistTrack {
fn props_str(prefix: &str) -> String {
PROPS
.iter()
.map(|&p| format!("{}{}", prefix, p))
.collect::<Vec<String>>().join(",")
}
fn row_to_item(row: postgres::rows::Row) -> Self {
let offset = TRACK_PROPS.len();
PlaylistTrack {
playlist_id: row.get(offset),
track_id: row.get(offset + 1),
created_at: row.get(offset + 2),
updated_at: row.get(offset + 3),
track: Track::row_to_item(row),
}
}
pub fn find_by_playlist_ids(playlist_ids: Vec<Uuid>) -> Result<BTreeMap<Uuid, Vec<PlaylistTrack>>, Error> {
let conn = conn().unwrap();
let sql = format!("SELECT {}, {} FROM tracks
LEFT OUTER JOIN playlist_tracks
ON playlist_tracks.track_id = tracks.id
WHERE playlist_tracks.playlist_id = ANY($1)
ORDER BY playlist_tracks.created_at DESC LIMIT {}",
Track::props_str("tracks."),
PlaylistTrack::props_str("playlist_tracks."),
playlist_ids.len() * 200);
let stmt = conn.prepare(&sql).map_err(|e| {
println!("{:?}", e);
e
})?;
let rows = stmt.query(&[&playlist_ids]).map_err(|e| {
println!("{:?}", e);
e
})?;
let mut items: BTreeMap<Uuid, Vec<PlaylistTrack>> = BTreeMap::new();
for id in playlist_ids.iter() {
items.insert(*id, vec![]);
}
for row in rows.iter() {
let id: Uuid = row.get(TRACK_PROPS.len());
if let Some(playlist_tracks) = items.get_mut(&id) {
let pt = PlaylistTrack::row_to_item(row);
playlist_tracks.push(pt);
}
};
Ok(items)
}
pub fn upsert(playlist: &Playlist, track: &Track) -> Result<PlaylistTrack, Error> {
let conn = conn()?;
let stmt = conn.prepare("INSERT INTO playlist_tracks
(track_id, playlist_id, created_at, updated_at)
VALUES ($1, $2, $3, $4)
ON CONFLICT (track_id, playlist_id)
DO UPDATE SET updated_at=$4
RETURNING playlist_tracks.created_at, playlist_tracks.updated_at")?;
let now = Utc::now().naive_utc();
let rows = stmt.query(&[&track.id, &playlist.id, &now, &now])?;
let row = rows.iter().next().ok_or(Error::Unexpected)?;
Ok(PlaylistTrack {
playlist_id: playlist.id,
track_id: track.id,
created_at: row.get(0),
updated_at: row.get(1),
track: track.clone(),
})
}
} | random_line_split |
|
crateresolve8.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
// aux-build:crateresolve8-1.rs
#[pkgid="crateresolve8#0.1"];
extern mod crateresolve8(vers = "0.1", package_id="crateresolve8#0.1");
//extern mod crateresolve8(vers = "0.1");
pub fn main() | {
assert_eq!(crateresolve8::f(), 20);
} | identifier_body |
|
crateresolve8.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
// aux-build:crateresolve8-1.rs
#[pkgid="crateresolve8#0.1"];
extern mod crateresolve8(vers = "0.1", package_id="crateresolve8#0.1");
//extern mod crateresolve8(vers = "0.1");
pub fn | () {
assert_eq!(crateresolve8::f(), 20);
}
| main | identifier_name |
crateresolve8.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | // option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-fast
// aux-build:crateresolve8-1.rs
#[pkgid="crateresolve8#0.1"];
extern mod crateresolve8(vers = "0.1", package_id="crateresolve8#0.1");
//extern mod crateresolve8(vers = "0.1");
pub fn main() {
assert_eq!(crateresolve8::f(), 20);
} | random_line_split |
|
mod_power_of_2_square.rs | use malachite_base::num::arithmetic::traits::{Square, WrappingSquare};
use malachite_base::num::conversion::traits::SplitInHalf;
use malachite_nz::natural::arithmetic::add_mul::limbs_slice_add_mul_limb_same_length_in_place_left;
use malachite_nz::natural::arithmetic::mod_power_of_2_square::limbs_square_diagonal_shl_add;
use malachite_nz::natural::arithmetic::mul::limb::limbs_mul_limb_to_out;
use malachite_nz::platform::{DoubleLimb, Limb};
pub fn limbs_square_low_basecase_unrestricted(out: &mut [Limb], xs: &[Limb]) | limbs_slice_add_mul_limb_same_length_in_place_left(
&mut scratch[two_i..],
&xs[i + 1..n - i],
xs[i],
);
}
limbs_square_diagonal_shl_add(out, &mut scratch, xs);
}
}
}
| {
let n = xs.len();
let out = &mut out[..n];
assert_ne!(n, 0);
let xs_0 = xs[0];
match n {
1 => out[0] = xs_0.wrapping_square(),
2 => {
let (p_hi, p_lo) = DoubleLimb::from(xs_0).square().split_in_half();
out[0] = p_lo;
out[1] = (xs_0.wrapping_mul(xs[1]) << 1).wrapping_add(p_hi);
}
_ => {
let mut scratch = vec![0; n - 1];
limbs_mul_limb_to_out(&mut scratch, &xs[1..], xs_0);
for i in 1.. {
let two_i = i << 1;
if two_i >= n - 1 {
break;
} | identifier_body |
mod_power_of_2_square.rs | use malachite_base::num::arithmetic::traits::{Square, WrappingSquare};
use malachite_base::num::conversion::traits::SplitInHalf;
use malachite_nz::natural::arithmetic::add_mul::limbs_slice_add_mul_limb_same_length_in_place_left;
use malachite_nz::natural::arithmetic::mod_power_of_2_square::limbs_square_diagonal_shl_add;
use malachite_nz::natural::arithmetic::mul::limb::limbs_mul_limb_to_out;
use malachite_nz::platform::{DoubleLimb, Limb};
pub fn | (out: &mut [Limb], xs: &[Limb]) {
let n = xs.len();
let out = &mut out[..n];
assert_ne!(n, 0);
let xs_0 = xs[0];
match n {
1 => out[0] = xs_0.wrapping_square(),
2 => {
let (p_hi, p_lo) = DoubleLimb::from(xs_0).square().split_in_half();
out[0] = p_lo;
out[1] = (xs_0.wrapping_mul(xs[1]) << 1).wrapping_add(p_hi);
}
_ => {
let mut scratch = vec![0; n - 1];
limbs_mul_limb_to_out(&mut scratch, &xs[1..], xs_0);
for i in 1.. {
let two_i = i << 1;
if two_i >= n - 1 {
break;
}
limbs_slice_add_mul_limb_same_length_in_place_left(
&mut scratch[two_i..],
&xs[i + 1..n - i],
xs[i],
);
}
limbs_square_diagonal_shl_add(out, &mut scratch, xs);
}
}
}
| limbs_square_low_basecase_unrestricted | identifier_name |
mod_power_of_2_square.rs | use malachite_base::num::arithmetic::traits::{Square, WrappingSquare};
use malachite_base::num::conversion::traits::SplitInHalf;
use malachite_nz::natural::arithmetic::add_mul::limbs_slice_add_mul_limb_same_length_in_place_left;
use malachite_nz::natural::arithmetic::mod_power_of_2_square::limbs_square_diagonal_shl_add; | use malachite_nz::platform::{DoubleLimb, Limb};
pub fn limbs_square_low_basecase_unrestricted(out: &mut [Limb], xs: &[Limb]) {
let n = xs.len();
let out = &mut out[..n];
assert_ne!(n, 0);
let xs_0 = xs[0];
match n {
1 => out[0] = xs_0.wrapping_square(),
2 => {
let (p_hi, p_lo) = DoubleLimb::from(xs_0).square().split_in_half();
out[0] = p_lo;
out[1] = (xs_0.wrapping_mul(xs[1]) << 1).wrapping_add(p_hi);
}
_ => {
let mut scratch = vec![0; n - 1];
limbs_mul_limb_to_out(&mut scratch, &xs[1..], xs_0);
for i in 1.. {
let two_i = i << 1;
if two_i >= n - 1 {
break;
}
limbs_slice_add_mul_limb_same_length_in_place_left(
&mut scratch[two_i..],
&xs[i + 1..n - i],
xs[i],
);
}
limbs_square_diagonal_shl_add(out, &mut scratch, xs);
}
}
} | use malachite_nz::natural::arithmetic::mul::limb::limbs_mul_limb_to_out; | random_line_split |
current_page_table.rs | //! Handles interactions with the current page table.
use super::inactive_page_table::InactivePageTable;
use super::page_table::{Level1, Level4, PageTable};
use super::page_table_entry::*;
use super::page_table_manager::PageTableManager;
use super::{Page, PageFrame};
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::ptr;
use core::ptr::Unique;
use memory::{Address, PhysicalAddress, VirtualAddress};
use sync::{Mutex, PreemptionState};
use x86_64::instructions::tlb;
use x86_64::registers::control_regs;
/// The address of the current Level 4 table. | ///
/// Note that this is only valid if the level 4 table is mapped recursively on
/// the last entry.
const L4_TABLE: *mut PageTable<Level4> = 0xfffffffffffff000 as *mut PageTable<Level4>;
/// The base address for all temporary addresses.
const TEMPORARY_ADDRESS_BASE: VirtualAddress = VirtualAddress::from_const(0xffffffffffc00000);
/// The method to access the current page table.
pub static CURRENT_PAGE_TABLE: CurrentPageTableLock =
unsafe { CurrentPageTableLock::new(CurrentPageTable::new()) };
// TODO: Revisit page table locking.
/// Protects the current page table from being accessed directly.
///
/// This serves to stop the page table from being switched while being accessed.
pub struct CurrentPageTableLock {
/// The page table being locked.
current_page_table: UnsafeCell<CurrentPageTable>,
/// The reference count to the table.
reference_count: Mutex<usize>
}
// This is safe because the page table will manage its own exclusion
// internally.
unsafe impl Sync for CurrentPageTableLock {}
impl CurrentPageTableLock {
/// Creates a new current page table lock.
///
/// # Safety
/// This should only ever get called once at compile time.
const unsafe fn new(table: CurrentPageTable) -> CurrentPageTableLock {
CurrentPageTableLock {
current_page_table: UnsafeCell::new(table),
reference_count: Mutex::new(0)
}
}
/// Locks the current page table.
pub fn lock(&self) -> CurrentPageTableReference {
let rc: &mut usize = &mut self.reference_count.lock();
*rc += 1;
CurrentPageTableReference {
current_page_table: unsafe { &mut *self.current_page_table.get() },
reference_count: &self.reference_count
}
}
}
/// Serves as a reference to a locked current page table.
pub struct CurrentPageTableReference<'a> {
current_page_table: &'a mut CurrentPageTable,
reference_count: &'a Mutex<usize>
}
impl<'a> Drop for CurrentPageTableReference<'a> {
fn drop(&mut self) {
let rc: &mut usize = &mut self.reference_count.lock();
*rc -= 1;
}
}
impl<'a> Deref for CurrentPageTableReference<'a> {
type Target = CurrentPageTable;
fn deref(&self) -> &CurrentPageTable {
self.current_page_table
}
}
impl<'a> DerefMut for CurrentPageTableReference<'a> {
fn deref_mut(&mut self) -> &mut CurrentPageTable {
self.current_page_table
}
}
/// Owns the page table currently in use.
pub struct CurrentPageTable {
l4_table: Unique<PageTable<Level4>>
}
impl PageTableManager for CurrentPageTable {
fn get_l4(&mut self) -> &mut PageTable<Level4> {
unsafe { self.l4_table.as_mut() }
}
}
impl CurrentPageTable {
/// Returns the current page table.
///
/// # Safety
/// - At any point in time there should only be exactly one current page
/// table struct.
const unsafe fn new() -> CurrentPageTable {
CurrentPageTable {
l4_table: Unique::new_unchecked(L4_TABLE)
}
}
/// Tries to map an inactive page table.
///
/// Returns true if the mapping was successful.
///
/// # Safety
/// - Should not be called while another inactive table is mapped.
pub unsafe fn map_inactive(&mut self, frame: &PageFrame) -> PreemptionState {
let l4 = self.get_l4();
let entry = &mut l4[509];
let preemption_state = entry.lock();
if !entry.flags().contains(PRESENT) {
entry
.set_flags(PRESENT | WRITABLE | NO_EXECUTE)
.set_address(frame.get_address());
}
preemption_state
}
/// Unmaps the currently mapped inactive page table.
pub fn unmap_inactive(&mut self, preemption_state: &PreemptionState) {
let l4 = self.get_l4();
let entry = &mut l4[509];
debug_assert!(entry.flags().contains(PRESENT));
entry.remove_flags(PRESENT);
entry.unlock(&preemption_state);
}
/// Returns a mutable reference to the temporary mapping page table.
fn get_temporary_map_table(&mut self) -> &mut PageTable<Level1> {
let l4 = self.get_l4();
l4.get_next_level_mut(TEMPORARY_ADDRESS_BASE)
.and_then(|l3| l3.get_next_level_mut(TEMPORARY_ADDRESS_BASE))
.and_then(|l2| l2.get_next_level_mut(TEMPORARY_ADDRESS_BASE))
.expect("Temporary page table not mapped.")
}
/// Performs the given action with the mapped page.
pub fn with_temporary_page<F, T>(&mut self, frame: &PageFrame, action: F) -> T
where
F: Fn(&mut Page) -> T
{
// Map the page.
let index = page_frame_hash(frame);
let temporary_map_table = self.get_temporary_map_table();
let entry = &mut temporary_map_table[index];
let preemption_state = entry.lock();
let virtual_address = TEMPORARY_ADDRESS_BASE + (index << 12);
if entry.points_to() != Some(frame.get_address()) {
tlb::flush(::x86_64::VirtualAddress(virtual_address.as_usize()));
entry.set_address(frame.get_address());
entry.set_flags(PRESENT | WRITABLE | DISABLE_CACHE | NO_EXECUTE);
}
// Perform the action.
let result: T = action(&mut Page::from_address(virtual_address));
// Unlock this entry.
entry.unlock(&preemption_state);
result
}
/// Writes the given value to the given physical address.
pub fn write_at_physical<T: Sized + Copy>(
&mut self,
physical_address: PhysicalAddress,
data: T
) {
self.with_temporary_page(&PageFrame::from_address(physical_address), |page| {
let virtual_address =
page.get_address().as_usize() | (physical_address.offset_in_page());
unsafe {
ptr::write(virtual_address as *mut T, data);
}
});
}
/// Reads from the given physical address.
pub fn read_from_physical<T: Sized + Copy>(&mut self, physical_address: PhysicalAddress) -> T {
self.with_temporary_page(&PageFrame::from_address(physical_address), |page| {
let virtual_address =
page.get_address().as_usize() | (physical_address.offset_in_page());
unsafe { ptr::read(virtual_address as *mut T) }
})
}
/// Switches to the new page table returning the current one.
///
/// The old page table will not be mapped into the new one. This should be
/// done manually.
pub unsafe fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
let old_frame =
PageFrame::from_address(PhysicalAddress::from_usize(control_regs::cr3().0 as usize));
let old_table = InactivePageTable::from_frame(old_frame.copy(), &new_table);
let new_frame = new_table.get_frame();
drop(new_table);
// Make the switch.
control_regs::cr3_write(::x86_64::PhysicalAddress(
new_frame.get_address().as_usize() as u64
));
// Map the now inactive old table.
self.map_inactive(&old_frame);
old_table
}
}
/// Hashes page frames to values from 0 to 511.
///
/// This serves to speed up temporary mapping of page frames,
/// by better utilizing the available space.
fn page_frame_hash(frame: &PageFrame) -> usize {
// UNOPTIMIZED: Possibly use a better hash algorithm here?
let mut address = frame.get_address().as_usize() >> 12;
address *= 101489;
address % 512
} | random_line_split |
|
current_page_table.rs | //! Handles interactions with the current page table.
use super::inactive_page_table::InactivePageTable;
use super::page_table::{Level1, Level4, PageTable};
use super::page_table_entry::*;
use super::page_table_manager::PageTableManager;
use super::{Page, PageFrame};
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::ptr;
use core::ptr::Unique;
use memory::{Address, PhysicalAddress, VirtualAddress};
use sync::{Mutex, PreemptionState};
use x86_64::instructions::tlb;
use x86_64::registers::control_regs;
/// The address of the current Level 4 table.
///
/// Note that this is only valid if the level 4 table is mapped recursively on
/// the last entry.
const L4_TABLE: *mut PageTable<Level4> = 0xfffffffffffff000 as *mut PageTable<Level4>;
/// The base address for all temporary addresses.
const TEMPORARY_ADDRESS_BASE: VirtualAddress = VirtualAddress::from_const(0xffffffffffc00000);
/// The method to access the current page table.
pub static CURRENT_PAGE_TABLE: CurrentPageTableLock =
unsafe { CurrentPageTableLock::new(CurrentPageTable::new()) };
// TODO: Revisit page table locking.
/// Protects the current page table from being accessed directly.
///
/// This serves to stop the page table from being switched while being accessed.
pub struct CurrentPageTableLock {
/// The page table being locked.
current_page_table: UnsafeCell<CurrentPageTable>,
/// The reference count to the table.
reference_count: Mutex<usize>
}
// This is safe because the page table will manage its own exclusion
// internally.
unsafe impl Sync for CurrentPageTableLock {}
impl CurrentPageTableLock {
/// Creates a new current page table lock.
///
/// # Safety
/// This should only ever get called once at compile time.
const unsafe fn new(table: CurrentPageTable) -> CurrentPageTableLock {
CurrentPageTableLock {
current_page_table: UnsafeCell::new(table),
reference_count: Mutex::new(0)
}
}
/// Locks the current page table.
pub fn lock(&self) -> CurrentPageTableReference {
let rc: &mut usize = &mut self.reference_count.lock();
*rc += 1;
CurrentPageTableReference {
current_page_table: unsafe { &mut *self.current_page_table.get() },
reference_count: &self.reference_count
}
}
}
/// Serves as a reference to a locked current page table.
pub struct CurrentPageTableReference<'a> {
current_page_table: &'a mut CurrentPageTable,
reference_count: &'a Mutex<usize>
}
impl<'a> Drop for CurrentPageTableReference<'a> {
fn drop(&mut self) {
let rc: &mut usize = &mut self.reference_count.lock();
*rc -= 1;
}
}
impl<'a> Deref for CurrentPageTableReference<'a> {
type Target = CurrentPageTable;
fn deref(&self) -> &CurrentPageTable {
self.current_page_table
}
}
impl<'a> DerefMut for CurrentPageTableReference<'a> {
fn deref_mut(&mut self) -> &mut CurrentPageTable {
self.current_page_table
}
}
/// Owns the page table currently in use.
pub struct CurrentPageTable {
l4_table: Unique<PageTable<Level4>>
}
impl PageTableManager for CurrentPageTable {
fn get_l4(&mut self) -> &mut PageTable<Level4> {
unsafe { self.l4_table.as_mut() }
}
}
impl CurrentPageTable {
/// Returns the current page table.
///
/// # Safety
/// - At any point in time there should only be exactly one current page
/// table struct.
const unsafe fn new() -> CurrentPageTable {
CurrentPageTable {
l4_table: Unique::new_unchecked(L4_TABLE)
}
}
/// Tries to map an inactive page table.
///
/// Returns true if the mapping was successful.
///
/// # Safety
/// - Should not be called while another inactive table is mapped.
pub unsafe fn map_inactive(&mut self, frame: &PageFrame) -> PreemptionState {
let l4 = self.get_l4();
let entry = &mut l4[509];
let preemption_state = entry.lock();
if !entry.flags().contains(PRESENT) {
entry
.set_flags(PRESENT | WRITABLE | NO_EXECUTE)
.set_address(frame.get_address());
}
preemption_state
}
/// Unmaps the currently mapped inactive page table.
pub fn unmap_inactive(&mut self, preemption_state: &PreemptionState) {
let l4 = self.get_l4();
let entry = &mut l4[509];
debug_assert!(entry.flags().contains(PRESENT));
entry.remove_flags(PRESENT);
entry.unlock(&preemption_state);
}
/// Returns a mutable reference to the temporary mapping page table.
fn get_temporary_map_table(&mut self) -> &mut PageTable<Level1> {
let l4 = self.get_l4();
l4.get_next_level_mut(TEMPORARY_ADDRESS_BASE)
.and_then(|l3| l3.get_next_level_mut(TEMPORARY_ADDRESS_BASE))
.and_then(|l2| l2.get_next_level_mut(TEMPORARY_ADDRESS_BASE))
.expect("Temporary page table not mapped.")
}
/// Performs the given action with the mapped page.
pub fn with_temporary_page<F, T>(&mut self, frame: &PageFrame, action: F) -> T
where
F: Fn(&mut Page) -> T
{
// Map the page.
let index = page_frame_hash(frame);
let temporary_map_table = self.get_temporary_map_table();
let entry = &mut temporary_map_table[index];
let preemption_state = entry.lock();
let virtual_address = TEMPORARY_ADDRESS_BASE + (index << 12);
if entry.points_to() != Some(frame.get_address()) {
tlb::flush(::x86_64::VirtualAddress(virtual_address.as_usize()));
entry.set_address(frame.get_address());
entry.set_flags(PRESENT | WRITABLE | DISABLE_CACHE | NO_EXECUTE);
}
// Perform the action.
let result: T = action(&mut Page::from_address(virtual_address));
// Unlock this entry.
entry.unlock(&preemption_state);
result
}
/// Writes the given value to the given physical address.
pub fn write_at_physical<T: Sized + Copy>(
&mut self,
physical_address: PhysicalAddress,
data: T
) {
self.with_temporary_page(&PageFrame::from_address(physical_address), |page| {
let virtual_address =
page.get_address().as_usize() | (physical_address.offset_in_page());
unsafe {
ptr::write(virtual_address as *mut T, data);
}
});
}
/// Reads from the given physical address.
pub fn read_from_physical<T: Sized + Copy>(&mut self, physical_address: PhysicalAddress) -> T |
/// Switches to the new page table returning the current one.
///
/// The old page table will not be mapped into the new one. This should be
/// done manually.
pub unsafe fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
let old_frame =
PageFrame::from_address(PhysicalAddress::from_usize(control_regs::cr3().0 as usize));
let old_table = InactivePageTable::from_frame(old_frame.copy(), &new_table);
let new_frame = new_table.get_frame();
drop(new_table);
// Make the switch.
control_regs::cr3_write(::x86_64::PhysicalAddress(
new_frame.get_address().as_usize() as u64
));
// Map the now inactive old table.
self.map_inactive(&old_frame);
old_table
}
}
/// Hashes page frames to values from 0 to 511.
///
/// This serves to speed up temporary mapping of page frames,
/// by better utilizing the available space.
fn page_frame_hash(frame: &PageFrame) -> usize {
// UNOPTIMIZED: Possibly use a better hash algorithm here?
let mut address = frame.get_address().as_usize() >> 12;
address *= 101489;
address % 512
}
| {
self.with_temporary_page(&PageFrame::from_address(physical_address), |page| {
let virtual_address =
page.get_address().as_usize() | (physical_address.offset_in_page());
unsafe { ptr::read(virtual_address as *mut T) }
})
} | identifier_body |
current_page_table.rs | //! Handles interactions with the current page table.
use super::inactive_page_table::InactivePageTable;
use super::page_table::{Level1, Level4, PageTable};
use super::page_table_entry::*;
use super::page_table_manager::PageTableManager;
use super::{Page, PageFrame};
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::ptr;
use core::ptr::Unique;
use memory::{Address, PhysicalAddress, VirtualAddress};
use sync::{Mutex, PreemptionState};
use x86_64::instructions::tlb;
use x86_64::registers::control_regs;
/// The address of the current Level 4 table.
///
/// Note that this is only valid if the level 4 table is mapped recursively on
/// the last entry.
const L4_TABLE: *mut PageTable<Level4> = 0xfffffffffffff000 as *mut PageTable<Level4>;
/// The base address for all temporary addresses.
const TEMPORARY_ADDRESS_BASE: VirtualAddress = VirtualAddress::from_const(0xffffffffffc00000);
/// The method to access the current page table.
pub static CURRENT_PAGE_TABLE: CurrentPageTableLock =
unsafe { CurrentPageTableLock::new(CurrentPageTable::new()) };
// TODO: Revisit page table locking.
/// Protects the current page table from being accessed directly.
///
/// This serves to stop the page table from being switched while being accessed.
pub struct CurrentPageTableLock {
/// The page table being locked.
current_page_table: UnsafeCell<CurrentPageTable>,
/// The reference count to the table.
reference_count: Mutex<usize>
}
// This is safe because the page table will manage its own exclusion
// internally.
unsafe impl Sync for CurrentPageTableLock {}
impl CurrentPageTableLock {
/// Creates a new current page table lock.
///
/// # Safety
/// This should only ever get called once at compile time.
const unsafe fn new(table: CurrentPageTable) -> CurrentPageTableLock {
CurrentPageTableLock {
current_page_table: UnsafeCell::new(table),
reference_count: Mutex::new(0)
}
}
/// Locks the current page table.
pub fn lock(&self) -> CurrentPageTableReference {
let rc: &mut usize = &mut self.reference_count.lock();
*rc += 1;
CurrentPageTableReference {
current_page_table: unsafe { &mut *self.current_page_table.get() },
reference_count: &self.reference_count
}
}
}
/// Serves as a reference to a locked current page table.
pub struct CurrentPageTableReference<'a> {
current_page_table: &'a mut CurrentPageTable,
reference_count: &'a Mutex<usize>
}
impl<'a> Drop for CurrentPageTableReference<'a> {
fn drop(&mut self) {
let rc: &mut usize = &mut self.reference_count.lock();
*rc -= 1;
}
}
impl<'a> Deref for CurrentPageTableReference<'a> {
type Target = CurrentPageTable;
fn deref(&self) -> &CurrentPageTable {
self.current_page_table
}
}
impl<'a> DerefMut for CurrentPageTableReference<'a> {
fn deref_mut(&mut self) -> &mut CurrentPageTable {
self.current_page_table
}
}
/// Owns the page table currently in use.
pub struct CurrentPageTable {
l4_table: Unique<PageTable<Level4>>
}
impl PageTableManager for CurrentPageTable {
fn get_l4(&mut self) -> &mut PageTable<Level4> {
unsafe { self.l4_table.as_mut() }
}
}
impl CurrentPageTable {
/// Returns the current page table.
///
/// # Safety
/// - At any point in time there should only be exactly one current page
/// table struct.
const unsafe fn new() -> CurrentPageTable {
CurrentPageTable {
l4_table: Unique::new_unchecked(L4_TABLE)
}
}
/// Tries to map an inactive page table.
///
/// Returns true if the mapping was successful.
///
/// # Safety
/// - Should not be called while another inactive table is mapped.
pub unsafe fn map_inactive(&mut self, frame: &PageFrame) -> PreemptionState {
let l4 = self.get_l4();
let entry = &mut l4[509];
let preemption_state = entry.lock();
if !entry.flags().contains(PRESENT) {
entry
.set_flags(PRESENT | WRITABLE | NO_EXECUTE)
.set_address(frame.get_address());
}
preemption_state
}
/// Unmaps the currently mapped inactive page table.
pub fn unmap_inactive(&mut self, preemption_state: &PreemptionState) {
let l4 = self.get_l4();
let entry = &mut l4[509];
debug_assert!(entry.flags().contains(PRESENT));
entry.remove_flags(PRESENT);
entry.unlock(&preemption_state);
}
/// Returns a mutable reference to the temporary mapping page table.
fn get_temporary_map_table(&mut self) -> &mut PageTable<Level1> {
let l4 = self.get_l4();
l4.get_next_level_mut(TEMPORARY_ADDRESS_BASE)
.and_then(|l3| l3.get_next_level_mut(TEMPORARY_ADDRESS_BASE))
.and_then(|l2| l2.get_next_level_mut(TEMPORARY_ADDRESS_BASE))
.expect("Temporary page table not mapped.")
}
/// Performs the given action with the mapped page.
pub fn with_temporary_page<F, T>(&mut self, frame: &PageFrame, action: F) -> T
where
F: Fn(&mut Page) -> T
{
// Map the page.
let index = page_frame_hash(frame);
let temporary_map_table = self.get_temporary_map_table();
let entry = &mut temporary_map_table[index];
let preemption_state = entry.lock();
let virtual_address = TEMPORARY_ADDRESS_BASE + (index << 12);
if entry.points_to() != Some(frame.get_address()) |
// Perform the action.
let result: T = action(&mut Page::from_address(virtual_address));
// Unlock this entry.
entry.unlock(&preemption_state);
result
}
/// Writes the given value to the given physical address.
pub fn write_at_physical<T: Sized + Copy>(
&mut self,
physical_address: PhysicalAddress,
data: T
) {
self.with_temporary_page(&PageFrame::from_address(physical_address), |page| {
let virtual_address =
page.get_address().as_usize() | (physical_address.offset_in_page());
unsafe {
ptr::write(virtual_address as *mut T, data);
}
});
}
/// Reads from the given physical address.
pub fn read_from_physical<T: Sized + Copy>(&mut self, physical_address: PhysicalAddress) -> T {
self.with_temporary_page(&PageFrame::from_address(physical_address), |page| {
let virtual_address =
page.get_address().as_usize() | (physical_address.offset_in_page());
unsafe { ptr::read(virtual_address as *mut T) }
})
}
/// Switches to the new page table returning the current one.
///
/// The old page table will not be mapped into the new one. This should be
/// done manually.
pub unsafe fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
let old_frame =
PageFrame::from_address(PhysicalAddress::from_usize(control_regs::cr3().0 as usize));
let old_table = InactivePageTable::from_frame(old_frame.copy(), &new_table);
let new_frame = new_table.get_frame();
drop(new_table);
// Make the switch.
control_regs::cr3_write(::x86_64::PhysicalAddress(
new_frame.get_address().as_usize() as u64
));
// Map the now inactive old table.
self.map_inactive(&old_frame);
old_table
}
}
/// Hashes page frames to values from 0 to 511.
///
/// This serves to speed up temporary mapping of page frames,
/// by better utilizing the available space.
fn page_frame_hash(frame: &PageFrame) -> usize {
// UNOPTIMIZED: Possibly use a better hash algorithm here?
let mut address = frame.get_address().as_usize() >> 12;
address *= 101489;
address % 512
}
| {
tlb::flush(::x86_64::VirtualAddress(virtual_address.as_usize()));
entry.set_address(frame.get_address());
entry.set_flags(PRESENT | WRITABLE | DISABLE_CACHE | NO_EXECUTE);
} | conditional_block |
current_page_table.rs | //! Handles interactions with the current page table.
use super::inactive_page_table::InactivePageTable;
use super::page_table::{Level1, Level4, PageTable};
use super::page_table_entry::*;
use super::page_table_manager::PageTableManager;
use super::{Page, PageFrame};
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::ptr;
use core::ptr::Unique;
use memory::{Address, PhysicalAddress, VirtualAddress};
use sync::{Mutex, PreemptionState};
use x86_64::instructions::tlb;
use x86_64::registers::control_regs;
/// The address of the current Level 4 table.
///
/// Note that this is only valid if the level 4 table is mapped recursively on
/// the last entry.
const L4_TABLE: *mut PageTable<Level4> = 0xfffffffffffff000 as *mut PageTable<Level4>;
/// The base address for all temporary addresses.
const TEMPORARY_ADDRESS_BASE: VirtualAddress = VirtualAddress::from_const(0xffffffffffc00000);
/// The method to access the current page table.
pub static CURRENT_PAGE_TABLE: CurrentPageTableLock =
unsafe { CurrentPageTableLock::new(CurrentPageTable::new()) };
// TODO: Revisit page table locking.
/// Protects the current page table from being accessed directly.
///
/// This serves to stop the page table from being switched while being accessed.
pub struct CurrentPageTableLock {
/// The page table being locked.
current_page_table: UnsafeCell<CurrentPageTable>,
/// The reference count to the table.
reference_count: Mutex<usize>
}
// This is safe because the page table will manage its own exclusion
// internally.
unsafe impl Sync for CurrentPageTableLock {}
impl CurrentPageTableLock {
/// Creates a new current page table lock.
///
/// # Safety
/// This should only ever get called once at compile time.
const unsafe fn new(table: CurrentPageTable) -> CurrentPageTableLock {
CurrentPageTableLock {
current_page_table: UnsafeCell::new(table),
reference_count: Mutex::new(0)
}
}
/// Locks the current page table.
pub fn lock(&self) -> CurrentPageTableReference {
let rc: &mut usize = &mut self.reference_count.lock();
*rc += 1;
CurrentPageTableReference {
current_page_table: unsafe { &mut *self.current_page_table.get() },
reference_count: &self.reference_count
}
}
}
/// Serves as a reference to a locked current page table.
pub struct CurrentPageTableReference<'a> {
current_page_table: &'a mut CurrentPageTable,
reference_count: &'a Mutex<usize>
}
impl<'a> Drop for CurrentPageTableReference<'a> {
fn drop(&mut self) {
let rc: &mut usize = &mut self.reference_count.lock();
*rc -= 1;
}
}
impl<'a> Deref for CurrentPageTableReference<'a> {
type Target = CurrentPageTable;
fn deref(&self) -> &CurrentPageTable {
self.current_page_table
}
}
impl<'a> DerefMut for CurrentPageTableReference<'a> {
fn deref_mut(&mut self) -> &mut CurrentPageTable {
self.current_page_table
}
}
/// Owns the page table currently in use.
pub struct CurrentPageTable {
l4_table: Unique<PageTable<Level4>>
}
impl PageTableManager for CurrentPageTable {
fn get_l4(&mut self) -> &mut PageTable<Level4> {
unsafe { self.l4_table.as_mut() }
}
}
impl CurrentPageTable {
/// Returns the current page table.
///
/// # Safety
/// - At any point in time there should only be exactly one current page
/// table struct.
const unsafe fn new() -> CurrentPageTable {
CurrentPageTable {
l4_table: Unique::new_unchecked(L4_TABLE)
}
}
/// Tries to map an inactive page table.
///
/// Returns true if the mapping was successful.
///
/// # Safety
/// - Should not be called while another inactive table is mapped.
pub unsafe fn map_inactive(&mut self, frame: &PageFrame) -> PreemptionState {
let l4 = self.get_l4();
let entry = &mut l4[509];
let preemption_state = entry.lock();
if !entry.flags().contains(PRESENT) {
entry
.set_flags(PRESENT | WRITABLE | NO_EXECUTE)
.set_address(frame.get_address());
}
preemption_state
}
/// Unmaps the currently mapped inactive page table.
pub fn unmap_inactive(&mut self, preemption_state: &PreemptionState) {
let l4 = self.get_l4();
let entry = &mut l4[509];
debug_assert!(entry.flags().contains(PRESENT));
entry.remove_flags(PRESENT);
entry.unlock(&preemption_state);
}
/// Returns a mutable reference to the temporary mapping page table.
fn get_temporary_map_table(&mut self) -> &mut PageTable<Level1> {
let l4 = self.get_l4();
l4.get_next_level_mut(TEMPORARY_ADDRESS_BASE)
.and_then(|l3| l3.get_next_level_mut(TEMPORARY_ADDRESS_BASE))
.and_then(|l2| l2.get_next_level_mut(TEMPORARY_ADDRESS_BASE))
.expect("Temporary page table not mapped.")
}
/// Performs the given action with the mapped page.
pub fn with_temporary_page<F, T>(&mut self, frame: &PageFrame, action: F) -> T
where
F: Fn(&mut Page) -> T
{
// Map the page.
let index = page_frame_hash(frame);
let temporary_map_table = self.get_temporary_map_table();
let entry = &mut temporary_map_table[index];
let preemption_state = entry.lock();
let virtual_address = TEMPORARY_ADDRESS_BASE + (index << 12);
if entry.points_to() != Some(frame.get_address()) {
tlb::flush(::x86_64::VirtualAddress(virtual_address.as_usize()));
entry.set_address(frame.get_address());
entry.set_flags(PRESENT | WRITABLE | DISABLE_CACHE | NO_EXECUTE);
}
// Perform the action.
let result: T = action(&mut Page::from_address(virtual_address));
// Unlock this entry.
entry.unlock(&preemption_state);
result
}
/// Writes the given value to the given physical address.
pub fn write_at_physical<T: Sized + Copy>(
&mut self,
physical_address: PhysicalAddress,
data: T
) {
self.with_temporary_page(&PageFrame::from_address(physical_address), |page| {
let virtual_address =
page.get_address().as_usize() | (physical_address.offset_in_page());
unsafe {
ptr::write(virtual_address as *mut T, data);
}
});
}
/// Reads from the given physical address.
pub fn read_from_physical<T: Sized + Copy>(&mut self, physical_address: PhysicalAddress) -> T {
self.with_temporary_page(&PageFrame::from_address(physical_address), |page| {
let virtual_address =
page.get_address().as_usize() | (physical_address.offset_in_page());
unsafe { ptr::read(virtual_address as *mut T) }
})
}
/// Switches to the new page table returning the current one.
///
/// The old page table will not be mapped into the new one. This should be
/// done manually.
pub unsafe fn | (&mut self, new_table: InactivePageTable) -> InactivePageTable {
let old_frame =
PageFrame::from_address(PhysicalAddress::from_usize(control_regs::cr3().0 as usize));
let old_table = InactivePageTable::from_frame(old_frame.copy(), &new_table);
let new_frame = new_table.get_frame();
drop(new_table);
// Make the switch.
control_regs::cr3_write(::x86_64::PhysicalAddress(
new_frame.get_address().as_usize() as u64
));
// Map the now inactive old table.
self.map_inactive(&old_frame);
old_table
}
}
/// Hashes page frames to values from 0 to 511.
///
/// This serves to speed up temporary mapping of page frames,
/// by better utilizing the available space.
fn page_frame_hash(frame: &PageFrame) -> usize {
// UNOPTIMIZED: Possibly use a better hash algorithm here?
let mut address = frame.get_address().as_usize() >> 12;
address *= 101489;
address % 512
}
| switch | identifier_name |
lib.rs | // Copyright 2018 Stichting Organism
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Emojisum
//External Crates
extern crate blake2_rfc;
extern crate serde;
extern crate serde_json;
#[macro_use] extern crate serde_derive;
//Imports
use std::fs::File;
use std::path::Path;
use blake2_rfc::blake2b::{blake2b};
//Holds main info parsed from standard
#[derive(Deserialize, Debug)]
pub struct Emojisum {
version: String,
description: String,
// these are an ordered list, referenced by a byte (each byte of a checksum digest)
emojiwords: Vec<Vec<String>>
}
// Words are a set of options to represent an emoji.
// Possible options could be the ":colon_notation:" or a "U+26CF" style codepoint.
//pub type Word = String;
impl Emojisum {
//Pass an emojimapping JSON to start
pub fn init(file_path: &str) -> Emojisum |
//given a series of bytes match emojis
pub fn from_bytes(&self, tosum: &[u8]) -> Option<String> {
//check that it is 32bytes
if tosum.len() < 32 { return None }
let mut result = String::new();
for byte in tosum {
result.push_str(&(self.emojiwords[*byte as usize])[0])
}
return Some(result);
}
//given a vector of bytes, we hash and return checksum
pub fn hash_to_emojisum(&self, data: Vec<u8>) -> Option<String> {
//get 256bit hash of given data
let hash = blake2b(32, &[], &data);
return self.from_bytes(hash.as_bytes())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
let e = Emojisum::init("./src/emojimap.json");
//println!("{:?}", e);
println!("{:?}", e.hash_to_emojisum("dog".as_bytes().to_vec()));
println!("{:?}", e.hash_to_emojisum("cat".as_bytes().to_vec()));
assert_eq!(2 + 2, 4);
}
}
| {
let json_file_path = Path::new(file_path);
let json_file = File::open(json_file_path).expect("file not found");
let deserialized: Emojisum =
serde_json::from_reader(json_file).expect("error while reading json");
return deserialized;
} | identifier_body |
lib.rs | // Copyright 2018 Stichting Organism
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Emojisum
//External Crates
extern crate blake2_rfc;
extern crate serde;
extern crate serde_json;
#[macro_use] extern crate serde_derive;
//Imports
use std::fs::File;
use std::path::Path;
use blake2_rfc::blake2b::{blake2b};
//Holds main info parsed from standard
#[derive(Deserialize, Debug)]
pub struct Emojisum {
version: String,
description: String,
// these are an ordered list, referenced by a byte (each byte of a checksum digest)
emojiwords: Vec<Vec<String>>
}
// Words are a set of options to represent an emoji.
// Possible options could be the ":colon_notation:" or a "U+26CF" style codepoint.
//pub type Word = String;
impl Emojisum {
//Pass an emojimapping JSON to start
pub fn | (file_path: &str) -> Emojisum {
let json_file_path = Path::new(file_path);
let json_file = File::open(json_file_path).expect("file not found");
let deserialized: Emojisum =
serde_json::from_reader(json_file).expect("error while reading json");
return deserialized;
}
//given a series of bytes match emojis
pub fn from_bytes(&self, tosum: &[u8]) -> Option<String> {
//check that it is 32bytes
if tosum.len() < 32 { return None }
let mut result = String::new();
for byte in tosum {
result.push_str(&(self.emojiwords[*byte as usize])[0])
}
return Some(result);
}
//given a vector of bytes, we hash and return checksum
pub fn hash_to_emojisum(&self, data: Vec<u8>) -> Option<String> {
//get 256bit hash of given data
let hash = blake2b(32, &[], &data);
return self.from_bytes(hash.as_bytes())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
let e = Emojisum::init("./src/emojimap.json");
//println!("{:?}", e);
println!("{:?}", e.hash_to_emojisum("dog".as_bytes().to_vec()));
println!("{:?}", e.hash_to_emojisum("cat".as_bytes().to_vec()));
assert_eq!(2 + 2, 4);
}
}
| init | identifier_name |
lib.rs | // Copyright 2018 Stichting Organism
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// | // Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Emojisum
//External Crates
extern crate blake2_rfc;
extern crate serde;
extern crate serde_json;
#[macro_use] extern crate serde_derive;
//Imports
use std::fs::File;
use std::path::Path;
use blake2_rfc::blake2b::{blake2b};
//Holds main info parsed from standard
#[derive(Deserialize, Debug)]
pub struct Emojisum {
version: String,
description: String,
// these are an ordered list, referened by a byte (each byte of a checksum digest)
emojiwords: Vec<Vec<String>>
}
// Words are a set of options to represent an emoji.
// Possible options could be the ":colon_notation:" or a "U+26CF" style codepoint.
//pub type Word = String;
impl Emojisum {
//Pass a emojimapping JSON to start
pub fn init(file_path: &str) -> Emojisum {
let json_file_path = Path::new(file_path);
let json_file = File::open(json_file_path).expect("file not found");
let deserialized: Emojisum =
serde_json::from_reader(json_file).expect("error while reading json");
return deserialized;
}
//given a series of bytes match emojis
pub fn from_bytes(&self, tosum: &[u8]) -> Option<String> {
//check that it is 32bytes
if tosum.len() < 32 { return None }
let mut result = String::new();
for byte in tosum {
result.push_str(&(self.emojiwords[*byte as usize])[0])
}
return Some(result);
}
//given a vector of bytes, we hash and return checksum
pub fn hash_to_emojisum(&self, data: Vec<u8>) -> Option<String> {
//get 256bit hash of given data
let hash = blake2b(32, &[], &data);
return self.from_bytes(hash.as_bytes())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
let e = Emojisum::init("./src/emojimap.json");
//println!("{:?}", e);
println!("{:?}", e.hash_to_emojisum("dog".as_bytes().to_vec()));
println!("{:?}", e.hash_to_emojisum("cat".as_bytes().to_vec()));
assert_eq!(2 + 2, 4);
}
} | random_line_split |
|
lib.rs | // Copyright 2018 Stichting Organism
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Emojisum
//External Crates
extern crate blake2_rfc;
extern crate serde;
extern crate serde_json;
#[macro_use] extern crate serde_derive;
//Imports
use std::fs::File;
use std::path::Path;
use blake2_rfc::blake2b::{blake2b};
//Holds main info parsed from standard
#[derive(Deserialize, Debug)]
pub struct Emojisum {
version: String,
description: String,
// these are an ordered list, referenced by a byte (each byte of a checksum digest)
emojiwords: Vec<Vec<String>>
}
// Words are a set of options to represent an emoji.
// Possible options could be the ":colon_notation:" or a "U+26CF" style codepoint.
//pub type Word = String;
impl Emojisum {
//Pass an emojimapping JSON to start
pub fn init(file_path: &str) -> Emojisum {
let json_file_path = Path::new(file_path);
let json_file = File::open(json_file_path).expect("file not found");
let deserialized: Emojisum =
serde_json::from_reader(json_file).expect("error while reading json");
return deserialized;
}
//given a series of bytes match emojis
pub fn from_bytes(&self, tosum: &[u8]) -> Option<String> {
//check that it is 32bytes
if tosum.len() < 32 |
let mut result = String::new();
for byte in tosum {
result.push_str(&(self.emojiwords[*byte as usize])[0])
}
return Some(result);
}
//given a vector of bytes, we hash and return checksum
pub fn hash_to_emojisum(&self, data: Vec<u8>) -> Option<String> {
//get 256bit hash of given data
let hash = blake2b(32, &[], &data);
return self.from_bytes(hash.as_bytes())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
let e = Emojisum::init("./src/emojimap.json");
//println!("{:?}", e);
println!("{:?}", e.hash_to_emojisum("dog".as_bytes().to_vec()));
println!("{:?}", e.hash_to_emojisum("cat".as_bytes().to_vec()));
assert_eq!(2 + 2, 4);
}
}
| { return None } | conditional_block |
response.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The [Response](https://fetch.spec.whatwg.org/#responses) object
//! resulting from a [fetch operation](https://fetch.spec.whatwg.org/#concept-fetch)
use {FetchMetadata, FilteredMetadata, Metadata, NetworkError};
use hyper::header::{AccessControlExposeHeaders, ContentType, Headers};
use hyper::status::StatusCode;
use hyper_serde::Serde;
use servo_url::ServoUrl;
use std::ascii::AsciiExt;
use std::cell::{Cell, RefCell};
use std::sync::{Arc, Mutex};
/// [Response type](https://fetch.spec.whatwg.org/#concept-response-type)
#[derive(Clone, PartialEq, Debug, Deserialize, Serialize, HeapSizeOf)]
pub enum ResponseType {
Basic,
Cors,
Default,
Error(NetworkError),
Opaque,
OpaqueRedirect
}
/// [Response termination reason](https://fetch.spec.whatwg.org/#concept-response-termination-reason)
#[derive(Debug, Clone, Copy, Deserialize, Serialize, HeapSizeOf)]
pub enum TerminationReason {
EndUserAbort,
Fatal,
Timeout
}
/// The response body can still be pushed to after fetch
/// This provides a way to store unfinished response bodies
#[derive(Clone, Debug, PartialEq, HeapSizeOf)]
pub enum ResponseBody {
Empty, // XXXManishearth is this necessary, or is Done(vec![]) enough?
Receiving(Vec<u8>),
Done(Vec<u8>),
}
impl ResponseBody {
pub fn is_done(&self) -> bool {
match *self {
ResponseBody::Done(..) => true,
ResponseBody::Empty | ResponseBody::Receiving(..) => false
}
}
}
/// [Cache state](https://fetch.spec.whatwg.org/#concept-response-cache-state)
#[derive(Clone, Debug, Deserialize, Serialize, HeapSizeOf)]
pub enum CacheState {
None,
Local,
Validated,
Partial
}
/// [Https state](https://fetch.spec.whatwg.org/#concept-response-https-state)
#[derive(Debug, Clone, Copy, HeapSizeOf, Deserialize, Serialize)]
pub enum HttpsState {
None,
Deprecated,
Modern
}
pub enum ResponseMsg {
Chunk(Vec<u8>),
Finished,
Errored
}
/// A [Response](https://fetch.spec.whatwg.org/#concept-response) as defined by the Fetch spec
#[derive(Debug, Clone, HeapSizeOf)]
pub struct Response {
pub response_type: ResponseType,
pub termination_reason: Option<TerminationReason>,
url: Option<ServoUrl>,
pub url_list: RefCell<Vec<ServoUrl>>,
/// `None` can be considered a StatusCode of `0`.
#[ignore_heap_size_of = "Defined in hyper"]
pub status: Option<StatusCode>,
pub raw_status: Option<(u16, Vec<u8>)>,
#[ignore_heap_size_of = "Defined in hyper"]
pub headers: Headers,
#[ignore_heap_size_of = "Mutex heap size undefined"]
pub body: Arc<Mutex<ResponseBody>>,
pub cache_state: CacheState,
pub https_state: HttpsState,
pub referrer: Option<ServoUrl>,
/// [Internal response](https://fetch.spec.whatwg.org/#concept-internal-response), only used if the Response
/// is a filtered response
pub internal_response: Option<Box<Response>>,
/// whether or not to try to return the internal_response when asked for actual_response
pub return_internal: Cell<bool>,
}
impl Response {
pub fn new(url: ServoUrl) -> Response {
Response {
response_type: ResponseType::Default,
termination_reason: None,
url: Some(url),
url_list: RefCell::new(Vec::new()),
status: Some(StatusCode::Ok),
raw_status: Some((200, b"OK".to_vec())),
headers: Headers::new(),
body: Arc::new(Mutex::new(ResponseBody::Empty)),
cache_state: CacheState::None,
https_state: HttpsState::None,
referrer: None,
internal_response: None,
return_internal: Cell::new(true)
}
}
pub fn network_error(e: NetworkError) -> Response {
Response {
response_type: ResponseType::Error(e),
termination_reason: None,
url: None,
url_list: RefCell::new(vec![]),
status: None,
raw_status: None,
headers: Headers::new(),
body: Arc::new(Mutex::new(ResponseBody::Empty)),
cache_state: CacheState::None,
https_state: HttpsState::None,
referrer: None,
internal_response: None,
return_internal: Cell::new(true)
}
}
pub fn url(&self) -> Option<&ServoUrl> {
self.url.as_ref()
}
pub fn is_network_error(&self) -> bool {
match self.response_type {
ResponseType::Error(..) => true,
_ => false
}
}
pub fn get_network_error(&self) -> Option<&NetworkError> {
match self.response_type {
ResponseType::Error(ref e) => Some(e),
_ => None,
}
}
pub fn actual_response(&self) -> &Response {
if self.return_internal.get() && self.internal_response.is_some() {
&**self.internal_response.as_ref().unwrap()
} else {
self
}
}
pub fn to_actual(self) -> Response {
if self.return_internal.get() && self.internal_response.is_some() {
*self.internal_response.unwrap()
} else |
}
/// Convert to a filtered response, of type `filter_type`.
/// Do not use with type Error or Default
pub fn to_filtered(self, filter_type: ResponseType) -> Response {
match filter_type {
ResponseType::Default | ResponseType::Error(..) => panic!(),
_ => (),
}
let old_response = self.to_actual();
if let ResponseType::Error(e) = old_response.response_type {
return Response::network_error(e);
}
let old_headers = old_response.headers.clone();
let mut response = old_response.clone();
response.internal_response = Some(Box::new(old_response));
response.response_type = filter_type;
match response.response_type {
ResponseType::Default | ResponseType::Error(..) => unreachable!(),
ResponseType::Basic => {
let headers = old_headers.iter().filter(|header| {
match &*header.name().to_ascii_lowercase() {
"set-cookie" | "set-cookie2" => false,
_ => true
}
}).collect();
response.headers = headers;
},
ResponseType::Cors => {
let access = old_headers.get::<AccessControlExposeHeaders>();
let allowed_headers = access.as_ref().map(|v| &v[..]).unwrap_or(&[]);
let headers = old_headers.iter().filter(|header| {
match &*header.name().to_ascii_lowercase() {
"cache-control" | "content-language" | "content-type" |
"expires" | "last-modified" | "pragma" => true,
"set-cookie" | "set-cookie2" => false,
header => {
let result =
allowed_headers.iter().find(|h| *header == *h.to_ascii_lowercase());
result.is_some()
}
}
}).collect();
response.headers = headers;
},
ResponseType::Opaque => {
response.url_list = RefCell::new(vec![]);
response.url = None;
response.headers = Headers::new();
response.status = None;
response.body = Arc::new(Mutex::new(ResponseBody::Empty));
response.cache_state = CacheState::None;
},
ResponseType::OpaqueRedirect => {
response.headers = Headers::new();
response.status = None;
response.body = Arc::new(Mutex::new(ResponseBody::Empty));
response.cache_state = CacheState::None;
}
}
response
}
pub fn metadata(&self) -> Result<FetchMetadata, NetworkError> {
fn init_metadata(response: &Response, url: &ServoUrl) -> Metadata {
let mut metadata = Metadata::default(url.clone());
metadata.set_content_type(match response.headers.get() {
Some(&ContentType(ref mime)) => Some(mime),
None => None
});
metadata.headers = Some(Serde(response.headers.clone()));
metadata.status = response.raw_status.clone();
metadata.https_state = response.https_state;
metadata.referrer = response.referrer.clone();
metadata
};
if let Some(error) = self.get_network_error() {
return Err(error.clone());
}
let metadata = self.url.as_ref().map(|url| init_metadata(self, url));
if let Some(ref response) = self.internal_response {
match response.url {
Some(ref url) => {
let unsafe_metadata = init_metadata(response, url);
Ok(FetchMetadata::Filtered {
filtered: match metadata {
Some(m) => FilteredMetadata::Transparent(m),
None => FilteredMetadata::Opaque
},
unsafe_: unsafe_metadata
})
}
None => Err(NetworkError::Internal("No url found in unsafe response".to_owned()))
}
} else {
Ok(FetchMetadata::Unfiltered(metadata.unwrap()))
}
}
}
| {
self
} | conditional_block |
response.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The [Response](https://fetch.spec.whatwg.org/#responses) object
//! resulting from a [fetch operation](https://fetch.spec.whatwg.org/#concept-fetch)
use {FetchMetadata, FilteredMetadata, Metadata, NetworkError};
use hyper::header::{AccessControlExposeHeaders, ContentType, Headers};
use hyper::status::StatusCode;
use hyper_serde::Serde;
use servo_url::ServoUrl;
use std::ascii::AsciiExt;
use std::cell::{Cell, RefCell};
use std::sync::{Arc, Mutex};
/// [Response type](https://fetch.spec.whatwg.org/#concept-response-type)
#[derive(Clone, PartialEq, Debug, Deserialize, Serialize, HeapSizeOf)]
pub enum ResponseType {
Basic,
Cors,
Default,
Error(NetworkError),
Opaque,
OpaqueRedirect
}
/// [Response termination reason](https://fetch.spec.whatwg.org/#concept-response-termination-reason)
#[derive(Debug, Clone, Copy, Deserialize, Serialize, HeapSizeOf)]
pub enum TerminationReason {
EndUserAbort,
Fatal,
Timeout
}
/// The response body can still be pushed to after fetch
/// This provides a way to store unfinished response bodies
#[derive(Clone, Debug, PartialEq, HeapSizeOf)]
pub enum ResponseBody {
Empty, // XXXManishearth is this necessary, or is Done(vec![]) enough?
Receiving(Vec<u8>),
Done(Vec<u8>),
}
impl ResponseBody {
pub fn is_done(&self) -> bool {
match *self {
ResponseBody::Done(..) => true,
ResponseBody::Empty | ResponseBody::Receiving(..) => false
}
}
}
/// [Cache state](https://fetch.spec.whatwg.org/#concept-response-cache-state)
#[derive(Clone, Debug, Deserialize, Serialize, HeapSizeOf)]
pub enum CacheState {
None,
Local,
Validated,
Partial
}
/// [Https state](https://fetch.spec.whatwg.org/#concept-response-https-state)
#[derive(Debug, Clone, Copy, HeapSizeOf, Deserialize, Serialize)]
pub enum HttpsState {
None,
Deprecated,
Modern
}
pub enum ResponseMsg {
Chunk(Vec<u8>),
Finished,
Errored
}
/// A [Response](https://fetch.spec.whatwg.org/#concept-response) as defined by the Fetch spec
#[derive(Debug, Clone, HeapSizeOf)]
pub struct Response {
pub response_type: ResponseType,
pub termination_reason: Option<TerminationReason>,
url: Option<ServoUrl>,
pub url_list: RefCell<Vec<ServoUrl>>,
/// `None` can be considered a StatusCode of `0`.
#[ignore_heap_size_of = "Defined in hyper"]
pub status: Option<StatusCode>,
pub raw_status: Option<(u16, Vec<u8>)>,
#[ignore_heap_size_of = "Defined in hyper"]
pub headers: Headers,
#[ignore_heap_size_of = "Mutex heap size undefined"]
pub body: Arc<Mutex<ResponseBody>>,
pub cache_state: CacheState,
pub https_state: HttpsState,
pub referrer: Option<ServoUrl>,
/// [Internal response](https://fetch.spec.whatwg.org/#concept-internal-response), only used if the Response
/// is a filtered response
pub internal_response: Option<Box<Response>>,
/// whether or not to try to return the internal_response when asked for actual_response
pub return_internal: Cell<bool>,
}
impl Response {
pub fn new(url: ServoUrl) -> Response {
Response {
response_type: ResponseType::Default,
termination_reason: None,
url: Some(url),
url_list: RefCell::new(Vec::new()),
status: Some(StatusCode::Ok),
raw_status: Some((200, b"OK".to_vec())),
headers: Headers::new(),
body: Arc::new(Mutex::new(ResponseBody::Empty)),
cache_state: CacheState::None,
https_state: HttpsState::None,
referrer: None,
internal_response: None,
return_internal: Cell::new(true)
}
}
pub fn | (e: NetworkError) -> Response {
Response {
response_type: ResponseType::Error(e),
termination_reason: None,
url: None,
url_list: RefCell::new(vec![]),
status: None,
raw_status: None,
headers: Headers::new(),
body: Arc::new(Mutex::new(ResponseBody::Empty)),
cache_state: CacheState::None,
https_state: HttpsState::None,
referrer: None,
internal_response: None,
return_internal: Cell::new(true)
}
}
pub fn url(&self) -> Option<&ServoUrl> {
self.url.as_ref()
}
pub fn is_network_error(&self) -> bool {
match self.response_type {
ResponseType::Error(..) => true,
_ => false
}
}
pub fn get_network_error(&self) -> Option<&NetworkError> {
match self.response_type {
ResponseType::Error(ref e) => Some(e),
_ => None,
}
}
pub fn actual_response(&self) -> &Response {
if self.return_internal.get() && self.internal_response.is_some() {
&**self.internal_response.as_ref().unwrap()
} else {
self
}
}
pub fn to_actual(self) -> Response {
if self.return_internal.get() && self.internal_response.is_some() {
*self.internal_response.unwrap()
} else {
self
}
}
/// Convert to a filtered response, of type `filter_type`.
/// Do not use with type Error or Default
pub fn to_filtered(self, filter_type: ResponseType) -> Response {
match filter_type {
ResponseType::Default | ResponseType::Error(..) => panic!(),
_ => (),
}
let old_response = self.to_actual();
if let ResponseType::Error(e) = old_response.response_type {
return Response::network_error(e);
}
let old_headers = old_response.headers.clone();
let mut response = old_response.clone();
response.internal_response = Some(Box::new(old_response));
response.response_type = filter_type;
match response.response_type {
ResponseType::Default | ResponseType::Error(..) => unreachable!(),
ResponseType::Basic => {
let headers = old_headers.iter().filter(|header| {
match &*header.name().to_ascii_lowercase() {
"set-cookie" | "set-cookie2" => false,
_ => true
}
}).collect();
response.headers = headers;
},
ResponseType::Cors => {
let access = old_headers.get::<AccessControlExposeHeaders>();
let allowed_headers = access.as_ref().map(|v| &v[..]).unwrap_or(&[]);
let headers = old_headers.iter().filter(|header| {
match &*header.name().to_ascii_lowercase() {
"cache-control" | "content-language" | "content-type" |
"expires" | "last-modified" | "pragma" => true,
"set-cookie" | "set-cookie2" => false,
header => {
let result =
allowed_headers.iter().find(|h| *header == *h.to_ascii_lowercase());
result.is_some()
}
}
}).collect();
response.headers = headers;
},
ResponseType::Opaque => {
response.url_list = RefCell::new(vec![]);
response.url = None;
response.headers = Headers::new();
response.status = None;
response.body = Arc::new(Mutex::new(ResponseBody::Empty));
response.cache_state = CacheState::None;
},
ResponseType::OpaqueRedirect => {
response.headers = Headers::new();
response.status = None;
response.body = Arc::new(Mutex::new(ResponseBody::Empty));
response.cache_state = CacheState::None;
}
}
response
}
pub fn metadata(&self) -> Result<FetchMetadata, NetworkError> {
fn init_metadata(response: &Response, url: &ServoUrl) -> Metadata {
let mut metadata = Metadata::default(url.clone());
metadata.set_content_type(match response.headers.get() {
Some(&ContentType(ref mime)) => Some(mime),
None => None
});
metadata.headers = Some(Serde(response.headers.clone()));
metadata.status = response.raw_status.clone();
metadata.https_state = response.https_state;
metadata.referrer = response.referrer.clone();
metadata
};
if let Some(error) = self.get_network_error() {
return Err(error.clone());
}
let metadata = self.url.as_ref().map(|url| init_metadata(self, url));
if let Some(ref response) = self.internal_response {
match response.url {
Some(ref url) => {
let unsafe_metadata = init_metadata(response, url);
Ok(FetchMetadata::Filtered {
filtered: match metadata {
Some(m) => FilteredMetadata::Transparent(m),
None => FilteredMetadata::Opaque
},
unsafe_: unsafe_metadata
})
}
None => Err(NetworkError::Internal("No url found in unsafe response".to_owned()))
}
} else {
Ok(FetchMetadata::Unfiltered(metadata.unwrap()))
}
}
}
| network_error | identifier_name |
response.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The [Response](https://fetch.spec.whatwg.org/#responses) object
//! resulting from a [fetch operation](https://fetch.spec.whatwg.org/#concept-fetch)
use {FetchMetadata, FilteredMetadata, Metadata, NetworkError};
use hyper::header::{AccessControlExposeHeaders, ContentType, Headers};
use hyper::status::StatusCode;
use hyper_serde::Serde;
use servo_url::ServoUrl;
use std::ascii::AsciiExt;
use std::cell::{Cell, RefCell};
use std::sync::{Arc, Mutex};
/// [Response type](https://fetch.spec.whatwg.org/#concept-response-type)
#[derive(Clone, PartialEq, Debug, Deserialize, Serialize, HeapSizeOf)]
pub enum ResponseType {
Basic,
Cors,
Default,
Error(NetworkError),
Opaque,
OpaqueRedirect
}
/// [Response termination reason](https://fetch.spec.whatwg.org/#concept-response-termination-reason)
#[derive(Debug, Clone, Copy, Deserialize, Serialize, HeapSizeOf)]
pub enum TerminationReason {
EndUserAbort,
Fatal,
Timeout
}
/// The response body can still be pushed to after fetch
/// This provides a way to store unfinished response bodies
#[derive(Clone, Debug, PartialEq, HeapSizeOf)]
pub enum ResponseBody {
Empty, // XXXManishearth is this necessary, or is Done(vec![]) enough?
Receiving(Vec<u8>),
Done(Vec<u8>),
}
impl ResponseBody {
pub fn is_done(&self) -> bool {
match *self {
ResponseBody::Done(..) => true,
ResponseBody::Empty | ResponseBody::Receiving(..) => false
}
}
}
/// [Cache state](https://fetch.spec.whatwg.org/#concept-response-cache-state)
#[derive(Clone, Debug, Deserialize, Serialize, HeapSizeOf)]
pub enum CacheState {
None,
Local,
Validated,
Partial
}
/// [Https state](https://fetch.spec.whatwg.org/#concept-response-https-state)
#[derive(Debug, Clone, Copy, HeapSizeOf, Deserialize, Serialize)]
pub enum HttpsState {
None,
Deprecated,
Modern
}
pub enum ResponseMsg {
Chunk(Vec<u8>),
Finished,
Errored
}
/// A [Response](https://fetch.spec.whatwg.org/#concept-response) as defined by the Fetch spec
#[derive(Debug, Clone, HeapSizeOf)]
pub struct Response {
pub response_type: ResponseType,
pub termination_reason: Option<TerminationReason>,
url: Option<ServoUrl>,
pub url_list: RefCell<Vec<ServoUrl>>,
/// `None` can be considered a StatusCode of `0`.
#[ignore_heap_size_of = "Defined in hyper"]
pub status: Option<StatusCode>,
pub raw_status: Option<(u16, Vec<u8>)>,
#[ignore_heap_size_of = "Defined in hyper"]
pub headers: Headers,
#[ignore_heap_size_of = "Mutex heap size undefined"]
pub body: Arc<Mutex<ResponseBody>>,
pub cache_state: CacheState,
pub https_state: HttpsState,
pub referrer: Option<ServoUrl>,
/// [Internal response](https://fetch.spec.whatwg.org/#concept-internal-response), only used if the Response
/// is a filtered response
pub internal_response: Option<Box<Response>>,
/// whether or not to try to return the internal_response when asked for actual_response
pub return_internal: Cell<bool>,
}
impl Response {
pub fn new(url: ServoUrl) -> Response {
Response {
response_type: ResponseType::Default,
termination_reason: None,
url: Some(url),
url_list: RefCell::new(Vec::new()),
status: Some(StatusCode::Ok),
raw_status: Some((200, b"OK".to_vec())),
headers: Headers::new(),
body: Arc::new(Mutex::new(ResponseBody::Empty)),
cache_state: CacheState::None,
https_state: HttpsState::None,
referrer: None,
internal_response: None,
return_internal: Cell::new(true)
}
}
pub fn network_error(e: NetworkError) -> Response {
Response {
response_type: ResponseType::Error(e),
termination_reason: None,
url: None,
url_list: RefCell::new(vec![]),
status: None,
raw_status: None,
headers: Headers::new(),
body: Arc::new(Mutex::new(ResponseBody::Empty)),
cache_state: CacheState::None,
https_state: HttpsState::None,
referrer: None,
internal_response: None,
return_internal: Cell::new(true)
}
}
pub fn url(&self) -> Option<&ServoUrl> {
self.url.as_ref()
}
pub fn is_network_error(&self) -> bool {
match self.response_type {
ResponseType::Error(..) => true,
_ => false
}
}
pub fn get_network_error(&self) -> Option<&NetworkError> {
match self.response_type {
ResponseType::Error(ref e) => Some(e),
_ => None,
}
}
pub fn actual_response(&self) -> &Response {
if self.return_internal.get() && self.internal_response.is_some() {
&**self.internal_response.as_ref().unwrap()
} else {
self
}
}
pub fn to_actual(self) -> Response {
if self.return_internal.get() && self.internal_response.is_some() {
*self.internal_response.unwrap()
} else {
self
}
}
/// Convert to a filtered response, of type `filter_type`.
/// Do not use with type Error or Default
pub fn to_filtered(self, filter_type: ResponseType) -> Response {
match filter_type {
ResponseType::Default | ResponseType::Error(..) => panic!(),
_ => (),
}
let old_response = self.to_actual();
if let ResponseType::Error(e) = old_response.response_type {
return Response::network_error(e);
}
let old_headers = old_response.headers.clone();
let mut response = old_response.clone();
response.internal_response = Some(Box::new(old_response));
response.response_type = filter_type; |
ResponseType::Basic => {
let headers = old_headers.iter().filter(|header| {
match &*header.name().to_ascii_lowercase() {
"set-cookie" | "set-cookie2" => false,
_ => true
}
}).collect();
response.headers = headers;
},
ResponseType::Cors => {
let access = old_headers.get::<AccessControlExposeHeaders>();
let allowed_headers = access.as_ref().map(|v| &v[..]).unwrap_or(&[]);
let headers = old_headers.iter().filter(|header| {
match &*header.name().to_ascii_lowercase() {
"cache-control" | "content-language" | "content-type" |
"expires" | "last-modified" | "pragma" => true,
"set-cookie" | "set-cookie2" => false,
header => {
let result =
allowed_headers.iter().find(|h| *header == *h.to_ascii_lowercase());
result.is_some()
}
}
}).collect();
response.headers = headers;
},
ResponseType::Opaque => {
response.url_list = RefCell::new(vec![]);
response.url = None;
response.headers = Headers::new();
response.status = None;
response.body = Arc::new(Mutex::new(ResponseBody::Empty));
response.cache_state = CacheState::None;
},
ResponseType::OpaqueRedirect => {
response.headers = Headers::new();
response.status = None;
response.body = Arc::new(Mutex::new(ResponseBody::Empty));
response.cache_state = CacheState::None;
}
}
response
}
pub fn metadata(&self) -> Result<FetchMetadata, NetworkError> {
fn init_metadata(response: &Response, url: &ServoUrl) -> Metadata {
let mut metadata = Metadata::default(url.clone());
metadata.set_content_type(match response.headers.get() {
Some(&ContentType(ref mime)) => Some(mime),
None => None
});
metadata.headers = Some(Serde(response.headers.clone()));
metadata.status = response.raw_status.clone();
metadata.https_state = response.https_state;
metadata.referrer = response.referrer.clone();
metadata
};
if let Some(error) = self.get_network_error() {
return Err(error.clone());
}
let metadata = self.url.as_ref().map(|url| init_metadata(self, url));
if let Some(ref response) = self.internal_response {
match response.url {
Some(ref url) => {
let unsafe_metadata = init_metadata(response, url);
Ok(FetchMetadata::Filtered {
filtered: match metadata {
Some(m) => FilteredMetadata::Transparent(m),
None => FilteredMetadata::Opaque
},
unsafe_: unsafe_metadata
})
}
None => Err(NetworkError::Internal("No url found in unsafe response".to_owned()))
}
} else {
Ok(FetchMetadata::Unfiltered(metadata.unwrap()))
}
}
} |
match response.response_type {
ResponseType::Default | ResponseType::Error(..) => unreachable!(), | random_line_split |
response.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The [Response](https://fetch.spec.whatwg.org/#responses) object
//! resulting from a [fetch operation](https://fetch.spec.whatwg.org/#concept-fetch)
use {FetchMetadata, FilteredMetadata, Metadata, NetworkError};
use hyper::header::{AccessControlExposeHeaders, ContentType, Headers};
use hyper::status::StatusCode;
use hyper_serde::Serde;
use servo_url::ServoUrl;
use std::ascii::AsciiExt;
use std::cell::{Cell, RefCell};
use std::sync::{Arc, Mutex};
/// [Response type](https://fetch.spec.whatwg.org/#concept-response-type)
#[derive(Clone, PartialEq, Debug, Deserialize, Serialize, HeapSizeOf)]
pub enum ResponseType {
Basic,
Cors,
Default,
Error(NetworkError),
Opaque,
OpaqueRedirect
}
/// [Response termination reason](https://fetch.spec.whatwg.org/#concept-response-termination-reason)
#[derive(Debug, Clone, Copy, Deserialize, Serialize, HeapSizeOf)]
pub enum TerminationReason {
EndUserAbort,
Fatal,
Timeout
}
/// The response body can still be pushed to after fetch
/// This provides a way to store unfinished response bodies
#[derive(Clone, Debug, PartialEq, HeapSizeOf)]
pub enum ResponseBody {
Empty, // XXXManishearth is this necessary, or is Done(vec![]) enough?
Receiving(Vec<u8>),
Done(Vec<u8>),
}
impl ResponseBody {
pub fn is_done(&self) -> bool {
match *self {
ResponseBody::Done(..) => true,
ResponseBody::Empty | ResponseBody::Receiving(..) => false
}
}
}
/// [Cache state](https://fetch.spec.whatwg.org/#concept-response-cache-state)
#[derive(Clone, Debug, Deserialize, Serialize, HeapSizeOf)]
pub enum CacheState {
None,
Local,
Validated,
Partial
}
/// [Https state](https://fetch.spec.whatwg.org/#concept-response-https-state)
#[derive(Debug, Clone, Copy, HeapSizeOf, Deserialize, Serialize)]
pub enum HttpsState {
None,
Deprecated,
Modern
}
pub enum ResponseMsg {
Chunk(Vec<u8>),
Finished,
Errored
}
/// A [Response](https://fetch.spec.whatwg.org/#concept-response) as defined by the Fetch spec
#[derive(Debug, Clone, HeapSizeOf)]
pub struct Response {
pub response_type: ResponseType,
pub termination_reason: Option<TerminationReason>,
url: Option<ServoUrl>,
pub url_list: RefCell<Vec<ServoUrl>>,
/// `None` can be considered a StatusCode of `0`.
#[ignore_heap_size_of = "Defined in hyper"]
pub status: Option<StatusCode>,
pub raw_status: Option<(u16, Vec<u8>)>,
#[ignore_heap_size_of = "Defined in hyper"]
pub headers: Headers,
#[ignore_heap_size_of = "Mutex heap size undefined"]
pub body: Arc<Mutex<ResponseBody>>,
pub cache_state: CacheState,
pub https_state: HttpsState,
pub referrer: Option<ServoUrl>,
/// [Internal response](https://fetch.spec.whatwg.org/#concept-internal-response), only used if the Response
/// is a filtered response
pub internal_response: Option<Box<Response>>,
/// whether or not to try to return the internal_response when asked for actual_response
pub return_internal: Cell<bool>,
}
impl Response {
pub fn new(url: ServoUrl) -> Response {
Response {
response_type: ResponseType::Default,
termination_reason: None,
url: Some(url),
url_list: RefCell::new(Vec::new()),
status: Some(StatusCode::Ok),
raw_status: Some((200, b"OK".to_vec())),
headers: Headers::new(),
body: Arc::new(Mutex::new(ResponseBody::Empty)),
cache_state: CacheState::None,
https_state: HttpsState::None,
referrer: None,
internal_response: None,
return_internal: Cell::new(true)
}
}
pub fn network_error(e: NetworkError) -> Response {
Response {
response_type: ResponseType::Error(e),
termination_reason: None,
url: None,
url_list: RefCell::new(vec![]),
status: None,
raw_status: None,
headers: Headers::new(),
body: Arc::new(Mutex::new(ResponseBody::Empty)),
cache_state: CacheState::None,
https_state: HttpsState::None,
referrer: None,
internal_response: None,
return_internal: Cell::new(true)
}
}
pub fn url(&self) -> Option<&ServoUrl> {
self.url.as_ref()
}
pub fn is_network_error(&self) -> bool {
match self.response_type {
ResponseType::Error(..) => true,
_ => false
}
}
pub fn get_network_error(&self) -> Option<&NetworkError> {
match self.response_type {
ResponseType::Error(ref e) => Some(e),
_ => None,
}
}
pub fn actual_response(&self) -> &Response {
if self.return_internal.get() && self.internal_response.is_some() {
&**self.internal_response.as_ref().unwrap()
} else {
self
}
}
pub fn to_actual(self) -> Response {
if self.return_internal.get() && self.internal_response.is_some() {
*self.internal_response.unwrap()
} else {
self
}
}
/// Convert to a filtered response, of type `filter_type`.
/// Do not use with type Error or Default
pub fn to_filtered(self, filter_type: ResponseType) -> Response {
match filter_type {
ResponseType::Default | ResponseType::Error(..) => panic!(),
_ => (),
}
let old_response = self.to_actual();
if let ResponseType::Error(e) = old_response.response_type {
return Response::network_error(e);
}
let old_headers = old_response.headers.clone();
let mut response = old_response.clone();
response.internal_response = Some(Box::new(old_response));
response.response_type = filter_type;
match response.response_type {
ResponseType::Default | ResponseType::Error(..) => unreachable!(),
ResponseType::Basic => {
let headers = old_headers.iter().filter(|header| {
match &*header.name().to_ascii_lowercase() {
"set-cookie" | "set-cookie2" => false,
_ => true
}
}).collect();
response.headers = headers;
},
ResponseType::Cors => {
let access = old_headers.get::<AccessControlExposeHeaders>();
let allowed_headers = access.as_ref().map(|v| &v[..]).unwrap_or(&[]);
let headers = old_headers.iter().filter(|header| {
match &*header.name().to_ascii_lowercase() {
"cache-control" | "content-language" | "content-type" |
"expires" | "last-modified" | "pragma" => true,
"set-cookie" | "set-cookie2" => false,
header => {
let result =
allowed_headers.iter().find(|h| *header == *h.to_ascii_lowercase());
result.is_some()
}
}
}).collect();
response.headers = headers;
},
ResponseType::Opaque => {
response.url_list = RefCell::new(vec![]);
response.url = None;
response.headers = Headers::new();
response.status = None;
response.body = Arc::new(Mutex::new(ResponseBody::Empty));
response.cache_state = CacheState::None;
},
ResponseType::OpaqueRedirect => {
response.headers = Headers::new();
response.status = None;
response.body = Arc::new(Mutex::new(ResponseBody::Empty));
response.cache_state = CacheState::None;
}
}
response
}
pub fn metadata(&self) -> Result<FetchMetadata, NetworkError> {
fn init_metadata(response: &Response, url: &ServoUrl) -> Metadata | ;
if let Some(error) = self.get_network_error() {
return Err(error.clone());
}
let metadata = self.url.as_ref().map(|url| init_metadata(self, url));
if let Some(ref response) = self.internal_response {
match response.url {
Some(ref url) => {
let unsafe_metadata = init_metadata(response, url);
Ok(FetchMetadata::Filtered {
filtered: match metadata {
Some(m) => FilteredMetadata::Transparent(m),
None => FilteredMetadata::Opaque
},
unsafe_: unsafe_metadata
})
}
None => Err(NetworkError::Internal("No url found in unsafe response".to_owned()))
}
} else {
Ok(FetchMetadata::Unfiltered(metadata.unwrap()))
}
}
}
| {
let mut metadata = Metadata::default(url.clone());
metadata.set_content_type(match response.headers.get() {
Some(&ContentType(ref mime)) => Some(mime),
None => None
});
metadata.headers = Some(Serde(response.headers.clone()));
metadata.status = response.raw_status.clone();
metadata.https_state = response.https_state;
metadata.referrer = response.referrer.clone();
metadata
} | identifier_body |
sessions.rs | use iron::headers::ContentType;
use iron::prelude::*;
use iron::status;
use serde_json;
use authentication::{authenticate_user, delete_session};
use super::common::*;
pub fn | (request: &mut Request) -> IronResult<Response> {
use models::NewSession;
let email = get_param(request, "email")?;
let password = get_param(request, "password")?;
let (user, session) = authenticate_user(&email, &password)?;
let new_session = NewSession {
token: session.token,
user_id: user.id,
expired_at: session.expired_at,
};
let payload = serde_json::to_string(&new_session).unwrap();
Ok(Response::with((ContentType::json().0, status::Created, payload)))
}
pub fn logout(request: &mut Request) -> IronResult<Response> {
use models::Session;
let current_session = request.extensions.get::<Session>().unwrap();
delete_session(current_session.token.as_str())?;
Ok(Response::with((ContentType::json().0, status::NoContent)))
}
| login | identifier_name |
sessions.rs | use iron::headers::ContentType;
use iron::prelude::*;
use iron::status;
use serde_json;
use authentication::{authenticate_user, delete_session};
use super::common::*;
pub fn login(request: &mut Request) -> IronResult<Response> |
pub fn logout(request: &mut Request) -> IronResult<Response> {
use models::Session;
let current_session = request.extensions.get::<Session>().unwrap();
delete_session(current_session.token.as_str())?;
Ok(Response::with((ContentType::json().0, status::NoContent)))
}
| {
use models::NewSession;
let email = get_param(request, "email")?;
let password = get_param(request, "password")?;
let (user, session) = authenticate_user(&email, &password)?;
let new_session = NewSession {
token: session.token,
user_id: user.id,
expired_at: session.expired_at,
};
let payload = serde_json::to_string(&new_session).unwrap();
Ok(Response::with((ContentType::json().0, status::Created, payload)))
} | identifier_body |
sessions.rs | use iron::headers::ContentType;
use iron::prelude::*;
use iron::status;
use serde_json;
use authentication::{authenticate_user, delete_session};
use super::common::*;
pub fn login(request: &mut Request) -> IronResult<Response> {
use models::NewSession;
let email = get_param(request, "email")?;
let password = get_param(request, "password")?;
let (user, session) = authenticate_user(&email, &password)?;
let new_session = NewSession { | user_id: user.id,
expired_at: session.expired_at,
};
let payload = serde_json::to_string(&new_session).unwrap();
Ok(Response::with((ContentType::json().0, status::Created, payload)))
}
pub fn logout(request: &mut Request) -> IronResult<Response> {
use models::Session;
let current_session = request.extensions.get::<Session>().unwrap();
delete_session(current_session.token.as_str())?;
Ok(Response::with((ContentType::json().0, status::NoContent)))
} | token: session.token, | random_line_split |
service.rs | use std::future::Future;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use crate::futures;
use crate::jsonrpc::{middleware, MetaIoHandler, Metadata, Middleware};
pub struct Service<M: Metadata = (), S: Middleware<M> = middleware::Noop> {
handler: Arc<MetaIoHandler<M, S>>,
peer_addr: SocketAddr,
meta: M,
}
impl<M: Metadata, S: Middleware<M>> Service<M, S> {
pub fn new(peer_addr: SocketAddr, handler: Arc<MetaIoHandler<M, S>>, meta: M) -> Self {
Service {
handler,
peer_addr,
meta,
}
}
}
impl<M: Metadata, S: Middleware<M>> tower_service::Service<String> for Service<M, S>
where
S::Future: Unpin,
S::CallFuture: Unpin,
{
// These types must match the corresponding protocol types:
type Response = Option<String>;
// For non-streaming protocols, service errors are always io::Error
type Error = ();
// The future for computing the response; box it for simplicity.
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
fn | (&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
// Produce a future for computing a response from a request.
fn call(&mut self, req: String) -> Self::Future {
use futures::FutureExt;
trace!(target: "tcp", "Accepted request from peer {}: {}", &self.peer_addr, req);
Box::pin(self.handler.handle_request(&req, self.meta.clone()).map(Ok))
}
}
| poll_ready | identifier_name |
service.rs | use std::future::Future;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use crate::futures;
use crate::jsonrpc::{middleware, MetaIoHandler, Metadata, Middleware};
pub struct Service<M: Metadata = (), S: Middleware<M> = middleware::Noop> {
handler: Arc<MetaIoHandler<M, S>>,
peer_addr: SocketAddr,
meta: M,
}
impl<M: Metadata, S: Middleware<M>> Service<M, S> {
pub fn new(peer_addr: SocketAddr, handler: Arc<MetaIoHandler<M, S>>, meta: M) -> Self {
Service {
handler,
peer_addr,
meta,
}
}
}
impl<M: Metadata, S: Middleware<M>> tower_service::Service<String> for Service<M, S>
where
S::Future: Unpin,
S::CallFuture: Unpin,
{
// These types must match the corresponding protocol types:
type Response = Option<String>;
// For non-streaming protocols, service errors are always io::Error
type Error = ();
// The future for computing the response; box it for simplicity.
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { | use futures::FutureExt;
trace!(target: "tcp", "Accepted request from peer {}: {}", &self.peer_addr, req);
Box::pin(self.handler.handle_request(&req, self.meta.clone()).map(Ok))
}
} | Poll::Ready(Ok(()))
}
// Produce a future for computing a response from a request.
fn call(&mut self, req: String) -> Self::Future { | random_line_split |