file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
build.rs
|
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::env;
use std::process::Command;
use std::str::{self, FromStr};
fn main()
|
}
}
fn rustc_minor_version() -> Option<u32> {
let rustc = match env::var_os("RUSTC") {
Some(rustc) => rustc,
None => return None,
};
let output = match Command::new(rustc).arg("--version").output() {
Ok(output) => output,
Err(_) => return None,
};
let version = match str::from_utf8(&output.stdout) {
Ok(version) => version,
Err(_) => return None,
};
let mut pieces = version.split('.');
if pieces.next()!= Some("rustc 1") {
return None;
}
let next = match pieces.next() {
Some(next) => next,
None => return None,
};
u32::from_str(next).ok()
}
|
{
println!("cargo:rustc-cfg=build_script_ran");
let minor = match rustc_minor_version() {
Some(minor) => minor,
None => return,
};
let target = env::var("TARGET").unwrap();
if minor >= 34 {
println!("cargo:rustc-cfg=is_new_rustc");
} else {
println!("cargo:rustc-cfg=is_old_rustc");
}
if target.contains("android") {
println!("cargo:rustc-cfg=is_android");
}
if target.contains("darwin") {
println!("cargo:rustc-cfg=is_mac");
|
identifier_body
|
build.rs
|
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::env;
use std::process::Command;
use std::str::{self, FromStr};
fn
|
() {
println!("cargo:rustc-cfg=build_script_ran");
let minor = match rustc_minor_version() {
Some(minor) => minor,
None => return,
};
let target = env::var("TARGET").unwrap();
if minor >= 34 {
println!("cargo:rustc-cfg=is_new_rustc");
} else {
println!("cargo:rustc-cfg=is_old_rustc");
}
if target.contains("android") {
println!("cargo:rustc-cfg=is_android");
}
if target.contains("darwin") {
println!("cargo:rustc-cfg=is_mac");
}
}
fn rustc_minor_version() -> Option<u32> {
let rustc = match env::var_os("RUSTC") {
Some(rustc) => rustc,
None => return None,
};
let output = match Command::new(rustc).arg("--version").output() {
Ok(output) => output,
Err(_) => return None,
};
let version = match str::from_utf8(&output.stdout) {
Ok(version) => version,
Err(_) => return None,
};
let mut pieces = version.split('.');
if pieces.next()!= Some("rustc 1") {
return None;
}
let next = match pieces.next() {
Some(next) => next,
None => return None,
};
u32::from_str(next).ok()
}
|
main
|
identifier_name
|
monitor.rs
|
//! Types useful for interacting with a user's monitors.
//!
//! If you want to get basic information about a monitor, you can use the [`MonitorHandle`][monitor_handle]
//! type. This is retrieved from one of the following methods, which return an iterator of
//! [`MonitorHandle`][monitor_handle]:
//! - [`EventLoopWindowTarget::available_monitors`][loop_get]
//! - [`Window::available_monitors`][window_get].
//!
//! [monitor_handle]: crate::monitor::MonitorHandle
//! [loop_get]: crate::event_loop::EventLoopWindowTarget::available_monitors
//! [window_get]: crate::window::Window::available_monitors
use crate::{
dpi::{PhysicalPosition, PhysicalSize},
platform_impl,
};
/// Describes a fullscreen video mode of a monitor.
///
/// Can be acquired with:
/// - [`MonitorHandle::video_modes`][monitor_get].
///
/// [monitor_get]: crate::monitor::MonitorHandle::video_modes
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct VideoMode {
pub(crate) video_mode: platform_impl::VideoMode,
}
impl std::fmt::Debug for VideoMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.video_mode.fmt(f)
}
}
impl PartialOrd for VideoMode {
fn partial_cmp(&self, other: &VideoMode) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for VideoMode {
fn cmp(&self, other: &VideoMode) -> std::cmp::Ordering {
// TODO: we can impl `Ord` for `PhysicalSize` once we switch from `f32`
// to `u32` there
let size: (u32, u32) = self.size().into();
let other_size: (u32, u32) = other.size().into();
self.monitor().cmp(&other.monitor()).then(
size.cmp(&other_size)
.then(
self.refresh_rate()
.cmp(&other.refresh_rate())
.then(self.bit_depth().cmp(&other.bit_depth())),
)
.reverse(),
)
}
}
impl VideoMode {
/// Returns the resolution of this video mode.
#[inline]
pub fn size(&self) -> PhysicalSize<u32> {
self.video_mode.size()
}
/// Returns the bit depth of this video mode, as in how many bits you have
/// available per color. This is generally 24 bits or 32 bits on modern
/// systems, depending on whether the alpha channel is counted or not.
///
/// ## Platform-specific
///
/// - **Wayland:** Always returns 32.
/// - **iOS:** Always returns 32.
#[inline]
pub fn bit_depth(&self) -> u16 {
self.video_mode.bit_depth()
}
/// Returns the refresh rate of this video mode. **Note**: the returned
/// refresh rate is an integer approximation, and you shouldn't rely on this
/// value to be exact.
#[inline]
pub fn refresh_rate(&self) -> u16 {
self.video_mode.refresh_rate()
}
/// Returns the monitor that this video mode is valid for. Each monitor has
/// a separate set of valid video modes.
#[inline]
pub fn monitor(&self) -> MonitorHandle {
self.video_mode.monitor()
}
}
impl std::fmt::Display for VideoMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result
|
}
/// Handle to a monitor.
///
/// Allows you to retrieve information about a given monitor and can be used in [`Window`] creation.
///
/// [`Window`]: crate::window::Window
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct MonitorHandle {
pub(crate) inner: platform_impl::MonitorHandle,
}
impl MonitorHandle {
/// Returns a human-readable name of the monitor.
///
/// Returns `None` if the monitor doesn't exist anymore.
///
/// ## Platform-specific
///
/// - **Web:** Always returns None
#[inline]
pub fn name(&self) -> Option<String> {
self.inner.name()
}
/// Returns the monitor's resolution.
///
/// ## Platform-specific
///
/// - **Web:** Always returns (0,0)
#[inline]
pub fn size(&self) -> PhysicalSize<u32> {
self.inner.size()
}
/// Returns the top-left corner position of the monitor relative to the larger full
/// screen area.
///
/// ## Platform-specific
///
/// - **Web:** Always returns (0,0)
#[inline]
pub fn position(&self) -> PhysicalPosition<i32> {
self.inner.position()
}
/// Returns the scale factor that can be used to map logical pixels to physical pixels, and vice versa.
///
/// See the [`dpi`](crate::dpi) module for more information.
///
/// ## Platform-specific
///
/// - **X11:** Can be overridden using the `WINIT_X11_SCALE_FACTOR` environment variable.
/// - **Android:** Always returns 1.0.
/// - **Web:** Always returns 1.0
#[inline]
pub fn scale_factor(&self) -> f64 {
self.inner.scale_factor()
}
/// Returns all fullscreen video modes supported by this monitor.
///
/// ## Platform-specific
///
/// - **Web:** Always returns an empty iterator
#[inline]
pub fn video_modes(&self) -> impl Iterator<Item = VideoMode> {
self.inner.video_modes()
}
}
|
{
write!(
f,
"{}x{} @ {} Hz ({} bpp)",
self.size().width,
self.size().height,
self.refresh_rate(),
self.bit_depth()
)
}
|
identifier_body
|
monitor.rs
|
//! Types useful for interacting with a user's monitors.
//!
//! If you want to get basic information about a monitor, you can use the [`MonitorHandle`][monitor_handle]
//! type. This is retrieved from one of the following methods, which return an iterator of
//! [`MonitorHandle`][monitor_handle]:
//! - [`EventLoopWindowTarget::available_monitors`][loop_get]
//! - [`Window::available_monitors`][window_get].
//!
//! [monitor_handle]: crate::monitor::MonitorHandle
//! [loop_get]: crate::event_loop::EventLoopWindowTarget::available_monitors
//! [window_get]: crate::window::Window::available_monitors
use crate::{
dpi::{PhysicalPosition, PhysicalSize},
platform_impl,
};
/// Describes a fullscreen video mode of a monitor.
///
/// Can be acquired with:
/// - [`MonitorHandle::video_modes`][monitor_get].
///
/// [monitor_get]: crate::monitor::MonitorHandle::video_modes
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct VideoMode {
pub(crate) video_mode: platform_impl::VideoMode,
}
impl std::fmt::Debug for VideoMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.video_mode.fmt(f)
}
}
impl PartialOrd for VideoMode {
fn partial_cmp(&self, other: &VideoMode) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for VideoMode {
fn cmp(&self, other: &VideoMode) -> std::cmp::Ordering {
// TODO: we can impl `Ord` for `PhysicalSize` once we switch from `f32`
// to `u32` there
let size: (u32, u32) = self.size().into();
let other_size: (u32, u32) = other.size().into();
self.monitor().cmp(&other.monitor()).then(
size.cmp(&other_size)
.then(
self.refresh_rate()
.cmp(&other.refresh_rate())
.then(self.bit_depth().cmp(&other.bit_depth())),
)
.reverse(),
)
}
}
impl VideoMode {
/// Returns the resolution of this video mode.
#[inline]
pub fn size(&self) -> PhysicalSize<u32> {
self.video_mode.size()
}
/// Returns the bit depth of this video mode, as in how many bits you have
/// available per color. This is generally 24 bits or 32 bits on modern
/// systems, depending on whether the alpha channel is counted or not.
///
/// ## Platform-specific
///
/// - **Wayland:** Always returns 32.
/// - **iOS:** Always returns 32.
#[inline]
pub fn bit_depth(&self) -> u16 {
self.video_mode.bit_depth()
}
/// Returns the refresh rate of this video mode. **Note**: the returned
/// refresh rate is an integer approximation, and you shouldn't rely on this
/// value to be exact.
#[inline]
pub fn refresh_rate(&self) -> u16 {
self.video_mode.refresh_rate()
}
/// Returns the monitor that this video mode is valid for. Each monitor has
/// a separate set of valid video modes.
#[inline]
pub fn monitor(&self) -> MonitorHandle {
self.video_mode.monitor()
}
}
impl std::fmt::Display for VideoMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}x{} @ {} Hz ({} bpp)",
self.size().width,
self.size().height,
self.refresh_rate(),
self.bit_depth()
)
}
}
/// Handle to a monitor.
///
/// Allows you to retrieve information about a given monitor and can be used in [`Window`] creation.
///
/// [`Window`]: crate::window::Window
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct MonitorHandle {
pub(crate) inner: platform_impl::MonitorHandle,
}
impl MonitorHandle {
/// Returns a human-readable name of the monitor.
///
/// Returns `None` if the monitor doesn't exist anymore.
///
/// ## Platform-specific
///
/// - **Web:** Always returns None
#[inline]
pub fn name(&self) -> Option<String> {
self.inner.name()
}
/// Returns the monitor's resolution.
///
/// ## Platform-specific
///
/// - **Web:** Always returns (0,0)
#[inline]
pub fn size(&self) -> PhysicalSize<u32> {
self.inner.size()
}
/// Returns the top-left corner position of the monitor relative to the larger full
/// screen area.
///
/// ## Platform-specific
///
/// - **Web:** Always returns (0,0)
#[inline]
pub fn
|
(&self) -> PhysicalPosition<i32> {
self.inner.position()
}
/// Returns the scale factor that can be used to map logical pixels to physical pixels, and vice versa.
///
/// See the [`dpi`](crate::dpi) module for more information.
///
/// ## Platform-specific
///
/// - **X11:** Can be overridden using the `WINIT_X11_SCALE_FACTOR` environment variable.
/// - **Android:** Always returns 1.0.
/// - **Web:** Always returns 1.0
#[inline]
pub fn scale_factor(&self) -> f64 {
self.inner.scale_factor()
}
/// Returns all fullscreen video modes supported by this monitor.
///
/// ## Platform-specific
///
/// - **Web:** Always returns an empty iterator
#[inline]
pub fn video_modes(&self) -> impl Iterator<Item = VideoMode> {
self.inner.video_modes()
}
}
|
position
|
identifier_name
|
monitor.rs
|
//! Types useful for interacting with a user's monitors.
//!
//! If you want to get basic information about a monitor, you can use the [`MonitorHandle`][monitor_handle]
//! type. This is retrieved from one of the following methods, which return an iterator of
//! [`MonitorHandle`][monitor_handle]:
//! - [`EventLoopWindowTarget::available_monitors`][loop_get]
//! - [`Window::available_monitors`][window_get].
//!
//! [monitor_handle]: crate::monitor::MonitorHandle
//! [loop_get]: crate::event_loop::EventLoopWindowTarget::available_monitors
//! [window_get]: crate::window::Window::available_monitors
use crate::{
dpi::{PhysicalPosition, PhysicalSize},
platform_impl,
};
/// Describes a fullscreen video mode of a monitor.
///
/// Can be acquired with:
/// - [`MonitorHandle::video_modes`][monitor_get].
///
/// [monitor_get]: crate::monitor::MonitorHandle::video_modes
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct VideoMode {
pub(crate) video_mode: platform_impl::VideoMode,
}
impl std::fmt::Debug for VideoMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.video_mode.fmt(f)
}
}
impl PartialOrd for VideoMode {
fn partial_cmp(&self, other: &VideoMode) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for VideoMode {
fn cmp(&self, other: &VideoMode) -> std::cmp::Ordering {
// TODO: we can impl `Ord` for `PhysicalSize` once we switch from `f32`
// to `u32` there
let size: (u32, u32) = self.size().into();
let other_size: (u32, u32) = other.size().into();
self.monitor().cmp(&other.monitor()).then(
size.cmp(&other_size)
.then(
self.refresh_rate()
.cmp(&other.refresh_rate())
.then(self.bit_depth().cmp(&other.bit_depth())),
)
.reverse(),
)
}
}
impl VideoMode {
/// Returns the resolution of this video mode.
#[inline]
pub fn size(&self) -> PhysicalSize<u32> {
self.video_mode.size()
}
/// Returns the bit depth of this video mode, as in how many bits you have
/// available per color. This is generally 24 bits or 32 bits on modern
/// systems, depending on whether the alpha channel is counted or not.
///
/// ## Platform-specific
///
/// - **Wayland:** Always returns 32.
/// - **iOS:** Always returns 32.
#[inline]
pub fn bit_depth(&self) -> u16 {
self.video_mode.bit_depth()
}
/// Returns the refresh rate of this video mode. **Note**: the returned
/// refresh rate is an integer approximation, and you shouldn't rely on this
/// value to be exact.
#[inline]
pub fn refresh_rate(&self) -> u16 {
self.video_mode.refresh_rate()
}
/// Returns the monitor that this video mode is valid for. Each monitor has
/// a separate set of valid video modes.
#[inline]
pub fn monitor(&self) -> MonitorHandle {
self.video_mode.monitor()
}
}
impl std::fmt::Display for VideoMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}x{} @ {} Hz ({} bpp)",
self.size().width,
self.size().height,
self.refresh_rate(),
self.bit_depth()
)
}
}
/// Handle to a monitor.
///
/// Allows you to retrieve information about a given monitor and can be used in [`Window`] creation.
///
/// [`Window`]: crate::window::Window
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct MonitorHandle {
pub(crate) inner: platform_impl::MonitorHandle,
}
impl MonitorHandle {
/// Returns a human-readable name of the monitor.
///
/// Returns `None` if the monitor doesn't exist anymore.
///
|
pub fn name(&self) -> Option<String> {
self.inner.name()
}
/// Returns the monitor's resolution.
///
/// ## Platform-specific
///
/// - **Web:** Always returns (0,0)
#[inline]
pub fn size(&self) -> PhysicalSize<u32> {
self.inner.size()
}
/// Returns the top-left corner position of the monitor relative to the larger full
/// screen area.
///
/// ## Platform-specific
///
/// - **Web:** Always returns (0,0)
#[inline]
pub fn position(&self) -> PhysicalPosition<i32> {
self.inner.position()
}
/// Returns the scale factor that can be used to map logical pixels to physical pixels, and vice versa.
///
/// See the [`dpi`](crate::dpi) module for more information.
///
/// ## Platform-specific
///
/// - **X11:** Can be overridden using the `WINIT_X11_SCALE_FACTOR` environment variable.
/// - **Android:** Always returns 1.0.
/// - **Web:** Always returns 1.0
#[inline]
pub fn scale_factor(&self) -> f64 {
self.inner.scale_factor()
}
/// Returns all fullscreen video modes supported by this monitor.
///
/// ## Platform-specific
///
/// - **Web:** Always returns an empty iterator
#[inline]
pub fn video_modes(&self) -> impl Iterator<Item = VideoMode> {
self.inner.video_modes()
}
}
|
/// ## Platform-specific
///
/// - **Web:** Always returns None
#[inline]
|
random_line_split
|
mod.rs
|
//
// Copyright:: Copyright (c) 2016 Chef Software, Inc.
// License:: Apache License, Version 2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use errors::{DeliveryError, Kind};
use git;
use regex::Captures;
use regex::Regex;
use std::fs::File;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use utils::path_ext::is_file;
use utils::say::{say, sayln};
use utils::{self, read_file};
#[derive(Debug, Clone)]
pub struct MetadataVersion {
pub major: usize,
pub minor: usize,
pub patch: usize,
}
impl MetadataVersion {
pub fn new(ma: Option<usize>, mi: Option<usize>, pa: Option<usize>) -> MetadataVersion {
MetadataVersion {
major: ma.unwrap_or_default(),
minor: mi.unwrap_or_default(),
patch: pa.unwrap_or_default(),
}
}
pub fn to_string(&self) -> String {
[
self.major.to_string(),
".".to_string(),
self.minor.to_string(),
".".to_string(),
self.patch.to_string(),
].concat()
}
}
// Bump the metadata version, only if:
// * The project is a cookbook
// * The version hasn't been updated
//
// @param p_root [&PathBuf] The project root path
// @param pipeline [&str] Pipeline the change is targeting to
// @return () if success
pub fn bump_version(p_root: &PathBuf, pipeline: &str, project: &str) -> Result<(), DeliveryError> {
if is_cookbook(&p_root) {
say("white", "Project ");
say("yellow", &project);
sayln("white", " is a cookbook");
sayln("white", "Validating version in metadata");
let meta_f_p = PathBuf::from(metadata_file(&p_root));
let meta_f_c = try!(read_file(&meta_f_p));
let current_meta_v = try!(metadata_version_from(&meta_f_c));
let current_v = current_meta_v.to_string();
let mut t_file = pipeline.to_string();
t_file.push_str(":metadata.rb");
let pipeline_meta = try!(git::git_command(&["show", &t_file], &p_root));
let pipeline_meta_v = try!(metadata_version_from(&pipeline_meta.stdout));
let pipeline_v = pipeline_meta_v.to_string();
if current_v == pipeline_v {
say("yellow", "The version hasn't been updated (");
say("red", &pipeline_v);
sayln("yellow", ")");
let new_meta_version = try!(bump_patchset(pipeline_meta_v.clone()));
let new_version = new_meta_version.to_string();
say("white", "Bumping version to: ");
sayln("green", &new_version);
try!(save_version(&meta_f_p, new_version));
} else {
say("white", "Version already updated (");
say("magenta", &pipeline_v);
say("white", "/");
say("green", ¤t_v);
sayln("white", ")");
}
}
Ok(())
}
// @Private
// Return the path to the metadata.rb file
fn metadata_file(path: &PathBuf) -> String {
let mut metadata = path.to_str().unwrap().to_string();
metadata.push_str("/metadata.rb");
return metadata;
}
// Verify if the provided path is a cookbook, or not
fn is_cookbook(path: &PathBuf) -> bool {
let meta_f = metadata_file(path);
is_file(&Path::new(&meta_f))
}
// Extract the cookbook version from the provided metadata content
//
// This function expects you to read the metadata in advance and
// pass just the content of the file. From there it will read every
// line and search for the version to return it.
//
// There are two valid version formats:
// a) 'x.y.z' - The normal semantic version. (major.minor.patch)
// b) 'x.y' - Where the patchset will be 0 by default. (major.minor.0)
fn metadata_version_from(content: &str) -> Result<MetadataVersion, DeliveryError> {
for l in content.lines() {
let r_m_m_p =
Regex::new(r"version\s+'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)'").unwrap();
if let Some(version) = r_m_m_p.captures(l) {
return generate_metadata_version(version);
}
let r_m_m = Regex::new(r"version\s+'(?P<major>\d+)\.(?P<minor>\d+)'").unwrap();
if let Some(version) = r_m_m.captures(l) {
return generate_metadata_version(version);
}
}
return Err(DeliveryError {
kind: Kind::MissingMetadataVersion,
detail: None,
});
}
fn generate_metadata_version(metadata: Captures) -> Result<MetadataVersion, DeliveryError> {
let mut ma = None;
let mut mi = None;
let mut pa = None;
if let Some(major) = metadata.name("major") {
ma = major.as_str().parse::<usize>().ok();
};
if let Some(minor) = metadata.name("minor") {
mi = minor.as_str().parse::<usize>().ok();
};
if let Some(patch) = metadata.name("patch") {
pa = patch.as_str().parse::<usize>().ok();
};
Ok(MetadataVersion::new(ma, mi, pa))
}
// Bump the patchset of the provided version
fn bump_patchset(mut version: MetadataVersion) -> Result<MetadataVersion, DeliveryError> {
version = MetadataVersion {
patch: version.patch + 1,
..version
};
Ok(version)
}
// Saves the new version to the metadata and commit the changes
fn save_version(metadata: &PathBuf, version: String) -> Result<(), DeliveryError> {
let current_meta = try!(read_file(metadata));
let current_meta_version = try!(metadata_version_from(¤t_meta));
let current_version = current_meta_version.to_string();
let new_metadata = current_meta.replace(&*current_version, &*version);
// Recreate the file and dump the processed contents to it
let mut recreate_meta = try!(File::create(metadata));
try!(recreate_meta.write(new_metadata.as_bytes()));
// Commit the changes made to the metadata
let mut commit_msg = String::from("Bump version to ");
commit_msg.push_str(&version);
try!(git::git_command(
&["add", metadata.to_str().unwrap()],
&utils::cwd()
));
try!(git::git_command(
|
}
#[cfg(test)]
mod tests {
use cookbook::*;
use std::path::PathBuf;
#[test]
fn test_metadata_version_constructor() {
let version_generator = MetadataVersion::new(None, Some(2), None);
let MetadataVersion {
major: ma,
minor: mi,
patch: pa,
} = version_generator;
assert_eq!(ma, 0);
assert_eq!(mi, 2);
assert_eq!(pa, 0);
assert_eq!("0.2.0", &version_generator.to_string());
}
#[test]
fn verify_version_bumping_using_bump_patchset() {
let version = MetadataVersion {
major: 1,
minor: 2,
patch: 3,
};
let MetadataVersion {
major: ma,
minor: mi,
patch: pa,
} = super::bump_patchset(version).unwrap();
assert_eq!(ma, 1);
assert_eq!(mi, 2);
assert_eq!(pa, 4);
}
#[test]
fn return_the_metadata_file_path() {
let project_path = PathBuf::from("/cookbook");
assert_eq!(
String::from("/cookbook/metadata.rb"),
super::metadata_file(&project_path)
);
}
#[test]
fn verify_happy_metadata_version_from_content() {
let happy_version = "1.2.3";
let happy_metadata_content = metadata_from_version(&happy_version);
let happy_metadata_version = super::metadata_version_from(&happy_metadata_content).unwrap();
assert_eq!(happy_version, &happy_metadata_version.to_string());
let valid_version = "1.2";
let valid_metadata_content = metadata_from_version(&valid_version);
let valid_metadata_version = super::metadata_version_from(&valid_metadata_content).unwrap();
assert_eq!("1.2.0", &valid_metadata_version.to_string());
let awesome_version = "123.123.123";
let awesome_metadata_content = metadata_from_version(&awesome_version);
let awesome_metadata_version =
super::metadata_version_from(&awesome_metadata_content).unwrap();
assert_eq!(awesome_version, &awesome_metadata_version.to_string());
}
#[test]
fn verify_sad_metadata_version_from_content() {
let sad_version = "1..";
let sad_metadata = metadata_from_version(&sad_version);
assert!(super::metadata_version_from(&sad_metadata).is_err());
let typo_version = "1.2.";
let typo_metadata = metadata_from_version(&typo_version);
assert!(super::metadata_version_from(&typo_metadata).is_err());
let no_version = "";
let no_metadata = metadata_from_version(&no_version);
assert!(super::metadata_version_from(&no_metadata).is_err());
}
// Quick helper to render a dummy metadata from a provided version
fn metadata_from_version(version: &str) -> String {
String::from(["version '", version, "'"].concat())
}
}
|
&["commit", "-m", &commit_msg],
&utils::cwd()
));
Ok(())
|
random_line_split
|
mod.rs
|
//
// Copyright:: Copyright (c) 2016 Chef Software, Inc.
// License:: Apache License, Version 2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use errors::{DeliveryError, Kind};
use git;
use regex::Captures;
use regex::Regex;
use std::fs::File;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use utils::path_ext::is_file;
use utils::say::{say, sayln};
use utils::{self, read_file};
#[derive(Debug, Clone)]
pub struct MetadataVersion {
pub major: usize,
pub minor: usize,
pub patch: usize,
}
impl MetadataVersion {
pub fn new(ma: Option<usize>, mi: Option<usize>, pa: Option<usize>) -> MetadataVersion {
MetadataVersion {
major: ma.unwrap_or_default(),
minor: mi.unwrap_or_default(),
patch: pa.unwrap_or_default(),
}
}
pub fn to_string(&self) -> String {
[
self.major.to_string(),
".".to_string(),
self.minor.to_string(),
".".to_string(),
self.patch.to_string(),
].concat()
}
}
// Bump the metadata version, only if:
// * The project is a cookbook
// * The version hasn't been updated
//
// @param p_root [&PathBuf] The project root path
// @param pipeline [&str] Pipeline the change is targeting to
// @return () if success
pub fn bump_version(p_root: &PathBuf, pipeline: &str, project: &str) -> Result<(), DeliveryError> {
if is_cookbook(&p_root) {
say("white", "Project ");
say("yellow", &project);
sayln("white", " is a cookbook");
sayln("white", "Validating version in metadata");
let meta_f_p = PathBuf::from(metadata_file(&p_root));
let meta_f_c = try!(read_file(&meta_f_p));
let current_meta_v = try!(metadata_version_from(&meta_f_c));
let current_v = current_meta_v.to_string();
let mut t_file = pipeline.to_string();
t_file.push_str(":metadata.rb");
let pipeline_meta = try!(git::git_command(&["show", &t_file], &p_root));
let pipeline_meta_v = try!(metadata_version_from(&pipeline_meta.stdout));
let pipeline_v = pipeline_meta_v.to_string();
if current_v == pipeline_v {
say("yellow", "The version hasn't been updated (");
say("red", &pipeline_v);
sayln("yellow", ")");
let new_meta_version = try!(bump_patchset(pipeline_meta_v.clone()));
let new_version = new_meta_version.to_string();
say("white", "Bumping version to: ");
sayln("green", &new_version);
try!(save_version(&meta_f_p, new_version));
} else {
say("white", "Version already updated (");
say("magenta", &pipeline_v);
say("white", "/");
say("green", ¤t_v);
sayln("white", ")");
}
}
Ok(())
}
// @Private
// Return the path to the metadata.rb file
fn metadata_file(path: &PathBuf) -> String {
let mut metadata = path.to_str().unwrap().to_string();
metadata.push_str("/metadata.rb");
return metadata;
}
// Verify if the provided path is a cookbook, or not
fn is_cookbook(path: &PathBuf) -> bool {
let meta_f = metadata_file(path);
is_file(&Path::new(&meta_f))
}
// Extract the cookbook version from the provided metadata content
//
// This function expects you to read the metadata in advance and
// pass just the content of the file. From there it will read every
// line and search for the version to return it.
//
// There are two valid version formats:
// a) 'x.y.z' - The normal semantic version. (major.minor.patch)
// b) 'x.y' - Where the patchset will be 0 by default. (major.minor.0)
fn metadata_version_from(content: &str) -> Result<MetadataVersion, DeliveryError> {
for l in content.lines() {
let r_m_m_p =
Regex::new(r"version\s+'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)'").unwrap();
if let Some(version) = r_m_m_p.captures(l) {
return generate_metadata_version(version);
}
let r_m_m = Regex::new(r"version\s+'(?P<major>\d+)\.(?P<minor>\d+)'").unwrap();
if let Some(version) = r_m_m.captures(l) {
return generate_metadata_version(version);
}
}
return Err(DeliveryError {
kind: Kind::MissingMetadataVersion,
detail: None,
});
}
fn generate_metadata_version(metadata: Captures) -> Result<MetadataVersion, DeliveryError> {
let mut ma = None;
let mut mi = None;
let mut pa = None;
if let Some(major) = metadata.name("major") {
ma = major.as_str().parse::<usize>().ok();
};
if let Some(minor) = metadata.name("minor") {
mi = minor.as_str().parse::<usize>().ok();
};
if let Some(patch) = metadata.name("patch") {
pa = patch.as_str().parse::<usize>().ok();
};
Ok(MetadataVersion::new(ma, mi, pa))
}
// Bump the patchset of the provided version
fn bump_patchset(mut version: MetadataVersion) -> Result<MetadataVersion, DeliveryError> {
version = MetadataVersion {
patch: version.patch + 1,
..version
};
Ok(version)
}
// Saves the new version to the metadata and commit the changes
fn save_version(metadata: &PathBuf, version: String) -> Result<(), DeliveryError> {
let current_meta = try!(read_file(metadata));
let current_meta_version = try!(metadata_version_from(¤t_meta));
let current_version = current_meta_version.to_string();
let new_metadata = current_meta.replace(&*current_version, &*version);
// Recreate the file and dump the processed contents to it
let mut recreate_meta = try!(File::create(metadata));
try!(recreate_meta.write(new_metadata.as_bytes()));
// Commit the changes made to the metadata
let mut commit_msg = String::from("Bump version to ");
commit_msg.push_str(&version);
try!(git::git_command(
&["add", metadata.to_str().unwrap()],
&utils::cwd()
));
try!(git::git_command(
&["commit", "-m", &commit_msg],
&utils::cwd()
));
Ok(())
}
#[cfg(test)]
mod tests {
use cookbook::*;
use std::path::PathBuf;
#[test]
fn test_metadata_version_constructor()
|
#[test]
fn verify_version_bumping_using_bump_patchset() {
let version = MetadataVersion {
major: 1,
minor: 2,
patch: 3,
};
let MetadataVersion {
major: ma,
minor: mi,
patch: pa,
} = super::bump_patchset(version).unwrap();
assert_eq!(ma, 1);
assert_eq!(mi, 2);
assert_eq!(pa, 4);
}
#[test]
fn return_the_metadata_file_path() {
let project_path = PathBuf::from("/cookbook");
assert_eq!(
String::from("/cookbook/metadata.rb"),
super::metadata_file(&project_path)
);
}
#[test]
fn verify_happy_metadata_version_from_content() {
let happy_version = "1.2.3";
let happy_metadata_content = metadata_from_version(&happy_version);
let happy_metadata_version = super::metadata_version_from(&happy_metadata_content).unwrap();
assert_eq!(happy_version, &happy_metadata_version.to_string());
let valid_version = "1.2";
let valid_metadata_content = metadata_from_version(&valid_version);
let valid_metadata_version = super::metadata_version_from(&valid_metadata_content).unwrap();
assert_eq!("1.2.0", &valid_metadata_version.to_string());
let awesome_version = "123.123.123";
let awesome_metadata_content = metadata_from_version(&awesome_version);
let awesome_metadata_version =
super::metadata_version_from(&awesome_metadata_content).unwrap();
assert_eq!(awesome_version, &awesome_metadata_version.to_string());
}
#[test]
fn verify_sad_metadata_version_from_content() {
let sad_version = "1..";
let sad_metadata = metadata_from_version(&sad_version);
assert!(super::metadata_version_from(&sad_metadata).is_err());
let typo_version = "1.2.";
let typo_metadata = metadata_from_version(&typo_version);
assert!(super::metadata_version_from(&typo_metadata).is_err());
let no_version = "";
let no_metadata = metadata_from_version(&no_version);
assert!(super::metadata_version_from(&no_metadata).is_err());
}
// Quick helper to render a dummy metadata from a provided version
fn metadata_from_version(version: &str) -> String {
String::from(["version '", version, "'"].concat())
}
}
|
{
let version_generator = MetadataVersion::new(None, Some(2), None);
let MetadataVersion {
major: ma,
minor: mi,
patch: pa,
} = version_generator;
assert_eq!(ma, 0);
assert_eq!(mi, 2);
assert_eq!(pa, 0);
assert_eq!("0.2.0", &version_generator.to_string());
}
|
identifier_body
|
mod.rs
|
//
// Copyright:: Copyright (c) 2016 Chef Software, Inc.
// License:: Apache License, Version 2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use errors::{DeliveryError, Kind};
use git;
use regex::Captures;
use regex::Regex;
use std::fs::File;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use utils::path_ext::is_file;
use utils::say::{say, sayln};
use utils::{self, read_file};
#[derive(Debug, Clone)]
pub struct MetadataVersion {
pub major: usize,
pub minor: usize,
pub patch: usize,
}
impl MetadataVersion {
pub fn new(ma: Option<usize>, mi: Option<usize>, pa: Option<usize>) -> MetadataVersion {
MetadataVersion {
major: ma.unwrap_or_default(),
minor: mi.unwrap_or_default(),
patch: pa.unwrap_or_default(),
}
}
pub fn to_string(&self) -> String {
[
self.major.to_string(),
".".to_string(),
self.minor.to_string(),
".".to_string(),
self.patch.to_string(),
].concat()
}
}
// Bump the metadata version, only if:
// * The project is a cookbook
// * The version hasn't been updated
//
// @param p_root [&PathBuf] The project root path
// @param pipeline [&str] Pipeline the change is targeting to
// @return () if success
pub fn bump_version(p_root: &PathBuf, pipeline: &str, project: &str) -> Result<(), DeliveryError> {
if is_cookbook(&p_root) {
say("white", "Project ");
say("yellow", &project);
sayln("white", " is a cookbook");
sayln("white", "Validating version in metadata");
let meta_f_p = PathBuf::from(metadata_file(&p_root));
let meta_f_c = try!(read_file(&meta_f_p));
let current_meta_v = try!(metadata_version_from(&meta_f_c));
let current_v = current_meta_v.to_string();
let mut t_file = pipeline.to_string();
t_file.push_str(":metadata.rb");
let pipeline_meta = try!(git::git_command(&["show", &t_file], &p_root));
let pipeline_meta_v = try!(metadata_version_from(&pipeline_meta.stdout));
let pipeline_v = pipeline_meta_v.to_string();
if current_v == pipeline_v {
say("yellow", "The version hasn't been updated (");
say("red", &pipeline_v);
sayln("yellow", ")");
let new_meta_version = try!(bump_patchset(pipeline_meta_v.clone()));
let new_version = new_meta_version.to_string();
say("white", "Bumping version to: ");
sayln("green", &new_version);
try!(save_version(&meta_f_p, new_version));
} else {
say("white", "Version already updated (");
say("magenta", &pipeline_v);
say("white", "/");
say("green", ¤t_v);
sayln("white", ")");
}
}
Ok(())
}
// @Private
// Return the path to the metadata.rb file
fn metadata_file(path: &PathBuf) -> String {
let mut metadata = path.to_str().unwrap().to_string();
metadata.push_str("/metadata.rb");
return metadata;
}
// Verify if the provided path is a cookbook, or not
fn is_cookbook(path: &PathBuf) -> bool {
let meta_f = metadata_file(path);
is_file(&Path::new(&meta_f))
}
// Extract the cookbook version from the provided metadata content
//
// This function expects you to read the metadata in advance and
// pass just the content of the file. From there it will read every
// line and search for the version to return it.
//
// There are two valid version formats:
// a) 'x.y.z' - The normal semantic version. (major.minor.patch)
// b) 'x.y' - Where the patchset will be 0 by default. (major.minor.0)
fn metadata_version_from(content: &str) -> Result<MetadataVersion, DeliveryError> {
for l in content.lines() {
let r_m_m_p =
Regex::new(r"version\s+'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)'").unwrap();
if let Some(version) = r_m_m_p.captures(l) {
return generate_metadata_version(version);
}
let r_m_m = Regex::new(r"version\s+'(?P<major>\d+)\.(?P<minor>\d+)'").unwrap();
if let Some(version) = r_m_m.captures(l) {
return generate_metadata_version(version);
}
}
return Err(DeliveryError {
kind: Kind::MissingMetadataVersion,
detail: None,
});
}
fn generate_metadata_version(metadata: Captures) -> Result<MetadataVersion, DeliveryError> {
let mut ma = None;
let mut mi = None;
let mut pa = None;
if let Some(major) = metadata.name("major") {
ma = major.as_str().parse::<usize>().ok();
};
if let Some(minor) = metadata.name("minor") {
mi = minor.as_str().parse::<usize>().ok();
};
if let Some(patch) = metadata.name("patch") {
pa = patch.as_str().parse::<usize>().ok();
};
Ok(MetadataVersion::new(ma, mi, pa))
}
// Bump the patchset of the provided version
fn
|
(mut version: MetadataVersion) -> Result<MetadataVersion, DeliveryError> {
version = MetadataVersion {
patch: version.patch + 1,
..version
};
Ok(version)
}
// Saves the new version to the metadata and commit the changes
fn save_version(metadata: &PathBuf, version: String) -> Result<(), DeliveryError> {
let current_meta = try!(read_file(metadata));
let current_meta_version = try!(metadata_version_from(¤t_meta));
let current_version = current_meta_version.to_string();
let new_metadata = current_meta.replace(&*current_version, &*version);
// Recreate the file and dump the processed contents to it
let mut recreate_meta = try!(File::create(metadata));
try!(recreate_meta.write(new_metadata.as_bytes()));
// Commit the changes made to the metadata
let mut commit_msg = String::from("Bump version to ");
commit_msg.push_str(&version);
try!(git::git_command(
&["add", metadata.to_str().unwrap()],
&utils::cwd()
));
try!(git::git_command(
&["commit", "-m", &commit_msg],
&utils::cwd()
));
Ok(())
}
#[cfg(test)]
mod tests {
use cookbook::*;
use std::path::PathBuf;
#[test]
fn test_metadata_version_constructor() {
let version_generator = MetadataVersion::new(None, Some(2), None);
let MetadataVersion {
major: ma,
minor: mi,
patch: pa,
} = version_generator;
assert_eq!(ma, 0);
assert_eq!(mi, 2);
assert_eq!(pa, 0);
assert_eq!("0.2.0", &version_generator.to_string());
}
#[test]
fn verify_version_bumping_using_bump_patchset() {
let version = MetadataVersion {
major: 1,
minor: 2,
patch: 3,
};
let MetadataVersion {
major: ma,
minor: mi,
patch: pa,
} = super::bump_patchset(version).unwrap();
assert_eq!(ma, 1);
assert_eq!(mi, 2);
assert_eq!(pa, 4);
}
#[test]
fn return_the_metadata_file_path() {
let project_path = PathBuf::from("/cookbook");
assert_eq!(
String::from("/cookbook/metadata.rb"),
super::metadata_file(&project_path)
);
}
#[test]
fn verify_happy_metadata_version_from_content() {
let happy_version = "1.2.3";
let happy_metadata_content = metadata_from_version(&happy_version);
let happy_metadata_version = super::metadata_version_from(&happy_metadata_content).unwrap();
assert_eq!(happy_version, &happy_metadata_version.to_string());
let valid_version = "1.2";
let valid_metadata_content = metadata_from_version(&valid_version);
let valid_metadata_version = super::metadata_version_from(&valid_metadata_content).unwrap();
assert_eq!("1.2.0", &valid_metadata_version.to_string());
let awesome_version = "123.123.123";
let awesome_metadata_content = metadata_from_version(&awesome_version);
let awesome_metadata_version =
super::metadata_version_from(&awesome_metadata_content).unwrap();
assert_eq!(awesome_version, &awesome_metadata_version.to_string());
}
#[test]
fn verify_sad_metadata_version_from_content() {
let sad_version = "1..";
let sad_metadata = metadata_from_version(&sad_version);
assert!(super::metadata_version_from(&sad_metadata).is_err());
let typo_version = "1.2.";
let typo_metadata = metadata_from_version(&typo_version);
assert!(super::metadata_version_from(&typo_metadata).is_err());
let no_version = "";
let no_metadata = metadata_from_version(&no_version);
assert!(super::metadata_version_from(&no_metadata).is_err());
}
// Quick helper to render a dummy metadata from a provided version
fn metadata_from_version(version: &str) -> String {
String::from(["version '", version, "'"].concat())
}
}
|
bump_patchset
|
identifier_name
|
mod.rs
|
//
// Copyright:: Copyright (c) 2016 Chef Software, Inc.
// License:: Apache License, Version 2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use errors::{DeliveryError, Kind};
use git;
use regex::Captures;
use regex::Regex;
use std::fs::File;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use utils::path_ext::is_file;
use utils::say::{say, sayln};
use utils::{self, read_file};
#[derive(Debug, Clone)]
pub struct MetadataVersion {
pub major: usize,
pub minor: usize,
pub patch: usize,
}
impl MetadataVersion {
pub fn new(ma: Option<usize>, mi: Option<usize>, pa: Option<usize>) -> MetadataVersion {
MetadataVersion {
major: ma.unwrap_or_default(),
minor: mi.unwrap_or_default(),
patch: pa.unwrap_or_default(),
}
}
pub fn to_string(&self) -> String {
[
self.major.to_string(),
".".to_string(),
self.minor.to_string(),
".".to_string(),
self.patch.to_string(),
].concat()
}
}
// Bump the metadata version, only if:
// * The project is a cookbook
// * The version hasn't been updated
//
// @param p_root [&PathBuf] The project root path
// @param pipeline [&str] Pipeline the change is targeting to
// @return () if success
pub fn bump_version(p_root: &PathBuf, pipeline: &str, project: &str) -> Result<(), DeliveryError> {
if is_cookbook(&p_root) {
say("white", "Project ");
say("yellow", &project);
sayln("white", " is a cookbook");
sayln("white", "Validating version in metadata");
let meta_f_p = PathBuf::from(metadata_file(&p_root));
let meta_f_c = try!(read_file(&meta_f_p));
let current_meta_v = try!(metadata_version_from(&meta_f_c));
let current_v = current_meta_v.to_string();
let mut t_file = pipeline.to_string();
t_file.push_str(":metadata.rb");
let pipeline_meta = try!(git::git_command(&["show", &t_file], &p_root));
let pipeline_meta_v = try!(metadata_version_from(&pipeline_meta.stdout));
let pipeline_v = pipeline_meta_v.to_string();
if current_v == pipeline_v {
say("yellow", "The version hasn't been updated (");
say("red", &pipeline_v);
sayln("yellow", ")");
let new_meta_version = try!(bump_patchset(pipeline_meta_v.clone()));
let new_version = new_meta_version.to_string();
say("white", "Bumping version to: ");
sayln("green", &new_version);
try!(save_version(&meta_f_p, new_version));
} else {
say("white", "Version already updated (");
say("magenta", &pipeline_v);
say("white", "/");
say("green", ¤t_v);
sayln("white", ")");
}
}
Ok(())
}
// @Private
// Return the path to the metadata.rb file
fn metadata_file(path: &PathBuf) -> String {
let mut metadata = path.to_str().unwrap().to_string();
metadata.push_str("/metadata.rb");
return metadata;
}
// Verify if the provided path is a cookbook, or not
fn is_cookbook(path: &PathBuf) -> bool {
let meta_f = metadata_file(path);
is_file(&Path::new(&meta_f))
}
// Extract the cookbook version from the provided metadata content
//
// This function expects you to read the metadata in advance and
// pass just the content of the file. From there it will read every
// line and search for the version to return it.
//
// There are two valid version formats:
// a) 'x.y.z' - The normal semantic version. (major.minor.patch)
// b) 'x.y' - Where the patchset will be 0 by default. (major.minor.0)
fn metadata_version_from(content: &str) -> Result<MetadataVersion, DeliveryError> {
for l in content.lines() {
let r_m_m_p =
Regex::new(r"version\s+'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)'").unwrap();
if let Some(version) = r_m_m_p.captures(l) {
return generate_metadata_version(version);
}
let r_m_m = Regex::new(r"version\s+'(?P<major>\d+)\.(?P<minor>\d+)'").unwrap();
if let Some(version) = r_m_m.captures(l) {
return generate_metadata_version(version);
}
}
return Err(DeliveryError {
kind: Kind::MissingMetadataVersion,
detail: None,
});
}
fn generate_metadata_version(metadata: Captures) -> Result<MetadataVersion, DeliveryError> {
let mut ma = None;
let mut mi = None;
let mut pa = None;
if let Some(major) = metadata.name("major")
|
;
if let Some(minor) = metadata.name("minor") {
mi = minor.as_str().parse::<usize>().ok();
};
if let Some(patch) = metadata.name("patch") {
pa = patch.as_str().parse::<usize>().ok();
};
Ok(MetadataVersion::new(ma, mi, pa))
}
// Bump the patchset of the provided version
fn bump_patchset(mut version: MetadataVersion) -> Result<MetadataVersion, DeliveryError> {
version = MetadataVersion {
patch: version.patch + 1,
..version
};
Ok(version)
}
// Saves the new version to the metadata and commit the changes
fn save_version(metadata: &PathBuf, version: String) -> Result<(), DeliveryError> {
let current_meta = try!(read_file(metadata));
let current_meta_version = try!(metadata_version_from(¤t_meta));
let current_version = current_meta_version.to_string();
let new_metadata = current_meta.replace(&*current_version, &*version);
// Recreate the file and dump the processed contents to it
let mut recreate_meta = try!(File::create(metadata));
try!(recreate_meta.write(new_metadata.as_bytes()));
// Commit the changes made to the metadata
let mut commit_msg = String::from("Bump version to ");
commit_msg.push_str(&version);
try!(git::git_command(
&["add", metadata.to_str().unwrap()],
&utils::cwd()
));
try!(git::git_command(
&["commit", "-m", &commit_msg],
&utils::cwd()
));
Ok(())
}
#[cfg(test)]
mod tests {
use cookbook::*;
use std::path::PathBuf;
#[test]
fn test_metadata_version_constructor() {
let version_generator = MetadataVersion::new(None, Some(2), None);
let MetadataVersion {
major: ma,
minor: mi,
patch: pa,
} = version_generator;
assert_eq!(ma, 0);
assert_eq!(mi, 2);
assert_eq!(pa, 0);
assert_eq!("0.2.0", &version_generator.to_string());
}
#[test]
fn verify_version_bumping_using_bump_patchset() {
let version = MetadataVersion {
major: 1,
minor: 2,
patch: 3,
};
let MetadataVersion {
major: ma,
minor: mi,
patch: pa,
} = super::bump_patchset(version).unwrap();
assert_eq!(ma, 1);
assert_eq!(mi, 2);
assert_eq!(pa, 4);
}
#[test]
fn return_the_metadata_file_path() {
let project_path = PathBuf::from("/cookbook");
assert_eq!(
String::from("/cookbook/metadata.rb"),
super::metadata_file(&project_path)
);
}
#[test]
fn verify_happy_metadata_version_from_content() {
let happy_version = "1.2.3";
let happy_metadata_content = metadata_from_version(&happy_version);
let happy_metadata_version = super::metadata_version_from(&happy_metadata_content).unwrap();
assert_eq!(happy_version, &happy_metadata_version.to_string());
let valid_version = "1.2";
let valid_metadata_content = metadata_from_version(&valid_version);
let valid_metadata_version = super::metadata_version_from(&valid_metadata_content).unwrap();
assert_eq!("1.2.0", &valid_metadata_version.to_string());
let awesome_version = "123.123.123";
let awesome_metadata_content = metadata_from_version(&awesome_version);
let awesome_metadata_version =
super::metadata_version_from(&awesome_metadata_content).unwrap();
assert_eq!(awesome_version, &awesome_metadata_version.to_string());
}
#[test]
fn verify_sad_metadata_version_from_content() {
let sad_version = "1..";
let sad_metadata = metadata_from_version(&sad_version);
assert!(super::metadata_version_from(&sad_metadata).is_err());
let typo_version = "1.2.";
let typo_metadata = metadata_from_version(&typo_version);
assert!(super::metadata_version_from(&typo_metadata).is_err());
let no_version = "";
let no_metadata = metadata_from_version(&no_version);
assert!(super::metadata_version_from(&no_metadata).is_err());
}
// Quick helper to render a dummy metadata from a provided version
fn metadata_from_version(version: &str) -> String {
String::from(["version '", version, "'"].concat())
}
}
|
{
ma = major.as_str().parse::<usize>().ok();
}
|
conditional_block
|
lib.rs
|
//! Differential dataflow is a high-throughput, low-latency data-parallel programming framework.
//!
//! Differential dataflow programs are written in a collection-oriented style, where you transform
//! collections of records using traditional operations like `map`, `filter`, `join`, and `group_by`.
//! Differential dataflow also includes the less traditional operation `iterate`, which allows you
|
//!
//! Once you have defined a differential dataflow computation, you may then add records to or remove
//! records from its inputs; the system will automatically update the computation's outputs with the
//! appropriate corresponding additions and removals, and report these changes to you.
//!
//! Differential dataflow is built on the [timely dataflow](https://github.com/frankmcsherry/timely-dataflow)
//! framework for data-parallel programming which automatically parallelizes across multiple threads,
//! processes, and computers. Furthermore, because it uses timely dataflow's primitives, it seamlessly
//! inter-operates with other timely dataflow computations.
//!
//! Differential dataflow is still very much a work in progress, with features and ergonomics still
//! wildly in development. It is generally improving, though.
//!
//! # Examples
//!
//! This fragment creates a collection of pairs of integers, imagined as graph edges, and then counts
//! first the number of times the source coordinate occurs, and then the number of times each count
//! occurs, giving us a sense for the distribution of degrees in the graph.
//!
//! ```ignore
//! // create a degree counting differential dataflow
//! let (mut input, probe) = worker.dataflow(|scope| {
//!
//! // create edge input, count a few ways.
//! let (input, edges) = scope.new_collection();
//!
//! // extract the source field, and then count.
//! let degrs = edges.map(|(src, _dst)| src)
//! .count();
//!
//! // extract the count field, and then count them.
//! let distr = degrs.map(|(_src, cnt)| cnt)
//! .count();
//!
//! // report the changes to the count collection, notice when done.
//! let probe = distr.inspect(|x| println!("observed: {:?}", x))
//! .probe();
//!
//! (input, probe)
//! });
//! ```
//!
//! Now assembled, we can drive the computation like a timely dataflow computation, by pushing update
//! records (triples of data, time, and change in count) at the `input` stream handle. The `probe` is
//! how timely dataflow tells us that we have seen all corresponding output updates (in case there are
//! none).
//!
//! ```ignore
//! loop {
//! let time = input.epoch();
//! for round in time.. time + 100 {
//! input.advance_to(round);
//! input.insert((round % 13, round % 7));
//! }
//!
//! input.flush();
//! while probe.less_than(input.time()) {
//! worker.step();
//! }
//! }
//! ```
//!
//! This example should print out the 100 changes in the output, in this case each reflecting the increase
//! of some node degree by one (typically four output changes, corresponding to the addition and deletion
//! of the new and old counts of the old and new degrees of the affected node).
#![forbid(missing_docs)]
use std::fmt::Debug;
pub use collection::{Collection, AsCollection};
pub use hashable::Hashable;
pub use difference::Abelian as Diff;
/// Data type usable in differential dataflow.
///
/// Most differential dataflow operators require the ability to cancel corresponding updates, and the
/// way that they do this is by putting the data in a canonical form. The `Ord` trait allows us to sort
/// the data, at which point we can consolidate updates for equivalent records.
pub trait Data : timely::Data + Ord + Debug { }
impl<T: timely::Data + Ord + Debug> Data for T { }
/// Data types exchangeable in differential dataflow.
pub trait ExchangeData : timely::ExchangeData + Ord + Debug { }
impl<T: timely::ExchangeData + Ord + Debug> ExchangeData for T { }
extern crate fnv;
extern crate timely;
extern crate timely_sort;
#[macro_use]
extern crate abomonation_derive;
extern crate abomonation;
#[macro_use]
extern crate serde_derive;
extern crate serde;
pub mod hashable;
pub mod operators;
pub mod algorithms;
pub mod lattice;
pub mod trace;
pub mod input;
pub mod difference;
pub mod collection;
pub mod logging;
pub mod consolidation;
|
//! to repeatedly apply differential dataflow transformations to collections.
|
random_line_split
|
mod.rs
|
//! Kafka consumers.
use std::ptr;
use std::sync::Arc;
use std::time::Duration;
use rdkafka_sys as rdsys;
use rdkafka_sys::types::*;
use crate::client::{Client, ClientContext, NativeClient};
use crate::error::KafkaResult;
use crate::groups::GroupList;
use crate::log::{error, trace};
use crate::message::BorrowedMessage;
use crate::metadata::Metadata;
use crate::topic_partition_list::{Offset, TopicPartitionList};
use crate::util::{cstr_to_owned, KafkaDrop, NativePtr, Timeout};
pub mod base_consumer;
pub mod stream_consumer;
// Re-exports.
#[doc(inline)]
pub use self::base_consumer::BaseConsumer;
#[doc(inline)]
pub use self::stream_consumer::{MessageStream, StreamConsumer};
/// Rebalance information.
#[derive(Clone, Debug)]
pub enum Rebalance<'a> {
/// A new partition assignment is received.
Assign(&'a TopicPartitionList),
/// A new partition revocation is received.
Revoke(&'a TopicPartitionList),
/// Unexpected error from Kafka.
Error(String),
}
/// Consumer-specific context.
///
/// This user-defined object can be used to provide custom callbacks for
/// consumer events. Refer to the list of methods to check which callbacks can
/// be specified.
///
/// See also the [`ClientContext`] trait.
pub trait ConsumerContext: ClientContext {
/// Implements the default rebalancing strategy and calls the
/// [`pre_rebalance`](ConsumerContext::pre_rebalance) and
/// [`post_rebalance`](ConsumerContext::post_rebalance) methods. If this
/// method is overridden, it will be responsibility of the user to call them
/// if needed.
fn rebalance(
&self,
native_client: &NativeClient,
err: RDKafkaRespErr,
tpl: &mut TopicPartitionList,
) {
let rebalance = match err {
RDKafkaRespErr::RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS => Rebalance::Assign(tpl),
RDKafkaRespErr::RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS => Rebalance::Revoke(tpl),
_ =>
|
};
trace!("Running pre-rebalance with {:?}", rebalance);
self.pre_rebalance(&rebalance);
trace!("Running rebalance with {:?}", rebalance);
// Execute rebalance
unsafe {
match err {
RDKafkaRespErr::RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS => {
match native_client.rebalance_protocol() {
RebalanceProtocol::Cooperative => {
rdsys::rd_kafka_incremental_assign(native_client.ptr(), tpl.ptr());
}
_ => {
rdsys::rd_kafka_assign(native_client.ptr(), tpl.ptr());
}
}
}
_ => match native_client.rebalance_protocol() {
RebalanceProtocol::Cooperative => {
rdsys::rd_kafka_incremental_unassign(native_client.ptr(), tpl.ptr());
}
_ => {
rdsys::rd_kafka_assign(native_client.ptr(), ptr::null());
}
},
}
}
trace!("Running post-rebalance with {:?}", rebalance);
self.post_rebalance(&rebalance);
}
/// Pre-rebalance callback. This method will run before the rebalance and
/// should terminate its execution quickly.
#[allow(unused_variables)]
fn pre_rebalance<'a>(&self, rebalance: &Rebalance<'a>) {}
/// Post-rebalance callback. This method will run after the rebalance and
/// should terminate its execution quickly.
#[allow(unused_variables)]
fn post_rebalance<'a>(&self, rebalance: &Rebalance<'a>) {}
// TODO: convert pointer to structure
/// Post commit callback. This method will run after a group of offsets was
/// committed to the offset store.
#[allow(unused_variables)]
fn commit_callback(&self, result: KafkaResult<()>, offsets: &TopicPartitionList) {}
/// Returns the minimum interval at which to poll the main queue, which
/// services the logging, stats, and error callbacks.
///
/// The main queue is polled once whenever [`BaseConsumer::poll`] is called.
/// If `poll` is called with a timeout that is larger than this interval,
/// then the main queue will be polled at that interval while the consumer
/// queue is blocked.
///
/// For example, if the main queue's minimum poll interval is 200ms and
/// `poll` is called with a timeout of 1s, then `poll` may block for up to
/// 1s waiting for a message, but it will poll the main queue every 200ms
/// while it is waiting.
///
/// By default, the minimum poll interval for the main queue is 1s.
fn main_queue_min_poll_interval(&self) -> Timeout {
Timeout::After(Duration::from_secs(1))
}
}
/// An inert [`ConsumerContext`] that can be used when no customizations are
/// needed.
#[derive(Clone, Debug, Default)]
pub struct DefaultConsumerContext;
impl ClientContext for DefaultConsumerContext {}
impl ConsumerContext for DefaultConsumerContext {}
/// Specifies whether a commit should be performed synchronously or asynchronously.
///
/// Regardless of the `CommitMode`, the commit APIs enqueue the commit request in
/// a local work queue. A separate worker thread picks up this commit request and forwards
/// it to the Kafka broker over the network. [1]
///
/// The difference between CommitMode::Sync and CommitMode::Async is in whether the caller
/// waits for the Kafka broker to signal that it finished handling the commit request. [2]
///
/// Note that the commit APIs are not async in the Rust sense due to the lack of a
/// callback-based interface exposed by librdkafka. [3]
///
/// [1]: https://github.com/edenhill/librdkafka/blob/e3d9515e396615b57674a93b39be2ca60355f4f4/src/rdkafka_cgrp.c#L3161
/// [2]: https://github.com/edenhill/librdkafka/blob/f092c290995ca81b3afb4015fcc3350ba02caa96/src/rdkafka_offset.c#L387
/// [3]: https://github.com/edenhill/librdkafka/issues/3212
/// [4]: https://github.com/edenhill/librdkafka/blob/e3d9515e396615b57674a93b39be2ca60355f4f4/src/rdkafka_cgrp.c#L2846
#[derive(Clone, Copy, Debug)]
pub enum CommitMode {
/// In `Sync` mode, the caller blocks until the Kafka broker finishes processing
/// the commit request. [4]
Sync = 0,
/// In `Async` mode, the caller enqueues the commit request in a local
/// work queue and returns immediately. [2]
Async = 1,
}
/// Consumer group metadata.
///
/// For use with [`Producer::send_offsets_to_transaction`].
///
/// [`Producer::send_offsets_to_transaction`]: crate::producer::Producer::send_offsets_to_transaction
pub struct ConsumerGroupMetadata(NativePtr<RDKafkaConsumerGroupMetadata>);
impl ConsumerGroupMetadata {
pub(crate) fn ptr(&self) -> *const RDKafkaConsumerGroupMetadata {
self.0.ptr()
}
}
unsafe impl KafkaDrop for RDKafkaConsumerGroupMetadata {
const TYPE: &'static str = "consumer_group_metadata";
const DROP: unsafe extern "C" fn(*mut Self) = rdsys::rd_kafka_consumer_group_metadata_destroy;
}
unsafe impl Send for ConsumerGroupMetadata {}
unsafe impl Sync for ConsumerGroupMetadata {}
/// The rebalance protocol for a consumer.
pub enum RebalanceProtocol {
/// The consumer has not (yet) joined a group.
None,
/// Eager rebalance protocol.
Eager,
/// Cooperative rebalance protocol.
Cooperative,
}
/// Common trait for all consumers.
///
/// # Note about object safety
///
/// Doing type erasure on consumers is expected to be rare (eg. `Box<dyn
/// Consumer>`). Therefore, the API is optimised for the case where a concrete
/// type is available. As a result, some methods are not available on trait
/// objects, since they are generic.
pub trait Consumer<C = DefaultConsumerContext>
where
C: ConsumerContext,
{
/// Returns the [`Client`] underlying this consumer.
fn client(&self) -> &Client<C>;
/// Returns a reference to the [`ConsumerContext`] used to create this
/// consumer.
fn context(&self) -> &Arc<C> {
self.client().context()
}
/// Returns the current consumer group metadata associated with the
/// consumer.
///
/// If the consumer was not configured with a `group.id`, returns `None`.
/// For use with [`Producer::send_offsets_to_transaction`].
///
/// [`Producer::send_offsets_to_transaction`]: crate::producer::Producer::send_offsets_to_transaction
fn group_metadata(&self) -> Option<ConsumerGroupMetadata>;
/// Subscribes the consumer to a list of topics.
fn subscribe(&self, topics: &[&str]) -> KafkaResult<()>;
/// Unsubscribes the current subscription list.
fn unsubscribe(&self);
/// Manually assigns topics and partitions to the consumer. If used,
/// automatic consumer rebalance won't be activated.
fn assign(&self, assignment: &TopicPartitionList) -> KafkaResult<()>;
/// Seeks to `offset` for the specified `topic` and `partition`. After a
/// successful call to `seek`, the next poll of the consumer will return the
/// message with `offset`.
fn seek<T: Into<Timeout>>(
&self,
topic: &str,
partition: i32,
offset: Offset,
timeout: T,
) -> KafkaResult<()>;
/// Commits the offset of the specified message. The commit can be sync
/// (blocking), or async. Notice that when a specific offset is committed,
/// all the previous offsets are considered committed as well. Use this
/// method only if you are processing messages in order.
fn commit(
&self,
topic_partition_list: &TopicPartitionList,
mode: CommitMode,
) -> KafkaResult<()>;
/// Commits the current consumer state. Notice that if the consumer fails
/// after a message has been received, but before the message has been
/// processed by the user code, this might lead to data loss. Check the
/// "at-least-once delivery" section in the readme for more information.
fn commit_consumer_state(&self, mode: CommitMode) -> KafkaResult<()>;
/// Commit the provided message. Note that this will also automatically
/// commit every message with lower offset within the same partition.
fn commit_message(&self, message: &BorrowedMessage<'_>, mode: CommitMode) -> KafkaResult<()>;
/// Stores offset to be used on the next (auto)commit. When
/// using this `enable.auto.offset.store` should be set to `false` in the
/// config.
fn store_offset(&self, topic: &str, partition: i32, offset: i64) -> KafkaResult<()>;
/// Like [`Consumer::store_offset`], but the offset to store is derived from
/// the provided message.
fn store_offset_from_message(&self, message: &BorrowedMessage<'_>) -> KafkaResult<()>;
/// Store offsets to be used on the next (auto)commit. When using this
/// `enable.auto.offset.store` should be set to `false` in the config.
fn store_offsets(&self, tpl: &TopicPartitionList) -> KafkaResult<()>;
/// Returns the current topic subscription.
fn subscription(&self) -> KafkaResult<TopicPartitionList>;
/// Returns the current partition assignment.
fn assignment(&self) -> KafkaResult<TopicPartitionList>;
/// Retrieves the committed offsets for topics and partitions.
fn committed<T>(&self, timeout: T) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>,
Self: Sized;
/// Retrieves the committed offsets for specified topics and partitions.
fn committed_offsets<T>(
&self,
tpl: TopicPartitionList,
timeout: T,
) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>;
/// Looks up the offsets for this consumer's partitions by timestamp.
fn offsets_for_timestamp<T>(
&self,
timestamp: i64,
timeout: T,
) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>,
Self: Sized;
/// Looks up the offsets for the specified partitions by timestamp.
fn offsets_for_times<T>(
&self,
timestamps: TopicPartitionList,
timeout: T,
) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>,
Self: Sized;
/// Retrieve current positions (offsets) for topics and partitions.
fn position(&self) -> KafkaResult<TopicPartitionList>;
/// Returns the metadata information for the specified topic, or for all
/// topics in the cluster if no topic is specified.
fn fetch_metadata<T>(&self, topic: Option<&str>, timeout: T) -> KafkaResult<Metadata>
where
T: Into<Timeout>,
Self: Sized;
/// Returns the low and high watermarks for a specific topic and partition.
fn fetch_watermarks<T>(
&self,
topic: &str,
partition: i32,
timeout: T,
) -> KafkaResult<(i64, i64)>
where
T: Into<Timeout>,
Self: Sized;
/// Returns the group membership information for the given group. If no group is
/// specified, all groups will be returned.
fn fetch_group_list<T>(&self, group: Option<&str>, timeout: T) -> KafkaResult<GroupList>
where
T: Into<Timeout>,
Self: Sized;
/// Pauses consumption for the provided list of partitions.
fn pause(&self, partitions: &TopicPartitionList) -> KafkaResult<()>;
/// Resumes consumption for the provided list of partitions.
fn resume(&self, partitions: &TopicPartitionList) -> KafkaResult<()>;
/// Reports the rebalance protocol in use.
fn rebalance_protocol(&self) -> RebalanceProtocol;
}
|
{
let error = unsafe { cstr_to_owned(rdsys::rd_kafka_err2str(err)) };
error!("Error rebalancing: {}", error);
Rebalance::Error(error)
}
|
conditional_block
|
mod.rs
|
//! Kafka consumers.
use std::ptr;
use std::sync::Arc;
use std::time::Duration;
use rdkafka_sys as rdsys;
use rdkafka_sys::types::*;
use crate::client::{Client, ClientContext, NativeClient};
use crate::error::KafkaResult;
use crate::groups::GroupList;
use crate::log::{error, trace};
use crate::message::BorrowedMessage;
use crate::metadata::Metadata;
use crate::topic_partition_list::{Offset, TopicPartitionList};
use crate::util::{cstr_to_owned, KafkaDrop, NativePtr, Timeout};
pub mod base_consumer;
pub mod stream_consumer;
// Re-exports.
#[doc(inline)]
pub use self::base_consumer::BaseConsumer;
#[doc(inline)]
pub use self::stream_consumer::{MessageStream, StreamConsumer};
/// Rebalance information.
#[derive(Clone, Debug)]
pub enum Rebalance<'a> {
/// A new partition assignment is received.
Assign(&'a TopicPartitionList),
/// A new partition revocation is received.
Revoke(&'a TopicPartitionList),
/// Unexpected error from Kafka.
Error(String),
}
/// Consumer-specific context.
///
/// This user-defined object can be used to provide custom callbacks for
/// consumer events. Refer to the list of methods to check which callbacks can
/// be specified.
///
/// See also the [`ClientContext`] trait.
pub trait ConsumerContext: ClientContext {
/// Implements the default rebalancing strategy and calls the
/// [`pre_rebalance`](ConsumerContext::pre_rebalance) and
/// [`post_rebalance`](ConsumerContext::post_rebalance) methods. If this
/// method is overridden, it will be responsibility of the user to call them
/// if needed.
fn rebalance(
&self,
native_client: &NativeClient,
err: RDKafkaRespErr,
tpl: &mut TopicPartitionList,
) {
let rebalance = match err {
RDKafkaRespErr::RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS => Rebalance::Assign(tpl),
RDKafkaRespErr::RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS => Rebalance::Revoke(tpl),
_ => {
let error = unsafe { cstr_to_owned(rdsys::rd_kafka_err2str(err)) };
error!("Error rebalancing: {}", error);
Rebalance::Error(error)
}
};
trace!("Running pre-rebalance with {:?}", rebalance);
self.pre_rebalance(&rebalance);
trace!("Running rebalance with {:?}", rebalance);
// Execute rebalance
unsafe {
match err {
RDKafkaRespErr::RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS => {
match native_client.rebalance_protocol() {
RebalanceProtocol::Cooperative => {
rdsys::rd_kafka_incremental_assign(native_client.ptr(), tpl.ptr());
}
_ => {
rdsys::rd_kafka_assign(native_client.ptr(), tpl.ptr());
}
}
}
_ => match native_client.rebalance_protocol() {
RebalanceProtocol::Cooperative => {
rdsys::rd_kafka_incremental_unassign(native_client.ptr(), tpl.ptr());
}
_ => {
rdsys::rd_kafka_assign(native_client.ptr(), ptr::null());
}
},
}
}
trace!("Running post-rebalance with {:?}", rebalance);
self.post_rebalance(&rebalance);
}
/// Pre-rebalance callback. This method will run before the rebalance and
/// should terminate its execution quickly.
#[allow(unused_variables)]
fn pre_rebalance<'a>(&self, rebalance: &Rebalance<'a>) {}
/// Post-rebalance callback. This method will run after the rebalance and
/// should terminate its execution quickly.
#[allow(unused_variables)]
fn post_rebalance<'a>(&self, rebalance: &Rebalance<'a>) {}
// TODO: convert pointer to structure
/// Post commit callback. This method will run after a group of offsets was
/// committed to the offset store.
#[allow(unused_variables)]
fn commit_callback(&self, result: KafkaResult<()>, offsets: &TopicPartitionList) {}
/// Returns the minimum interval at which to poll the main queue, which
/// services the logging, stats, and error callbacks.
///
/// The main queue is polled once whenever [`BaseConsumer::poll`] is called.
/// If `poll` is called with a timeout that is larger than this interval,
/// then the main queue will be polled at that interval while the consumer
/// queue is blocked.
///
/// For example, if the main queue's minimum poll interval is 200ms and
/// `poll` is called with a timeout of 1s, then `poll` may block for up to
/// 1s waiting for a message, but it will poll the main queue every 200ms
/// while it is waiting.
///
/// By default, the minimum poll interval for the main queue is 1s.
fn main_queue_min_poll_interval(&self) -> Timeout {
Timeout::After(Duration::from_secs(1))
}
}
|
pub struct DefaultConsumerContext;
impl ClientContext for DefaultConsumerContext {}
impl ConsumerContext for DefaultConsumerContext {}
/// Specifies whether a commit should be performed synchronously or asynchronously.
///
/// Regardless of the `CommitMode`, the commit APIs enqueue the commit request in
/// a local work queue. A separate worker thread picks up this commit request and forwards
/// it to the Kafka broker over the network. [1]
///
/// The difference between CommitMode::Sync and CommitMode::Async is in whether the caller
/// waits for the Kafka broker to signal that it finished handling the commit request. [2]
///
/// Note that the commit APIs are not async in the Rust sense due to the lack of a
/// callback-based interface exposed by librdkafka. [3]
///
/// [1]: https://github.com/edenhill/librdkafka/blob/e3d9515e396615b57674a93b39be2ca60355f4f4/src/rdkafka_cgrp.c#L3161
/// [2]: https://github.com/edenhill/librdkafka/blob/f092c290995ca81b3afb4015fcc3350ba02caa96/src/rdkafka_offset.c#L387
/// [3]: https://github.com/edenhill/librdkafka/issues/3212
/// [4]: https://github.com/edenhill/librdkafka/blob/e3d9515e396615b57674a93b39be2ca60355f4f4/src/rdkafka_cgrp.c#L2846
#[derive(Clone, Copy, Debug)]
pub enum CommitMode {
/// In `Sync` mode, the caller blocks until the Kafka broker finishes processing
/// the commit request. [4]
Sync = 0,
/// In `Async` mode, the caller enqueues the commit request in a local
/// work queue and returns immediately. [2]
Async = 1,
}
/// Consumer group metadata.
///
/// For use with [`Producer::send_offsets_to_transaction`].
///
/// [`Producer::send_offsets_to_transaction`]: crate::producer::Producer::send_offsets_to_transaction
pub struct ConsumerGroupMetadata(NativePtr<RDKafkaConsumerGroupMetadata>);
impl ConsumerGroupMetadata {
pub(crate) fn ptr(&self) -> *const RDKafkaConsumerGroupMetadata {
self.0.ptr()
}
}
unsafe impl KafkaDrop for RDKafkaConsumerGroupMetadata {
const TYPE: &'static str = "consumer_group_metadata";
const DROP: unsafe extern "C" fn(*mut Self) = rdsys::rd_kafka_consumer_group_metadata_destroy;
}
unsafe impl Send for ConsumerGroupMetadata {}
unsafe impl Sync for ConsumerGroupMetadata {}
/// The rebalance protocol for a consumer.
pub enum RebalanceProtocol {
/// The consumer has not (yet) joined a group.
None,
/// Eager rebalance protocol.
Eager,
/// Cooperative rebalance protocol.
Cooperative,
}
/// Common trait for all consumers.
///
/// # Note about object safety
///
/// Doing type erasure on consumers is expected to be rare (eg. `Box<dyn
/// Consumer>`). Therefore, the API is optimised for the case where a concrete
/// type is available. As a result, some methods are not available on trait
/// objects, since they are generic.
pub trait Consumer<C = DefaultConsumerContext>
where
C: ConsumerContext,
{
/// Returns the [`Client`] underlying this consumer.
fn client(&self) -> &Client<C>;
/// Returns a reference to the [`ConsumerContext`] used to create this
/// consumer.
fn context(&self) -> &Arc<C> {
self.client().context()
}
/// Returns the current consumer group metadata associated with the
/// consumer.
///
/// If the consumer was not configured with a `group.id`, returns `None`.
/// For use with [`Producer::send_offsets_to_transaction`].
///
/// [`Producer::send_offsets_to_transaction`]: crate::producer::Producer::send_offsets_to_transaction
fn group_metadata(&self) -> Option<ConsumerGroupMetadata>;
/// Subscribes the consumer to a list of topics.
fn subscribe(&self, topics: &[&str]) -> KafkaResult<()>;
/// Unsubscribes the current subscription list.
fn unsubscribe(&self);
/// Manually assigns topics and partitions to the consumer. If used,
/// automatic consumer rebalance won't be activated.
fn assign(&self, assignment: &TopicPartitionList) -> KafkaResult<()>;
/// Seeks to `offset` for the specified `topic` and `partition`. After a
/// successful call to `seek`, the next poll of the consumer will return the
/// message with `offset`.
fn seek<T: Into<Timeout>>(
&self,
topic: &str,
partition: i32,
offset: Offset,
timeout: T,
) -> KafkaResult<()>;
/// Commits the offset of the specified message. The commit can be sync
/// (blocking), or async. Notice that when a specific offset is committed,
/// all the previous offsets are considered committed as well. Use this
/// method only if you are processing messages in order.
fn commit(
&self,
topic_partition_list: &TopicPartitionList,
mode: CommitMode,
) -> KafkaResult<()>;
/// Commits the current consumer state. Notice that if the consumer fails
/// after a message has been received, but before the message has been
/// processed by the user code, this might lead to data loss. Check the
/// "at-least-once delivery" section in the readme for more information.
fn commit_consumer_state(&self, mode: CommitMode) -> KafkaResult<()>;
/// Commit the provided message. Note that this will also automatically
/// commit every message with lower offset within the same partition.
fn commit_message(&self, message: &BorrowedMessage<'_>, mode: CommitMode) -> KafkaResult<()>;
/// Stores offset to be used on the next (auto)commit. When
/// using this `enable.auto.offset.store` should be set to `false` in the
/// config.
fn store_offset(&self, topic: &str, partition: i32, offset: i64) -> KafkaResult<()>;
/// Like [`Consumer::store_offset`], but the offset to store is derived from
/// the provided message.
fn store_offset_from_message(&self, message: &BorrowedMessage<'_>) -> KafkaResult<()>;
/// Store offsets to be used on the next (auto)commit. When using this
/// `enable.auto.offset.store` should be set to `false` in the config.
fn store_offsets(&self, tpl: &TopicPartitionList) -> KafkaResult<()>;
/// Returns the current topic subscription.
fn subscription(&self) -> KafkaResult<TopicPartitionList>;
/// Returns the current partition assignment.
fn assignment(&self) -> KafkaResult<TopicPartitionList>;
/// Retrieves the committed offsets for topics and partitions.
fn committed<T>(&self, timeout: T) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>,
Self: Sized;
/// Retrieves the committed offsets for specified topics and partitions.
fn committed_offsets<T>(
&self,
tpl: TopicPartitionList,
timeout: T,
) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>;
/// Looks up the offsets for this consumer's partitions by timestamp.
fn offsets_for_timestamp<T>(
&self,
timestamp: i64,
timeout: T,
) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>,
Self: Sized;
/// Looks up the offsets for the specified partitions by timestamp.
fn offsets_for_times<T>(
&self,
timestamps: TopicPartitionList,
timeout: T,
) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>,
Self: Sized;
/// Retrieve current positions (offsets) for topics and partitions.
fn position(&self) -> KafkaResult<TopicPartitionList>;
/// Returns the metadata information for the specified topic, or for all
/// topics in the cluster if no topic is specified.
fn fetch_metadata<T>(&self, topic: Option<&str>, timeout: T) -> KafkaResult<Metadata>
where
T: Into<Timeout>,
Self: Sized;
/// Returns the low and high watermarks for a specific topic and partition.
fn fetch_watermarks<T>(
&self,
topic: &str,
partition: i32,
timeout: T,
) -> KafkaResult<(i64, i64)>
where
T: Into<Timeout>,
Self: Sized;
/// Returns the group membership information for the given group. If no group is
/// specified, all groups will be returned.
fn fetch_group_list<T>(&self, group: Option<&str>, timeout: T) -> KafkaResult<GroupList>
where
T: Into<Timeout>,
Self: Sized;
/// Pauses consumption for the provided list of partitions.
fn pause(&self, partitions: &TopicPartitionList) -> KafkaResult<()>;
/// Resumes consumption for the provided list of partitions.
fn resume(&self, partitions: &TopicPartitionList) -> KafkaResult<()>;
/// Reports the rebalance protocol in use.
fn rebalance_protocol(&self) -> RebalanceProtocol;
}
|
/// An inert [`ConsumerContext`] that can be used when no customizations are
/// needed.
#[derive(Clone, Debug, Default)]
|
random_line_split
|
mod.rs
|
//! Kafka consumers.
use std::ptr;
use std::sync::Arc;
use std::time::Duration;
use rdkafka_sys as rdsys;
use rdkafka_sys::types::*;
use crate::client::{Client, ClientContext, NativeClient};
use crate::error::KafkaResult;
use crate::groups::GroupList;
use crate::log::{error, trace};
use crate::message::BorrowedMessage;
use crate::metadata::Metadata;
use crate::topic_partition_list::{Offset, TopicPartitionList};
use crate::util::{cstr_to_owned, KafkaDrop, NativePtr, Timeout};
pub mod base_consumer;
pub mod stream_consumer;
// Re-exports.
#[doc(inline)]
pub use self::base_consumer::BaseConsumer;
#[doc(inline)]
pub use self::stream_consumer::{MessageStream, StreamConsumer};
/// Rebalance information.
#[derive(Clone, Debug)]
pub enum Rebalance<'a> {
/// A new partition assignment is received.
Assign(&'a TopicPartitionList),
/// A new partition revocation is received.
Revoke(&'a TopicPartitionList),
/// Unexpected error from Kafka.
Error(String),
}
/// Consumer-specific context.
///
/// This user-defined object can be used to provide custom callbacks for
/// consumer events. Refer to the list of methods to check which callbacks can
/// be specified.
///
/// See also the [`ClientContext`] trait.
pub trait ConsumerContext: ClientContext {
/// Implements the default rebalancing strategy and calls the
/// [`pre_rebalance`](ConsumerContext::pre_rebalance) and
/// [`post_rebalance`](ConsumerContext::post_rebalance) methods. If this
/// method is overridden, it will be responsibility of the user to call them
/// if needed.
fn rebalance(
&self,
native_client: &NativeClient,
err: RDKafkaRespErr,
tpl: &mut TopicPartitionList,
) {
let rebalance = match err {
RDKafkaRespErr::RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS => Rebalance::Assign(tpl),
RDKafkaRespErr::RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS => Rebalance::Revoke(tpl),
_ => {
let error = unsafe { cstr_to_owned(rdsys::rd_kafka_err2str(err)) };
error!("Error rebalancing: {}", error);
Rebalance::Error(error)
}
};
trace!("Running pre-rebalance with {:?}", rebalance);
self.pre_rebalance(&rebalance);
trace!("Running rebalance with {:?}", rebalance);
// Execute rebalance
unsafe {
match err {
RDKafkaRespErr::RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS => {
match native_client.rebalance_protocol() {
RebalanceProtocol::Cooperative => {
rdsys::rd_kafka_incremental_assign(native_client.ptr(), tpl.ptr());
}
_ => {
rdsys::rd_kafka_assign(native_client.ptr(), tpl.ptr());
}
}
}
_ => match native_client.rebalance_protocol() {
RebalanceProtocol::Cooperative => {
rdsys::rd_kafka_incremental_unassign(native_client.ptr(), tpl.ptr());
}
_ => {
rdsys::rd_kafka_assign(native_client.ptr(), ptr::null());
}
},
}
}
trace!("Running post-rebalance with {:?}", rebalance);
self.post_rebalance(&rebalance);
}
/// Pre-rebalance callback. This method will run before the rebalance and
/// should terminate its execution quickly.
#[allow(unused_variables)]
fn pre_rebalance<'a>(&self, rebalance: &Rebalance<'a>) {}
/// Post-rebalance callback. This method will run after the rebalance and
/// should terminate its execution quickly.
#[allow(unused_variables)]
fn
|
<'a>(&self, rebalance: &Rebalance<'a>) {}
// TODO: convert pointer to structure
/// Post commit callback. This method will run after a group of offsets was
/// committed to the offset store.
#[allow(unused_variables)]
fn commit_callback(&self, result: KafkaResult<()>, offsets: &TopicPartitionList) {}
/// Returns the minimum interval at which to poll the main queue, which
/// services the logging, stats, and error callbacks.
///
/// The main queue is polled once whenever [`BaseConsumer::poll`] is called.
/// If `poll` is called with a timeout that is larger than this interval,
/// then the main queue will be polled at that interval while the consumer
/// queue is blocked.
///
/// For example, if the main queue's minimum poll interval is 200ms and
/// `poll` is called with a timeout of 1s, then `poll` may block for up to
/// 1s waiting for a message, but it will poll the main queue every 200ms
/// while it is waiting.
///
/// By default, the minimum poll interval for the main queue is 1s.
fn main_queue_min_poll_interval(&self) -> Timeout {
Timeout::After(Duration::from_secs(1))
}
}
/// An inert [`ConsumerContext`] that can be used when no customizations are
/// needed.
#[derive(Clone, Debug, Default)]
pub struct DefaultConsumerContext;
impl ClientContext for DefaultConsumerContext {}
impl ConsumerContext for DefaultConsumerContext {}
/// Specifies whether a commit should be performed synchronously or asynchronously.
///
/// Regardless of the `CommitMode`, the commit APIs enqueue the commit request in
/// a local work queue. A separate worker thread picks up this commit request and forwards
/// it to the Kafka broker over the network. [1]
///
/// The difference between CommitMode::Sync and CommitMode::Async is in whether the caller
/// waits for the Kafka broker to signal that it finished handling the commit request. [2]
///
/// Note that the commit APIs are not async in the Rust sense due to the lack of a
/// callback-based interface exposed by librdkafka. [3]
///
/// [1]: https://github.com/edenhill/librdkafka/blob/e3d9515e396615b57674a93b39be2ca60355f4f4/src/rdkafka_cgrp.c#L3161
/// [2]: https://github.com/edenhill/librdkafka/blob/f092c290995ca81b3afb4015fcc3350ba02caa96/src/rdkafka_offset.c#L387
/// [3]: https://github.com/edenhill/librdkafka/issues/3212
/// [4]: https://github.com/edenhill/librdkafka/blob/e3d9515e396615b57674a93b39be2ca60355f4f4/src/rdkafka_cgrp.c#L2846
#[derive(Clone, Copy, Debug)]
pub enum CommitMode {
/// In `Sync` mode, the caller blocks until the Kafka broker finishes processing
/// the commit request. [4]
Sync = 0,
/// In `Async` mode, the caller enqueues the commit request in a local
/// work queue and returns immediately. [2]
Async = 1,
}
/// Consumer group metadata.
///
/// For use with [`Producer::send_offsets_to_transaction`].
///
/// [`Producer::send_offsets_to_transaction`]: crate::producer::Producer::send_offsets_to_transaction
pub struct ConsumerGroupMetadata(NativePtr<RDKafkaConsumerGroupMetadata>);
impl ConsumerGroupMetadata {
pub(crate) fn ptr(&self) -> *const RDKafkaConsumerGroupMetadata {
self.0.ptr()
}
}
unsafe impl KafkaDrop for RDKafkaConsumerGroupMetadata {
const TYPE: &'static str = "consumer_group_metadata";
const DROP: unsafe extern "C" fn(*mut Self) = rdsys::rd_kafka_consumer_group_metadata_destroy;
}
unsafe impl Send for ConsumerGroupMetadata {}
unsafe impl Sync for ConsumerGroupMetadata {}
/// The rebalance protocol for a consumer.
pub enum RebalanceProtocol {
/// The consumer has not (yet) joined a group.
None,
/// Eager rebalance protocol.
Eager,
/// Cooperative rebalance protocol.
Cooperative,
}
/// Common trait for all consumers.
///
/// # Note about object safety
///
/// Doing type erasure on consumers is expected to be rare (eg. `Box<dyn
/// Consumer>`). Therefore, the API is optimised for the case where a concrete
/// type is available. As a result, some methods are not available on trait
/// objects, since they are generic.
pub trait Consumer<C = DefaultConsumerContext>
where
C: ConsumerContext,
{
/// Returns the [`Client`] underlying this consumer.
fn client(&self) -> &Client<C>;
/// Returns a reference to the [`ConsumerContext`] used to create this
/// consumer.
fn context(&self) -> &Arc<C> {
self.client().context()
}
/// Returns the current consumer group metadata associated with the
/// consumer.
///
/// If the consumer was not configured with a `group.id`, returns `None`.
/// For use with [`Producer::send_offsets_to_transaction`].
///
/// [`Producer::send_offsets_to_transaction`]: crate::producer::Producer::send_offsets_to_transaction
fn group_metadata(&self) -> Option<ConsumerGroupMetadata>;
/// Subscribes the consumer to a list of topics.
fn subscribe(&self, topics: &[&str]) -> KafkaResult<()>;
/// Unsubscribes the current subscription list.
fn unsubscribe(&self);
/// Manually assigns topics and partitions to the consumer. If used,
/// automatic consumer rebalance won't be activated.
fn assign(&self, assignment: &TopicPartitionList) -> KafkaResult<()>;
/// Seeks to `offset` for the specified `topic` and `partition`. After a
/// successful call to `seek`, the next poll of the consumer will return the
/// message with `offset`.
fn seek<T: Into<Timeout>>(
&self,
topic: &str,
partition: i32,
offset: Offset,
timeout: T,
) -> KafkaResult<()>;
/// Commits the offset of the specified message. The commit can be sync
/// (blocking), or async. Notice that when a specific offset is committed,
/// all the previous offsets are considered committed as well. Use this
/// method only if you are processing messages in order.
fn commit(
&self,
topic_partition_list: &TopicPartitionList,
mode: CommitMode,
) -> KafkaResult<()>;
/// Commits the current consumer state. Notice that if the consumer fails
/// after a message has been received, but before the message has been
/// processed by the user code, this might lead to data loss. Check the
/// "at-least-once delivery" section in the readme for more information.
fn commit_consumer_state(&self, mode: CommitMode) -> KafkaResult<()>;
/// Commit the provided message. Note that this will also automatically
/// commit every message with lower offset within the same partition.
fn commit_message(&self, message: &BorrowedMessage<'_>, mode: CommitMode) -> KafkaResult<()>;
/// Stores offset to be used on the next (auto)commit. When
/// using this `enable.auto.offset.store` should be set to `false` in the
/// config.
fn store_offset(&self, topic: &str, partition: i32, offset: i64) -> KafkaResult<()>;
/// Like [`Consumer::store_offset`], but the offset to store is derived from
/// the provided message.
fn store_offset_from_message(&self, message: &BorrowedMessage<'_>) -> KafkaResult<()>;
/// Store offsets to be used on the next (auto)commit. When using this
/// `enable.auto.offset.store` should be set to `false` in the config.
fn store_offsets(&self, tpl: &TopicPartitionList) -> KafkaResult<()>;
/// Returns the current topic subscription.
fn subscription(&self) -> KafkaResult<TopicPartitionList>;
/// Returns the current partition assignment.
fn assignment(&self) -> KafkaResult<TopicPartitionList>;
/// Retrieves the committed offsets for topics and partitions.
fn committed<T>(&self, timeout: T) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>,
Self: Sized;
/// Retrieves the committed offsets for specified topics and partitions.
fn committed_offsets<T>(
&self,
tpl: TopicPartitionList,
timeout: T,
) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>;
/// Looks up the offsets for this consumer's partitions by timestamp.
fn offsets_for_timestamp<T>(
&self,
timestamp: i64,
timeout: T,
) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>,
Self: Sized;
/// Looks up the offsets for the specified partitions by timestamp.
fn offsets_for_times<T>(
&self,
timestamps: TopicPartitionList,
timeout: T,
) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>,
Self: Sized;
/// Retrieve current positions (offsets) for topics and partitions.
fn position(&self) -> KafkaResult<TopicPartitionList>;
/// Returns the metadata information for the specified topic, or for all
/// topics in the cluster if no topic is specified.
fn fetch_metadata<T>(&self, topic: Option<&str>, timeout: T) -> KafkaResult<Metadata>
where
T: Into<Timeout>,
Self: Sized;
/// Returns the low and high watermarks for a specific topic and partition.
fn fetch_watermarks<T>(
&self,
topic: &str,
partition: i32,
timeout: T,
) -> KafkaResult<(i64, i64)>
where
T: Into<Timeout>,
Self: Sized;
/// Returns the group membership information for the given group. If no group is
/// specified, all groups will be returned.
fn fetch_group_list<T>(&self, group: Option<&str>, timeout: T) -> KafkaResult<GroupList>
where
T: Into<Timeout>,
Self: Sized;
/// Pauses consumption for the provided list of partitions.
fn pause(&self, partitions: &TopicPartitionList) -> KafkaResult<()>;
/// Resumes consumption for the provided list of partitions.
fn resume(&self, partitions: &TopicPartitionList) -> KafkaResult<()>;
/// Reports the rebalance protocol in use.
fn rebalance_protocol(&self) -> RebalanceProtocol;
}
|
post_rebalance
|
identifier_name
|
mod.rs
|
//! Kafka consumers.
use std::ptr;
use std::sync::Arc;
use std::time::Duration;
use rdkafka_sys as rdsys;
use rdkafka_sys::types::*;
use crate::client::{Client, ClientContext, NativeClient};
use crate::error::KafkaResult;
use crate::groups::GroupList;
use crate::log::{error, trace};
use crate::message::BorrowedMessage;
use crate::metadata::Metadata;
use crate::topic_partition_list::{Offset, TopicPartitionList};
use crate::util::{cstr_to_owned, KafkaDrop, NativePtr, Timeout};
pub mod base_consumer;
pub mod stream_consumer;
// Re-exports.
#[doc(inline)]
pub use self::base_consumer::BaseConsumer;
#[doc(inline)]
pub use self::stream_consumer::{MessageStream, StreamConsumer};
/// Rebalance information.
#[derive(Clone, Debug)]
pub enum Rebalance<'a> {
/// A new partition assignment is received.
Assign(&'a TopicPartitionList),
/// A new partition revocation is received.
Revoke(&'a TopicPartitionList),
/// Unexpected error from Kafka.
Error(String),
}
/// Consumer-specific context.
///
/// This user-defined object can be used to provide custom callbacks for
/// consumer events. Refer to the list of methods to check which callbacks can
/// be specified.
///
/// See also the [`ClientContext`] trait.
pub trait ConsumerContext: ClientContext {
/// Implements the default rebalancing strategy and calls the
/// [`pre_rebalance`](ConsumerContext::pre_rebalance) and
/// [`post_rebalance`](ConsumerContext::post_rebalance) methods. If this
/// method is overridden, it will be responsibility of the user to call them
/// if needed.
fn rebalance(
&self,
native_client: &NativeClient,
err: RDKafkaRespErr,
tpl: &mut TopicPartitionList,
) {
let rebalance = match err {
RDKafkaRespErr::RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS => Rebalance::Assign(tpl),
RDKafkaRespErr::RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS => Rebalance::Revoke(tpl),
_ => {
let error = unsafe { cstr_to_owned(rdsys::rd_kafka_err2str(err)) };
error!("Error rebalancing: {}", error);
Rebalance::Error(error)
}
};
trace!("Running pre-rebalance with {:?}", rebalance);
self.pre_rebalance(&rebalance);
trace!("Running rebalance with {:?}", rebalance);
// Execute rebalance
unsafe {
match err {
RDKafkaRespErr::RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS => {
match native_client.rebalance_protocol() {
RebalanceProtocol::Cooperative => {
rdsys::rd_kafka_incremental_assign(native_client.ptr(), tpl.ptr());
}
_ => {
rdsys::rd_kafka_assign(native_client.ptr(), tpl.ptr());
}
}
}
_ => match native_client.rebalance_protocol() {
RebalanceProtocol::Cooperative => {
rdsys::rd_kafka_incremental_unassign(native_client.ptr(), tpl.ptr());
}
_ => {
rdsys::rd_kafka_assign(native_client.ptr(), ptr::null());
}
},
}
}
trace!("Running post-rebalance with {:?}", rebalance);
self.post_rebalance(&rebalance);
}
/// Pre-rebalance callback. This method will run before the rebalance and
/// should terminate its execution quickly.
#[allow(unused_variables)]
fn pre_rebalance<'a>(&self, rebalance: &Rebalance<'a>) {}
/// Post-rebalance callback. This method will run after the rebalance and
/// should terminate its execution quickly.
#[allow(unused_variables)]
fn post_rebalance<'a>(&self, rebalance: &Rebalance<'a>) {}
// TODO: convert pointer to structure
/// Post commit callback. This method will run after a group of offsets was
/// committed to the offset store.
#[allow(unused_variables)]
fn commit_callback(&self, result: KafkaResult<()>, offsets: &TopicPartitionList)
|
/// Returns the minimum interval at which to poll the main queue, which
/// services the logging, stats, and error callbacks.
///
/// The main queue is polled once whenever [`BaseConsumer::poll`] is called.
/// If `poll` is called with a timeout that is larger than this interval,
/// then the main queue will be polled at that interval while the consumer
/// queue is blocked.
///
/// For example, if the main queue's minimum poll interval is 200ms and
/// `poll` is called with a timeout of 1s, then `poll` may block for up to
/// 1s waiting for a message, but it will poll the main queue every 200ms
/// while it is waiting.
///
/// By default, the minimum poll interval for the main queue is 1s.
fn main_queue_min_poll_interval(&self) -> Timeout {
Timeout::After(Duration::from_secs(1))
}
}
/// An inert [`ConsumerContext`] that can be used when no customizations are
/// needed.
#[derive(Clone, Debug, Default)]
pub struct DefaultConsumerContext;
impl ClientContext for DefaultConsumerContext {}
impl ConsumerContext for DefaultConsumerContext {}
/// Specifies whether a commit should be performed synchronously or asynchronously.
///
/// Regardless of the `CommitMode`, the commit APIs enqueue the commit request in
/// a local work queue. A separate worker thread picks up this commit request and forwards
/// it to the Kafka broker over the network. [1]
///
/// The difference between CommitMode::Sync and CommitMode::Async is in whether the caller
/// waits for the Kafka broker to signal that it finished handling the commit request. [2]
///
/// Note that the commit APIs are not async in the Rust sense due to the lack of a
/// callback-based interface exposed by librdkafka. [3]
///
/// [1]: https://github.com/edenhill/librdkafka/blob/e3d9515e396615b57674a93b39be2ca60355f4f4/src/rdkafka_cgrp.c#L3161
/// [2]: https://github.com/edenhill/librdkafka/blob/f092c290995ca81b3afb4015fcc3350ba02caa96/src/rdkafka_offset.c#L387
/// [3]: https://github.com/edenhill/librdkafka/issues/3212
/// [4]: https://github.com/edenhill/librdkafka/blob/e3d9515e396615b57674a93b39be2ca60355f4f4/src/rdkafka_cgrp.c#L2846
#[derive(Clone, Copy, Debug)]
pub enum CommitMode {
/// In `Sync` mode, the caller blocks until the Kafka broker finishes processing
/// the commit request. [4]
Sync = 0,
/// In `Async` mode, the caller enqueues the commit request in a local
/// work queue and returns immediately. [2]
Async = 1,
}
/// Consumer group metadata.
///
/// For use with [`Producer::send_offsets_to_transaction`].
///
/// [`Producer::send_offsets_to_transaction`]: crate::producer::Producer::send_offsets_to_transaction
pub struct ConsumerGroupMetadata(NativePtr<RDKafkaConsumerGroupMetadata>);
impl ConsumerGroupMetadata {
pub(crate) fn ptr(&self) -> *const RDKafkaConsumerGroupMetadata {
self.0.ptr()
}
}
unsafe impl KafkaDrop for RDKafkaConsumerGroupMetadata {
const TYPE: &'static str = "consumer_group_metadata";
const DROP: unsafe extern "C" fn(*mut Self) = rdsys::rd_kafka_consumer_group_metadata_destroy;
}
unsafe impl Send for ConsumerGroupMetadata {}
unsafe impl Sync for ConsumerGroupMetadata {}
/// The rebalance protocol for a consumer.
pub enum RebalanceProtocol {
/// The consumer has not (yet) joined a group.
None,
/// Eager rebalance protocol.
Eager,
/// Cooperative rebalance protocol.
Cooperative,
}
/// Common trait for all consumers.
///
/// # Note about object safety
///
/// Doing type erasure on consumers is expected to be rare (eg. `Box<dyn
/// Consumer>`). Therefore, the API is optimised for the case where a concrete
/// type is available. As a result, some methods are not available on trait
/// objects, since they are generic.
pub trait Consumer<C = DefaultConsumerContext>
where
C: ConsumerContext,
{
/// Returns the [`Client`] underlying this consumer.
fn client(&self) -> &Client<C>;
/// Returns a reference to the [`ConsumerContext`] used to create this
/// consumer.
fn context(&self) -> &Arc<C> {
self.client().context()
}
/// Returns the current consumer group metadata associated with the
/// consumer.
///
/// If the consumer was not configured with a `group.id`, returns `None`.
/// For use with [`Producer::send_offsets_to_transaction`].
///
/// [`Producer::send_offsets_to_transaction`]: crate::producer::Producer::send_offsets_to_transaction
fn group_metadata(&self) -> Option<ConsumerGroupMetadata>;
/// Subscribes the consumer to a list of topics.
fn subscribe(&self, topics: &[&str]) -> KafkaResult<()>;
/// Unsubscribes the current subscription list.
fn unsubscribe(&self);
/// Manually assigns topics and partitions to the consumer. If used,
/// automatic consumer rebalance won't be activated.
fn assign(&self, assignment: &TopicPartitionList) -> KafkaResult<()>;
/// Seeks to `offset` for the specified `topic` and `partition`. After a
/// successful call to `seek`, the next poll of the consumer will return the
/// message with `offset`.
fn seek<T: Into<Timeout>>(
&self,
topic: &str,
partition: i32,
offset: Offset,
timeout: T,
) -> KafkaResult<()>;
/// Commits the offset of the specified message. The commit can be sync
/// (blocking), or async. Notice that when a specific offset is committed,
/// all the previous offsets are considered committed as well. Use this
/// method only if you are processing messages in order.
fn commit(
&self,
topic_partition_list: &TopicPartitionList,
mode: CommitMode,
) -> KafkaResult<()>;
/// Commits the current consumer state. Notice that if the consumer fails
/// after a message has been received, but before the message has been
/// processed by the user code, this might lead to data loss. Check the
/// "at-least-once delivery" section in the readme for more information.
fn commit_consumer_state(&self, mode: CommitMode) -> KafkaResult<()>;
/// Commit the provided message. Note that this will also automatically
/// commit every message with lower offset within the same partition.
fn commit_message(&self, message: &BorrowedMessage<'_>, mode: CommitMode) -> KafkaResult<()>;
/// Stores offset to be used on the next (auto)commit. When
/// using this `enable.auto.offset.store` should be set to `false` in the
/// config.
fn store_offset(&self, topic: &str, partition: i32, offset: i64) -> KafkaResult<()>;
/// Like [`Consumer::store_offset`], but the offset to store is derived from
/// the provided message.
fn store_offset_from_message(&self, message: &BorrowedMessage<'_>) -> KafkaResult<()>;
/// Store offsets to be used on the next (auto)commit. When using this
/// `enable.auto.offset.store` should be set to `false` in the config.
fn store_offsets(&self, tpl: &TopicPartitionList) -> KafkaResult<()>;
/// Returns the current topic subscription.
fn subscription(&self) -> KafkaResult<TopicPartitionList>;
/// Returns the current partition assignment.
fn assignment(&self) -> KafkaResult<TopicPartitionList>;
/// Retrieves the committed offsets for topics and partitions.
fn committed<T>(&self, timeout: T) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>,
Self: Sized;
/// Retrieves the committed offsets for specified topics and partitions.
fn committed_offsets<T>(
&self,
tpl: TopicPartitionList,
timeout: T,
) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>;
/// Looks up the offsets for this consumer's partitions by timestamp.
fn offsets_for_timestamp<T>(
&self,
timestamp: i64,
timeout: T,
) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>,
Self: Sized;
/// Looks up the offsets for the specified partitions by timestamp.
fn offsets_for_times<T>(
&self,
timestamps: TopicPartitionList,
timeout: T,
) -> KafkaResult<TopicPartitionList>
where
T: Into<Timeout>,
Self: Sized;
/// Retrieve current positions (offsets) for topics and partitions.
fn position(&self) -> KafkaResult<TopicPartitionList>;
/// Returns the metadata information for the specified topic, or for all
/// topics in the cluster if no topic is specified.
fn fetch_metadata<T>(&self, topic: Option<&str>, timeout: T) -> KafkaResult<Metadata>
where
T: Into<Timeout>,
Self: Sized;
/// Returns the low and high watermarks for a specific topic and partition.
fn fetch_watermarks<T>(
&self,
topic: &str,
partition: i32,
timeout: T,
) -> KafkaResult<(i64, i64)>
where
T: Into<Timeout>,
Self: Sized;
/// Returns the group membership information for the given group. If no group is
/// specified, all groups will be returned.
fn fetch_group_list<T>(&self, group: Option<&str>, timeout: T) -> KafkaResult<GroupList>
where
T: Into<Timeout>,
Self: Sized;
/// Pauses consumption for the provided list of partitions.
fn pause(&self, partitions: &TopicPartitionList) -> KafkaResult<()>;
/// Resumes consumption for the provided list of partitions.
fn resume(&self, partitions: &TopicPartitionList) -> KafkaResult<()>;
/// Reports the rebalance protocol in use.
fn rebalance_protocol(&self) -> RebalanceProtocol;
}
|
{}
|
identifier_body
|
request.rs
|
use Request;
use hyper::header;
pub trait Referer {
fn referer(&self) -> Option<&str>;
}
|
/// # Examples
/// ```{rust}
/// #[macro_use] extern crate nickel;
///
/// use nickel::{Nickel, HttpRouter};
/// use nickel::extensions::{Referer, Redirect};
///
/// fn main() {
/// let mut server = Nickel::new();
/// server.get("/a", middleware! { |req, res|
/// let back = req.referer().unwrap_or("http://nickel.rs");
/// return res.redirect(back)
/// });
/// }
/// ```
fn referer(&self) -> Option<&str> {
self.origin.headers.get::<header::Referer>()
.map(|r| &***r)
}
}
|
impl<'mw, 'server, D> Referer for Request<'mw, 'server, D> {
/// Get the Request's referer header
///
|
random_line_split
|
request.rs
|
use Request;
use hyper::header;
pub trait Referer {
fn referer(&self) -> Option<&str>;
}
impl<'mw,'server, D> Referer for Request<'mw,'server, D> {
/// Get the Request's referer header
///
/// # Examples
/// ```{rust}
/// #[macro_use] extern crate nickel;
///
/// use nickel::{Nickel, HttpRouter};
/// use nickel::extensions::{Referer, Redirect};
///
/// fn main() {
/// let mut server = Nickel::new();
/// server.get("/a", middleware! { |req, res|
/// let back = req.referer().unwrap_or("http://nickel.rs");
/// return res.redirect(back)
/// });
/// }
/// ```
fn referer(&self) -> Option<&str>
|
}
|
{
self.origin.headers.get::<header::Referer>()
.map(|r| &***r)
}
|
identifier_body
|
request.rs
|
use Request;
use hyper::header;
pub trait Referer {
fn referer(&self) -> Option<&str>;
}
impl<'mw,'server, D> Referer for Request<'mw,'server, D> {
/// Get the Request's referer header
///
/// # Examples
/// ```{rust}
/// #[macro_use] extern crate nickel;
///
/// use nickel::{Nickel, HttpRouter};
/// use nickel::extensions::{Referer, Redirect};
///
/// fn main() {
/// let mut server = Nickel::new();
/// server.get("/a", middleware! { |req, res|
/// let back = req.referer().unwrap_or("http://nickel.rs");
/// return res.redirect(back)
/// });
/// }
/// ```
fn
|
(&self) -> Option<&str> {
self.origin.headers.get::<header::Referer>()
.map(|r| &***r)
}
}
|
referer
|
identifier_name
|
xcrate-private-by-default.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:static_priv_by_default.rs
#[no_std]; // helps if debugging resolve
extern mod static_priv_by_default;
fn
|
<T>() {}
#[start]
fn main(_: int, _: **u8) -> int {
// Actual public items should be public
static_priv_by_default::a;
static_priv_by_default::b;
static_priv_by_default::c;
foo::<static_priv_by_default::d>();
// publicly re-exported items should be available
static_priv_by_default::bar::e;
static_priv_by_default::bar::f;
static_priv_by_default::bar::g;
foo::<static_priv_by_default::bar::h>();
// private items at the top should be inaccessible
static_priv_by_default::i;
//~^ ERROR: static `i` is private
static_priv_by_default::j;
//~^ ERROR: function `j` is private
static_priv_by_default::k;
//~^ ERROR: struct `k` is private
foo::<static_priv_by_default::l>();
//~^ ERROR: type `l` is private
// public items in a private mod should be inaccessible
static_priv_by_default::foo::a;
//~^ ERROR: static `a` is private
static_priv_by_default::foo::b;
//~^ ERROR: function `b` is private
static_priv_by_default::foo::c;
//~^ ERROR: struct `c` is private
foo::<static_priv_by_default::foo::d>();
//~^ ERROR: type `d` is private
3
}
|
foo
|
identifier_name
|
xcrate-private-by-default.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:static_priv_by_default.rs
#[no_std]; // helps if debugging resolve
extern mod static_priv_by_default;
fn foo<T>()
|
#[start]
fn main(_: int, _: **u8) -> int {
// Actual public items should be public
static_priv_by_default::a;
static_priv_by_default::b;
static_priv_by_default::c;
foo::<static_priv_by_default::d>();
// publicly re-exported items should be available
static_priv_by_default::bar::e;
static_priv_by_default::bar::f;
static_priv_by_default::bar::g;
foo::<static_priv_by_default::bar::h>();
// private items at the top should be inaccessible
static_priv_by_default::i;
//~^ ERROR: static `i` is private
static_priv_by_default::j;
//~^ ERROR: function `j` is private
static_priv_by_default::k;
//~^ ERROR: struct `k` is private
foo::<static_priv_by_default::l>();
//~^ ERROR: type `l` is private
// public items in a private mod should be inaccessible
static_priv_by_default::foo::a;
//~^ ERROR: static `a` is private
static_priv_by_default::foo::b;
//~^ ERROR: function `b` is private
static_priv_by_default::foo::c;
//~^ ERROR: struct `c` is private
foo::<static_priv_by_default::foo::d>();
//~^ ERROR: type `d` is private
3
}
|
{}
|
identifier_body
|
xcrate-private-by-default.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:static_priv_by_default.rs
#[no_std]; // helps if debugging resolve
extern mod static_priv_by_default;
fn foo<T>() {}
#[start]
fn main(_: int, _: **u8) -> int {
// Actual public items should be public
static_priv_by_default::a;
static_priv_by_default::b;
static_priv_by_default::c;
foo::<static_priv_by_default::d>();
// publicly re-exported items should be available
static_priv_by_default::bar::e;
static_priv_by_default::bar::f;
static_priv_by_default::bar::g;
foo::<static_priv_by_default::bar::h>();
// private items at the top should be inaccessible
static_priv_by_default::i;
//~^ ERROR: static `i` is private
static_priv_by_default::j;
//~^ ERROR: function `j` is private
static_priv_by_default::k;
//~^ ERROR: struct `k` is private
foo::<static_priv_by_default::l>();
//~^ ERROR: type `l` is private
// public items in a private mod should be inaccessible
static_priv_by_default::foo::a;
//~^ ERROR: static `a` is private
static_priv_by_default::foo::b;
//~^ ERROR: function `b` is private
static_priv_by_default::foo::c;
//~^ ERROR: struct `c` is private
|
foo::<static_priv_by_default::foo::d>();
//~^ ERROR: type `d` is private
3
}
|
random_line_split
|
|
msghandle.rs
|
// CITA
// Copyright 2016-2017 Cryptape Technologies LLC.
// This program is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any
// later version.
// This program is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use citaprotocol::CitaRequest;
use connection::Connection;
use libproto::*;
use libproto::communication::*;
use libproto::request::Request;
use protobuf::Message;
use protobuf::core::parse_from_bytes;
use server::MySender;
use std::io;
use std::sync::mpsc::Sender;
pub fn
|
(con: &Connection, tx_pub: &Sender<(String, Vec<u8>)>, payload: &[u8]) {
if let Ok(msg) = parse_from_bytes::<communication::Message>(payload) {
let t = msg.get_field_type();
let cid = msg.get_cmd_id();
info!("-----cmd_id--------{:?}--------------", display_cmd(cid));
if cid == cmd_id(submodules::JSON_RPC, topics::REQUEST) && t == MsgType::REQUEST {
let mut ts = parse_from_bytes::<Request>(msg.get_content()).unwrap();
let mut response = request::Response::new();
response.set_request_id(ts.take_request_id());
if ts.has_peercount() {
let peercount = con.peers_pair.iter().filter(|x| x.2.as_ref().read().is_some()).count();
response.set_peercount(peercount as u32);
let ms: communication::Message = response.into();
tx_pub.send(("chain.rpc".to_string(), ms.write_to_bytes().unwrap())).unwrap();
}
}
}
}
pub fn is_need_proc(payload: &[u8]) -> (String, bool, communication::Message) {
if let Ok(msg) = parse_from_bytes::<communication::Message>(payload) {
let mut topic = String::default();
let mut is_proc = true;
let t = msg.get_field_type();
let cid = msg.get_cmd_id();
if cid == cmd_id(submodules::CONSENSUS, topics::NEW_TX) && t == MsgType::TX {
trace!("CONSENSUS broadcast tx");
topic = "net.tx".to_string();
} else if cid == cmd_id(submodules::CONSENSUS, topics::NEW_BLK) && t == MsgType::BLOCK {
info!("CONSENSUS pub blk");
topic = "net.blk".to_string();
} else if cid == cmd_id(submodules::CHAIN, topics::NEW_BLK) && t == MsgType::BLOCK {
info!("CHAIN pub blk");
topic = "net.blk".to_string();
} else if cid == cmd_id(submodules::CHAIN, topics::NEW_STATUS) && t == MsgType::STATUS {
info!("CHAIN pub status");
topic = "net.status".to_string();
} else if cid == cmd_id(submodules::CHAIN, topics::SYNC_BLK) && t == MsgType::MSG {
info!("CHAIN sync blk");
topic = "net.sync".to_string();
} else if (cid == cmd_id(submodules::CONSENSUS, topics::CONSENSUS_MSG) && t == MsgType::MSG) || (cid == cmd_id(submodules::CONSENSUS, topics::NEW_PROPOSAL) && t == MsgType::MSG) {
trace!("CONSENSUS pub msg");
topic = "net.msg".to_string();
} else {
is_proc = false;
}
return (topic, is_proc, msg);
}
("".to_string(), false, communication::Message::new())
}
pub fn net_msg_handler(payload: CitaRequest, mysender: &MySender) -> Result<Vec<u8>, io::Error> {
//info!("--------net_msg_handler-----------{:?}-", payload);
trace!("SERVER get msg: {:?}", payload);
if let (topic, true, msg) = is_need_proc(payload.as_ref()) {
info!("recive msg from origin = {:?}", msg.get_origin());
mysender.send((topic, payload))
}
Ok(vec![])
}
|
handle_rpc
|
identifier_name
|
msghandle.rs
|
// CITA
// Copyright 2016-2017 Cryptape Technologies LLC.
// This program is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any
// later version.
// This program is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use citaprotocol::CitaRequest;
use connection::Connection;
use libproto::*;
use libproto::communication::*;
use libproto::request::Request;
|
pub fn handle_rpc(con: &Connection, tx_pub: &Sender<(String, Vec<u8>)>, payload: &[u8]) {
if let Ok(msg) = parse_from_bytes::<communication::Message>(payload) {
let t = msg.get_field_type();
let cid = msg.get_cmd_id();
info!("-----cmd_id--------{:?}--------------", display_cmd(cid));
if cid == cmd_id(submodules::JSON_RPC, topics::REQUEST) && t == MsgType::REQUEST {
let mut ts = parse_from_bytes::<Request>(msg.get_content()).unwrap();
let mut response = request::Response::new();
response.set_request_id(ts.take_request_id());
if ts.has_peercount() {
let peercount = con.peers_pair.iter().filter(|x| x.2.as_ref().read().is_some()).count();
response.set_peercount(peercount as u32);
let ms: communication::Message = response.into();
tx_pub.send(("chain.rpc".to_string(), ms.write_to_bytes().unwrap())).unwrap();
}
}
}
}
pub fn is_need_proc(payload: &[u8]) -> (String, bool, communication::Message) {
if let Ok(msg) = parse_from_bytes::<communication::Message>(payload) {
let mut topic = String::default();
let mut is_proc = true;
let t = msg.get_field_type();
let cid = msg.get_cmd_id();
if cid == cmd_id(submodules::CONSENSUS, topics::NEW_TX) && t == MsgType::TX {
trace!("CONSENSUS broadcast tx");
topic = "net.tx".to_string();
} else if cid == cmd_id(submodules::CONSENSUS, topics::NEW_BLK) && t == MsgType::BLOCK {
info!("CONSENSUS pub blk");
topic = "net.blk".to_string();
} else if cid == cmd_id(submodules::CHAIN, topics::NEW_BLK) && t == MsgType::BLOCK {
info!("CHAIN pub blk");
topic = "net.blk".to_string();
} else if cid == cmd_id(submodules::CHAIN, topics::NEW_STATUS) && t == MsgType::STATUS {
info!("CHAIN pub status");
topic = "net.status".to_string();
} else if cid == cmd_id(submodules::CHAIN, topics::SYNC_BLK) && t == MsgType::MSG {
info!("CHAIN sync blk");
topic = "net.sync".to_string();
} else if (cid == cmd_id(submodules::CONSENSUS, topics::CONSENSUS_MSG) && t == MsgType::MSG) || (cid == cmd_id(submodules::CONSENSUS, topics::NEW_PROPOSAL) && t == MsgType::MSG) {
trace!("CONSENSUS pub msg");
topic = "net.msg".to_string();
} else {
is_proc = false;
}
return (topic, is_proc, msg);
}
("".to_string(), false, communication::Message::new())
}
pub fn net_msg_handler(payload: CitaRequest, mysender: &MySender) -> Result<Vec<u8>, io::Error> {
//info!("--------net_msg_handler-----------{:?}-", payload);
trace!("SERVER get msg: {:?}", payload);
if let (topic, true, msg) = is_need_proc(payload.as_ref()) {
info!("recive msg from origin = {:?}", msg.get_origin());
mysender.send((topic, payload))
}
Ok(vec![])
}
|
use protobuf::Message;
use protobuf::core::parse_from_bytes;
use server::MySender;
use std::io;
use std::sync::mpsc::Sender;
|
random_line_split
|
msghandle.rs
|
// CITA
// Copyright 2016-2017 Cryptape Technologies LLC.
// This program is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any
// later version.
// This program is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use citaprotocol::CitaRequest;
use connection::Connection;
use libproto::*;
use libproto::communication::*;
use libproto::request::Request;
use protobuf::Message;
use protobuf::core::parse_from_bytes;
use server::MySender;
use std::io;
use std::sync::mpsc::Sender;
pub fn handle_rpc(con: &Connection, tx_pub: &Sender<(String, Vec<u8>)>, payload: &[u8]) {
if let Ok(msg) = parse_from_bytes::<communication::Message>(payload) {
let t = msg.get_field_type();
let cid = msg.get_cmd_id();
info!("-----cmd_id--------{:?}--------------", display_cmd(cid));
if cid == cmd_id(submodules::JSON_RPC, topics::REQUEST) && t == MsgType::REQUEST {
let mut ts = parse_from_bytes::<Request>(msg.get_content()).unwrap();
let mut response = request::Response::new();
response.set_request_id(ts.take_request_id());
if ts.has_peercount() {
let peercount = con.peers_pair.iter().filter(|x| x.2.as_ref().read().is_some()).count();
response.set_peercount(peercount as u32);
let ms: communication::Message = response.into();
tx_pub.send(("chain.rpc".to_string(), ms.write_to_bytes().unwrap())).unwrap();
}
}
}
}
pub fn is_need_proc(payload: &[u8]) -> (String, bool, communication::Message)
|
topic = "net.sync".to_string();
} else if (cid == cmd_id(submodules::CONSENSUS, topics::CONSENSUS_MSG) && t == MsgType::MSG) || (cid == cmd_id(submodules::CONSENSUS, topics::NEW_PROPOSAL) && t == MsgType::MSG) {
trace!("CONSENSUS pub msg");
topic = "net.msg".to_string();
} else {
is_proc = false;
}
return (topic, is_proc, msg);
}
("".to_string(), false, communication::Message::new())
}
pub fn net_msg_handler(payload: CitaRequest, mysender: &MySender) -> Result<Vec<u8>, io::Error> {
//info!("--------net_msg_handler-----------{:?}-", payload);
trace!("SERVER get msg: {:?}", payload);
if let (topic, true, msg) = is_need_proc(payload.as_ref()) {
info!("recive msg from origin = {:?}", msg.get_origin());
mysender.send((topic, payload))
}
Ok(vec![])
}
|
{
if let Ok(msg) = parse_from_bytes::<communication::Message>(payload) {
let mut topic = String::default();
let mut is_proc = true;
let t = msg.get_field_type();
let cid = msg.get_cmd_id();
if cid == cmd_id(submodules::CONSENSUS, topics::NEW_TX) && t == MsgType::TX {
trace!("CONSENSUS broadcast tx");
topic = "net.tx".to_string();
} else if cid == cmd_id(submodules::CONSENSUS, topics::NEW_BLK) && t == MsgType::BLOCK {
info!("CONSENSUS pub blk");
topic = "net.blk".to_string();
} else if cid == cmd_id(submodules::CHAIN, topics::NEW_BLK) && t == MsgType::BLOCK {
info!("CHAIN pub blk");
topic = "net.blk".to_string();
} else if cid == cmd_id(submodules::CHAIN, topics::NEW_STATUS) && t == MsgType::STATUS {
info!("CHAIN pub status");
topic = "net.status".to_string();
} else if cid == cmd_id(submodules::CHAIN, topics::SYNC_BLK) && t == MsgType::MSG {
info!("CHAIN sync blk");
|
identifier_body
|
str.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! String utils for attributes and similar stuff.
#![deny(missing_docs)]
use num_traits::ToPrimitive;
use std::borrow::Cow;
use std::convert::AsRef;
use std::fmt::{self, Write};
use std::iter::{Filter, Peekable};
use std::str::Split;
/// A static slice of characters.
pub type StaticCharVec = &'static [char];
/// A static slice of `str`s.
pub type StaticStringVec = &'static [&'static str];
/// A "space character" according to:
///
/// <https://html.spec.whatwg.org/multipage/#space-character>
pub static HTML_SPACE_CHARACTERS: StaticCharVec =
&['\u{0020}', '\u{0009}', '\u{000a}', '\u{000c}', '\u{000d}'];
/// Whether a character is a HTML whitespace character.
#[inline]
pub fn char_is_whitespace(c: char) -> bool {
HTML_SPACE_CHARACTERS.contains(&c)
}
/// Whether all the string is HTML whitespace.
#[inline]
pub fn is_whitespace(s: &str) -> bool {
s.chars().all(char_is_whitespace)
}
#[inline]
fn not_empty(&split: &&str) -> bool {
!split.is_empty()
}
/// Split a string on HTML whitespace.
#[inline]
pub fn split_html_space_chars<'a>(
s: &'a str,
) -> Filter<Split<'a, StaticCharVec>, fn(&&str) -> bool> {
s.split(HTML_SPACE_CHARACTERS)
.filter(not_empty as fn(&&str) -> bool)
}
/// Split a string on commas.
#[inline]
pub fn split_commas<'a>(s: &'a str) -> Filter<Split<'a, char>, fn(&&str) -> bool> {
s.split(',').filter(not_empty as fn(&&str) -> bool)
}
/// Character is ascii digit
pub fn is_ascii_digit(c: &char) -> bool {
match *c {
'0'...'9' => true,
_ => false,
}
}
fn is_decimal_point(c: char) -> bool {
c == '.'
}
fn is_exponent_char(c: char) -> bool {
match c {
'e' | 'E' => true,
_ => false,
}
}
/// Read a set of ascii digits and read them into a number.
pub fn read_numbers<I: Iterator<Item = char>>(mut iter: Peekable<I>) -> (Option<i64>, usize) {
match iter.peek() {
Some(c) if is_ascii_digit(c) => (),
_ => return (None, 0),
}
iter.take_while(is_ascii_digit)
.map(|d| d as i64 - '0' as i64)
.fold((Some(0i64), 0), |accumulator, d| {
let digits = accumulator
.0
.and_then(|accumulator| accumulator.checked_mul(10))
.and_then(|accumulator| accumulator.checked_add(d));
(digits, accumulator.1 + 1)
})
}
/// Read a decimal fraction.
pub fn read_fraction<I: Iterator<Item = char>>(
mut iter: Peekable<I>,
mut divisor: f64,
value: f64,
) -> (f64, usize) {
match iter.peek() {
Some(c) if is_decimal_point(*c) => (),
_ => return (value, 0),
}
iter.next();
iter.take_while(is_ascii_digit)
.map(|d| d as i64 - '0' as i64)
.fold((value, 1), |accumulator, d| {
divisor *= 10f64;
(accumulator.0 + d as f64 / divisor, accumulator.1 + 1)
})
}
/// Reads an exponent from an iterator over chars, for example `e100`.
pub fn read_exponent<I: Iterator<Item = char>>(mut iter: Peekable<I>) -> Option<i32> {
match iter.peek() {
Some(c) if is_exponent_char(*c) => (),
_ => return None,
}
iter.next();
match iter.peek() {
None => None,
Some(&'-') => {
iter.next();
read_numbers(iter).0.map(|exp| -exp.to_i32().unwrap_or(0))
},
Some(&'+') => {
iter.next();
read_numbers(iter).0.map(|exp| exp.to_i32().unwrap_or(0))
},
Some(_) => read_numbers(iter).0.map(|exp| exp.to_i32().unwrap_or(0)),
}
}
/// Join a set of strings with a given delimiter `join`.
pub fn str_join<I, T>(strs: I, join: &str) -> String
where
I: IntoIterator<Item = T>,
T: AsRef<str>,
{
strs.into_iter()
.enumerate()
.fold(String::new(), |mut acc, (i, s)| {
if i > 0 {
acc.push_str(join);
}
acc.push_str(s.as_ref());
acc
})
}
/// Returns true if a given string has a given prefix with case-insensitive match.
pub fn starts_with_ignore_ascii_case(string: &str, prefix: &str) -> bool {
string.len() >= prefix.len() &&
string.as_bytes()[0..prefix.len()].eq_ignore_ascii_case(prefix.as_bytes())
}
/// Returns an ascii lowercase version of a string, only allocating if needed.
pub fn string_as_ascii_lowercase<'a>(input: &'a str) -> Cow<'a, str> {
if input.bytes().any(|c| matches!(c, b'A'...b'Z')) {
input.to_ascii_lowercase().into()
} else {
// Already ascii lowercase.
Cow::Borrowed(input)
}
}
/// To avoid accidentally instantiating multiple monomorphizations of large
/// serialization routines, we define explicit concrete types and require
/// them in those routines. This primarily avoids accidental mixing of UTF8
/// with UTF16 serializations in Gecko.
#[cfg(feature = "gecko")]
pub type CssStringWriter = ::nsstring::nsAString;
/// String type that coerces to CssStringWriter, used when serialization code
/// needs to allocate a temporary string.
#[cfg(feature = "gecko")]
pub type CssString = ::nsstring::nsString;
/// Certain serialization code needs to interact with borrowed strings, which
/// are sometimes native UTF8 Rust strings, and other times serialized UTF16
/// strings. This enum multiplexes the two cases.
#[cfg(feature = "gecko")]
pub enum CssStringBorrow<'a> {
/// A borrow of a UTF16 CssString.
UTF16(&'a ::nsstring::nsString),
/// A borrow of a regular Rust UTF8 string.
UTF8(&'a str),
}
#[cfg(feature = "gecko")]
impl<'a> CssStringBorrow<'a> {
/// Writes the borrowed string to the provided writer.
pub fn append_to(&self, dest: &mut CssStringWriter) -> fmt::Result {
match *self {
CssStringBorrow::UTF16(s) => {
dest.append(s);
Ok(())
},
CssStringBorrow::UTF8(s) => dest.write_str(s),
}
}
/// Returns true of the borrowed string is empty.
pub fn is_empty(&self) -> bool
|
}
#[cfg(feature = "gecko")]
impl<'a> From<&'a str> for CssStringBorrow<'a> {
fn from(s: &'a str) -> Self {
CssStringBorrow::UTF8(s)
}
}
#[cfg(feature = "gecko")]
impl<'a> From<&'a ::nsstring::nsString> for CssStringBorrow<'a> {
fn from(s: &'a ::nsstring::nsString) -> Self {
CssStringBorrow::UTF16(s)
}
}
/// String. The comments for the Gecko types explain the need for this abstraction.
#[cfg(feature = "servo")]
pub type CssStringWriter = String;
/// String. The comments for the Gecko types explain the need for this abstraction.
#[cfg(feature = "servo")]
pub type CssString = String;
/// Borrowed string. The comments for the Gecko types explain the need for this abstraction.
#[cfg(feature = "servo")]
pub struct CssStringBorrow<'a>(&'a str);
#[cfg(feature = "servo")]
impl<'a> CssStringBorrow<'a> {
/// Appends the borrowed string to the given string.
pub fn append_to(&self, dest: &mut CssStringWriter) -> fmt::Result {
dest.write_str(self.0)
}
/// Returns true if the borrowed string is empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
#[cfg(feature = "servo")]
impl<'a> From<&'a str> for CssStringBorrow<'a> {
fn from(s: &'a str) -> Self {
CssStringBorrow(s)
}
}
#[cfg(feature = "servo")]
impl<'a> From<&'a String> for CssStringBorrow<'a> {
fn from(s: &'a String) -> Self {
CssStringBorrow(&*s)
}
}
|
{
match *self {
CssStringBorrow::UTF16(s) => s.is_empty(),
CssStringBorrow::UTF8(s) => s.is_empty(),
}
}
|
identifier_body
|
str.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! String utils for attributes and similar stuff.
#![deny(missing_docs)]
use num_traits::ToPrimitive;
use std::borrow::Cow;
use std::convert::AsRef;
use std::fmt::{self, Write};
use std::iter::{Filter, Peekable};
use std::str::Split;
/// A static slice of characters.
pub type StaticCharVec = &'static [char];
/// A static slice of `str`s.
pub type StaticStringVec = &'static [&'static str];
/// A "space character" according to:
///
/// <https://html.spec.whatwg.org/multipage/#space-character>
pub static HTML_SPACE_CHARACTERS: StaticCharVec =
&['\u{0020}', '\u{0009}', '\u{000a}', '\u{000c}', '\u{000d}'];
/// Whether a character is a HTML whitespace character.
#[inline]
pub fn char_is_whitespace(c: char) -> bool {
HTML_SPACE_CHARACTERS.contains(&c)
}
/// Whether all the string is HTML whitespace.
#[inline]
pub fn is_whitespace(s: &str) -> bool {
s.chars().all(char_is_whitespace)
}
#[inline]
fn not_empty(&split: &&str) -> bool {
!split.is_empty()
}
/// Split a string on HTML whitespace.
#[inline]
pub fn split_html_space_chars<'a>(
s: &'a str,
) -> Filter<Split<'a, StaticCharVec>, fn(&&str) -> bool> {
s.split(HTML_SPACE_CHARACTERS)
.filter(not_empty as fn(&&str) -> bool)
}
/// Split a string on commas.
#[inline]
pub fn split_commas<'a>(s: &'a str) -> Filter<Split<'a, char>, fn(&&str) -> bool> {
s.split(',').filter(not_empty as fn(&&str) -> bool)
}
/// Character is ascii digit
pub fn is_ascii_digit(c: &char) -> bool {
match *c {
'0'...'9' => true,
_ => false,
}
}
fn is_decimal_point(c: char) -> bool {
c == '.'
}
fn is_exponent_char(c: char) -> bool {
match c {
'e' | 'E' => true,
_ => false,
}
}
/// Read a set of ascii digits and read them into a number.
pub fn read_numbers<I: Iterator<Item = char>>(mut iter: Peekable<I>) -> (Option<i64>, usize) {
match iter.peek() {
Some(c) if is_ascii_digit(c) => (),
_ => return (None, 0),
}
iter.take_while(is_ascii_digit)
.map(|d| d as i64 - '0' as i64)
.fold((Some(0i64), 0), |accumulator, d| {
let digits = accumulator
.0
.and_then(|accumulator| accumulator.checked_mul(10))
.and_then(|accumulator| accumulator.checked_add(d));
(digits, accumulator.1 + 1)
})
}
/// Read a decimal fraction.
pub fn read_fraction<I: Iterator<Item = char>>(
mut iter: Peekable<I>,
mut divisor: f64,
value: f64,
) -> (f64, usize) {
match iter.peek() {
Some(c) if is_decimal_point(*c) => (),
_ => return (value, 0),
}
iter.next();
iter.take_while(is_ascii_digit)
.map(|d| d as i64 - '0' as i64)
.fold((value, 1), |accumulator, d| {
divisor *= 10f64;
(accumulator.0 + d as f64 / divisor, accumulator.1 + 1)
})
}
/// Reads an exponent from an iterator over chars, for example `e100`.
pub fn read_exponent<I: Iterator<Item = char>>(mut iter: Peekable<I>) -> Option<i32> {
match iter.peek() {
Some(c) if is_exponent_char(*c) => (),
_ => return None,
}
iter.next();
match iter.peek() {
None => None,
Some(&'-') => {
iter.next();
read_numbers(iter).0.map(|exp| -exp.to_i32().unwrap_or(0))
},
Some(&'+') => {
iter.next();
read_numbers(iter).0.map(|exp| exp.to_i32().unwrap_or(0))
},
Some(_) => read_numbers(iter).0.map(|exp| exp.to_i32().unwrap_or(0)),
}
}
/// Join a set of strings with a given delimiter `join`.
pub fn
|
<I, T>(strs: I, join: &str) -> String
where
I: IntoIterator<Item = T>,
T: AsRef<str>,
{
strs.into_iter()
.enumerate()
.fold(String::new(), |mut acc, (i, s)| {
if i > 0 {
acc.push_str(join);
}
acc.push_str(s.as_ref());
acc
})
}
/// Returns true if a given string has a given prefix with case-insensitive match.
pub fn starts_with_ignore_ascii_case(string: &str, prefix: &str) -> bool {
string.len() >= prefix.len() &&
string.as_bytes()[0..prefix.len()].eq_ignore_ascii_case(prefix.as_bytes())
}
/// Returns an ascii lowercase version of a string, only allocating if needed.
pub fn string_as_ascii_lowercase<'a>(input: &'a str) -> Cow<'a, str> {
if input.bytes().any(|c| matches!(c, b'A'...b'Z')) {
input.to_ascii_lowercase().into()
} else {
// Already ascii lowercase.
Cow::Borrowed(input)
}
}
/// To avoid accidentally instantiating multiple monomorphizations of large
/// serialization routines, we define explicit concrete types and require
/// them in those routines. This primarily avoids accidental mixing of UTF8
/// with UTF16 serializations in Gecko.
#[cfg(feature = "gecko")]
pub type CssStringWriter = ::nsstring::nsAString;
/// String type that coerces to CssStringWriter, used when serialization code
/// needs to allocate a temporary string.
#[cfg(feature = "gecko")]
pub type CssString = ::nsstring::nsString;
/// Certain serialization code needs to interact with borrowed strings, which
/// are sometimes native UTF8 Rust strings, and other times serialized UTF16
/// strings. This enum multiplexes the two cases.
#[cfg(feature = "gecko")]
pub enum CssStringBorrow<'a> {
/// A borrow of a UTF16 CssString.
UTF16(&'a ::nsstring::nsString),
/// A borrow of a regular Rust UTF8 string.
UTF8(&'a str),
}
#[cfg(feature = "gecko")]
impl<'a> CssStringBorrow<'a> {
/// Writes the borrowed string to the provided writer.
pub fn append_to(&self, dest: &mut CssStringWriter) -> fmt::Result {
match *self {
CssStringBorrow::UTF16(s) => {
dest.append(s);
Ok(())
},
CssStringBorrow::UTF8(s) => dest.write_str(s),
}
}
/// Returns true of the borrowed string is empty.
pub fn is_empty(&self) -> bool {
match *self {
CssStringBorrow::UTF16(s) => s.is_empty(),
CssStringBorrow::UTF8(s) => s.is_empty(),
}
}
}
#[cfg(feature = "gecko")]
impl<'a> From<&'a str> for CssStringBorrow<'a> {
fn from(s: &'a str) -> Self {
CssStringBorrow::UTF8(s)
}
}
#[cfg(feature = "gecko")]
impl<'a> From<&'a ::nsstring::nsString> for CssStringBorrow<'a> {
fn from(s: &'a ::nsstring::nsString) -> Self {
CssStringBorrow::UTF16(s)
}
}
/// String. The comments for the Gecko types explain the need for this abstraction.
#[cfg(feature = "servo")]
pub type CssStringWriter = String;
/// String. The comments for the Gecko types explain the need for this abstraction.
#[cfg(feature = "servo")]
pub type CssString = String;
/// Borrowed string. The comments for the Gecko types explain the need for this abstraction.
#[cfg(feature = "servo")]
pub struct CssStringBorrow<'a>(&'a str);
#[cfg(feature = "servo")]
impl<'a> CssStringBorrow<'a> {
/// Appends the borrowed string to the given string.
pub fn append_to(&self, dest: &mut CssStringWriter) -> fmt::Result {
dest.write_str(self.0)
}
/// Returns true if the borrowed string is empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
#[cfg(feature = "servo")]
impl<'a> From<&'a str> for CssStringBorrow<'a> {
fn from(s: &'a str) -> Self {
CssStringBorrow(s)
}
}
#[cfg(feature = "servo")]
impl<'a> From<&'a String> for CssStringBorrow<'a> {
fn from(s: &'a String) -> Self {
CssStringBorrow(&*s)
}
}
|
str_join
|
identifier_name
|
str.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! String utils for attributes and similar stuff.
#![deny(missing_docs)]
use num_traits::ToPrimitive;
use std::borrow::Cow;
use std::convert::AsRef;
use std::fmt::{self, Write};
use std::iter::{Filter, Peekable};
use std::str::Split;
/// A static slice of characters.
pub type StaticCharVec = &'static [char];
/// A static slice of `str`s.
pub type StaticStringVec = &'static [&'static str];
/// The "space characters" as defined by:
///
/// <https://html.spec.whatwg.org/multipage/#space-character>
pub static HTML_SPACE_CHARACTERS: StaticCharVec =
    &['\u{0020}', '\u{0009}', '\u{000a}', '\u{000c}', '\u{000d}'];
/// Returns whether `c` is one of the HTML space characters.
#[inline]
pub fn char_is_whitespace(c: char) -> bool {
    HTML_SPACE_CHARACTERS.iter().any(|&ws| ws == c)
}
/// Whether all the string is HTML whitespace.
#[inline]
pub fn is_whitespace(s: &str) -> bool {
s.chars().all(char_is_whitespace)
}
// Predicate used (as a plain `fn(&&str) -> bool` pointer) by the split
// helpers below; keeps only non-empty fragments.
#[inline]
fn not_empty(&split: &&str) -> bool {
    !split.is_empty()
}
/// Split a string on HTML whitespace.
///
/// Empty fragments (from runs of consecutive space characters or
/// leading/trailing whitespace) are filtered out. The filter is built
/// from the `not_empty` fn pointer so the concrete iterator type can be
/// named in the signature.
#[inline]
pub fn split_html_space_chars<'a>(
    s: &'a str,
) -> Filter<Split<'a, StaticCharVec>, fn(&&str) -> bool> {
    s.split(HTML_SPACE_CHARACTERS)
        .filter(not_empty as fn(&&str) -> bool)
}
/// Split a string on commas.
///
/// Empty fragments between consecutive commas are filtered out.
#[inline]
pub fn split_commas<'a>(s: &'a str) -> Filter<Split<'a, char>, fn(&&str) -> bool> {
    s.split(',').filter(not_empty as fn(&&str) -> bool)
}
/// Returns whether `*c` is an ASCII digit (`'0'` through `'9'`).
///
/// Kept as a free function taking `&char` because callers pass it as a
/// predicate to `Iterator::take_while`; the body now delegates to the
/// standard library instead of a deprecated `'0'...'9'` range match.
pub fn is_ascii_digit(c: &char) -> bool {
    c.is_ascii_digit()
}
// Whether `c` is the decimal separator of a number literal.
fn is_decimal_point(c: char) -> bool {
    c == '.'
}
// Whether `c` introduces the exponent part of a number (`e` or `E`).
// A plain boolean expression replaces the former match-to-bool.
fn is_exponent_char(c: char) -> bool {
    c == 'e' || c == 'E'
}
/// Reads a run of ASCII digits from `iter` into a number.
///
/// Returns `(value, digit_count)`. `value` is `None` when the input does
/// not start with a digit, or when the accumulated value overflows `i64`
/// (the digit count still reflects every digit consumed).
pub fn read_numbers<I: Iterator<Item = char>>(mut iter: Peekable<I>) -> (Option<i64>, usize) {
    // Bail out early unless the very first character is a digit.
    match iter.peek() {
        Some(c) if c.is_ascii_digit() => (),
        _ => return (None, 0),
    }
    iter.take_while(|c| c.is_ascii_digit())
        .map(|d| d as i64 - '0' as i64)
        .fold((Some(0i64), 0), |(value, count), d| {
            // Checked arithmetic: overflow poisons the value while the
            // count keeps tracking the consumed input.
            let value = value
                .and_then(|v| v.checked_mul(10))
                .and_then(|v| v.checked_add(d));
            (value, count + 1)
        })
}
/// Reads a decimal fraction (a `'.'` followed by ASCII digits) from `iter`.
///
/// Starting from the running total `value`, each digit `d` is added as
/// `d / divisor` after `divisor` has been multiplied by 10 — so passing
/// `divisor = 1.0` makes the first digit count as tenths. Returns the
/// updated value and the number of characters consumed (the `'.'` plus
/// the digits). When the input does not start with `'.'`, returns
/// `(value, 0)` without consuming anything.
pub fn read_fraction<I: Iterator<Item = char>>(
    mut iter: Peekable<I>,
    mut divisor: f64,
    value: f64,
) -> (f64, usize) {
    match iter.peek() {
        Some(&'.') => (),
        _ => return (value, 0),
    }
    iter.next(); // consume the '.'
    iter.take_while(|c| c.is_ascii_digit())
        .map(|d| d as i64 - '0' as i64)
        .fold((value, 1), |(value, count), d| {
            divisor *= 10f64;
            (value + d as f64 / divisor, count + 1)
        })
}
/// Reads an exponent from an iterator over chars, for example `e100`.
pub fn read_exponent<I: Iterator<Item = char>>(mut iter: Peekable<I>) -> Option<i32> {
match iter.peek() {
Some(c) if is_exponent_char(*c) => (),
_ => return None,
}
iter.next();
match iter.peek() {
None => None,
Some(&'-') => {
iter.next();
read_numbers(iter).0.map(|exp| -exp.to_i32().unwrap_or(0))
},
Some(&'+') => {
iter.next();
read_numbers(iter).0.map(|exp| exp.to_i32().unwrap_or(0))
},
Some(_) => read_numbers(iter).0.map(|exp| exp.to_i32().unwrap_or(0)),
}
}
/// Concatenates the items of `strs`, inserting `join` between each pair
/// of consecutive items. An empty iterator produces the empty string.
pub fn str_join<I, T>(strs: I, join: &str) -> String
where
    I: IntoIterator<Item = T>,
    T: AsRef<str>,
{
    let mut result = String::new();
    let mut first = true;
    for s in strs {
        if !first {
            result.push_str(join);
        }
        result.push_str(s.as_ref());
        first = false;
    }
    result
}
/// Returns true if `string` begins with `prefix`, compared
/// ASCII-case-insensitively. Comparison happens on the raw bytes, so no
/// UTF-8 char-boundary check is needed for the prefix slice.
pub fn starts_with_ignore_ascii_case(string: &str, prefix: &str) -> bool {
    match string.as_bytes().get(..prefix.len()) {
        Some(head) => head.eq_ignore_ascii_case(prefix.as_bytes()),
        None => false,
    }
}
/// Returns an ASCII-lowercase version of a string, allocating only when
/// `input` actually contains an ASCII uppercase letter.
///
/// The deprecated `b'A'...b'Z'` range match is replaced with the
/// standard library's `u8::is_ascii_uppercase`.
pub fn string_as_ascii_lowercase<'a>(input: &'a str) -> Cow<'a, str> {
    if input.bytes().any(|b| b.is_ascii_uppercase()) {
        input.to_ascii_lowercase().into()
    } else {
        // No ASCII uppercase letters: hand back the original slice
        // without copying.
        Cow::Borrowed(input)
    }
}
/// To avoid accidentally instantiating multiple monomorphizations of large
/// serialization routines, we define explicit concrete types and require
/// them in those routines. This primarily avoids accidental mixing of UTF8
/// with UTF16 serializations in Gecko.
#[cfg(feature = "gecko")]
pub type CssStringWriter = ::nsstring::nsAString;
|
/// String type that coerces to CssStringWriter, used when serialization code
/// needs to allocate a temporary string.
#[cfg(feature = "gecko")]
pub type CssString = ::nsstring::nsString;
/// Certain serialization code needs to interact with borrowed strings, which
/// are sometimes native UTF8 Rust strings, and other times serialized UTF16
/// strings. This enum multiplexes the two cases.
#[cfg(feature = "gecko")]
pub enum CssStringBorrow<'a> {
    /// A borrow of a UTF16 CssString.
    UTF16(&'a ::nsstring::nsString),
    /// A borrow of a regular Rust UTF8 string.
    UTF8(&'a str),
}
#[cfg(feature = "gecko")]
impl<'a> CssStringBorrow<'a> {
    /// Writes the borrowed string to the provided writer.
    ///
    /// UTF16 borrows are appended directly and always succeed; UTF8
    /// borrows go through `fmt::Write::write_str`, whose result is
    /// propagated to the caller.
    pub fn append_to(&self, dest: &mut CssStringWriter) -> fmt::Result {
        match *self {
            CssStringBorrow::UTF16(s) => {
                dest.append(s);
                Ok(())
            },
            CssStringBorrow::UTF8(s) => dest.write_str(s),
        }
    }
    /// Returns true if the borrowed string is empty.
    pub fn is_empty(&self) -> bool {
        match *self {
            CssStringBorrow::UTF16(s) => s.is_empty(),
            CssStringBorrow::UTF8(s) => s.is_empty(),
        }
    }
}
#[cfg(feature = "gecko")]
impl<'a> From<&'a str> for CssStringBorrow<'a> {
fn from(s: &'a str) -> Self {
CssStringBorrow::UTF8(s)
}
}
#[cfg(feature = "gecko")]
impl<'a> From<&'a ::nsstring::nsString> for CssStringBorrow<'a> {
fn from(s: &'a ::nsstring::nsString) -> Self {
CssStringBorrow::UTF16(s)
}
}
/// String. The comments for the Gecko types explain the need for this abstraction.
#[cfg(feature = "servo")]
pub type CssStringWriter = String;
/// String. The comments for the Gecko types explain the need for this abstraction.
#[cfg(feature = "servo")]
pub type CssString = String;
/// Borrowed string. The comments for the Gecko types explain the need for this abstraction.
///
/// For Servo this is a thin wrapper around a plain `&str`.
#[cfg(feature = "servo")]
pub struct CssStringBorrow<'a>(&'a str);
#[cfg(feature = "servo")]
impl<'a> CssStringBorrow<'a> {
    /// Appends the borrowed string to the given string.
    pub fn append_to(&self, dest: &mut CssStringWriter) -> fmt::Result {
        dest.write_str(self.0)
    }
    /// Returns true if the borrowed string is empty.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}
#[cfg(feature = "servo")]
impl<'a> From<&'a str> for CssStringBorrow<'a> {
    fn from(s: &'a str) -> Self {
        CssStringBorrow(s)
    }
}
#[cfg(feature = "servo")]
impl<'a> From<&'a String> for CssStringBorrow<'a> {
    fn from(s: &'a String) -> Self {
        // `&*s` reborrows the `String` as a `&str` for the newtype.
        CssStringBorrow(&*s)
    }
}
|
random_line_split
|
|
coherence-blanket-conflicts-with-blanket-implemented.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt::Show;
use std::default::Default;
// Test that two blanket impls conflict (at least without negative
// bounds). After all, some other crate could implement Even or Odd
// for the same type (though this crate doesn't).
trait MyTrait {
fn get(&self) -> usize;
}
trait Even { }
trait Odd { }
impl Even for isize { }
impl Odd for usize { }
impl<T:Even> MyTrait for T { //~ ERROR E0119
fn get(&self) -> usize { 0 }
}
impl<T:Odd> MyTrait for T {
fn get(&self) -> usize { 0 }
}
|
fn main() { }
|
random_line_split
|
|
coherence-blanket-conflicts-with-blanket-implemented.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt::Show;
use std::default::Default;
// Test that two blanket impls conflict (at least without negative
// bounds). After all, some other crate could implement Even or Odd
// for the same type (though this crate doesn't).
trait MyTrait {
fn get(&self) -> usize;
}
trait Even { }
trait Odd { }
impl Even for isize { }
impl Odd for usize { }
impl<T:Even> MyTrait for T { //~ ERROR E0119
fn
|
(&self) -> usize { 0 }
}
impl<T:Odd> MyTrait for T {
fn get(&self) -> usize { 0 }
}
fn main() { }
|
get
|
identifier_name
|
coherence-blanket-conflicts-with-blanket-implemented.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt::Show;
use std::default::Default;
// Test that two blanket impls conflict (at least without negative
// bounds). After all, some other crate could implement Even or Odd
// for the same type (though this crate doesn't).
trait MyTrait {
fn get(&self) -> usize;
}
trait Even { }
trait Odd { }
impl Even for isize { }
impl Odd for usize { }
impl<T:Even> MyTrait for T { //~ ERROR E0119
fn get(&self) -> usize { 0 }
}
impl<T:Odd> MyTrait for T {
fn get(&self) -> usize
|
}
fn main() { }
|
{ 0 }
|
identifier_body
|
lib.rs
|
extern crate regex;
use regex::Regex;
/// Result of splitting a time specification into its numeric part and
/// its unit suffix, both kept as strings. ("finded" is historical
/// naming for "found".)
struct ParsedTimeStr {
    finded_time: String,
    finded_type: String,
}
/// A textual time specification such as "1h", "10m" or "5s".
pub struct TimeStr {
    time_string: String,
}
impl TimeStr {
    /// Wraps a textual time specification such as "1h", "10m" or "5s".
    pub fn new(t_str: &str) -> TimeStr {
        TimeStr {
            time_string: t_str.to_string(),
        }
    }

    /// Splits the stored string into its digits and its unit suffix.
    ///
    /// Panics when the string does not contain `<digits><h|m|s>`.
    fn parse_str_time(&self) -> ParsedTimeStr {
        let re = Regex::new(r"(\d+)(h|m|s)").unwrap();
        let caps = re.captures(&self.time_string[..]).unwrap();
        let time_part = match caps.at(1) {
            Some(time) => time,
            None => panic!("Bad format"),
        };
        let type_part = match caps.at(2) {
            Some(t_type) => t_type,
            None => panic!("Bad format"),
        };
        ParsedTimeStr {
            finded_time: time_part.to_string(),
            finded_type: type_part.to_string(),
        }
    }

    /// Converts the stored specification to a number of seconds.
    /// Hours and minutes are scaled; any other matched unit (i.e. "s")
    /// passes the amount through unchanged.
    pub fn to_seconds(&self) -> u32 {
        let parsed = self.parse_str_time();
        let amount: u32 = parsed.finded_time.parse::<u32>().unwrap();
        match &parsed.finded_type[..] {
            "h" => amount * 60 * 60,
            "m" => amount * 60,
            _ => amount,
        }
    }
}
#[test]
fn test_hours() {
let p1 = TimeStr::new("1h");
assert_eq!(3600, p1.to_seconds());
let p2 = TimeStr::new("10h");
assert_eq!(36000, p2.to_seconds());
let p3 = TimeStr::new("5h");
assert_eq!(18000, p3.to_seconds());
}
#[test]
fn
|
() {
let p1 = TimeStr::new("1m");
assert_eq!(60, p1.to_seconds());
let p2 = TimeStr::new("10m");
assert_eq!(600, p2.to_seconds());
let p3 = TimeStr::new("5m");
assert_eq!(300, p3.to_seconds());
}
#[test]
fn test_seconds() {
let p1 = TimeStr::new("1s");
assert_eq!(1, p1.to_seconds());
let p2 = TimeStr::new("10s");
assert_eq!(10, p2.to_seconds());
let p3 = TimeStr::new("5s");
assert_eq!(5, p3.to_seconds());
}
#[test]
#[should_panic]
fn test_bad_format() {
let p1 = TimeStr::new("100a");
p1.to_seconds();
}
|
test_minutes
|
identifier_name
|
lib.rs
|
extern crate regex;
use regex::Regex;
struct ParsedTimeStr {
finded_time: String,
finded_type: String,
}
pub struct TimeStr {
time_string: String,
}
impl TimeStr {
pub fn new(t_str: &str) -> TimeStr {
TimeStr {
time_string: t_str.to_string(),
}
}
fn parse_str_time(&self) -> ParsedTimeStr {
let re = Regex::new(r"(\d+)(h|m|s)").unwrap();
let caps = re.captures(&self.time_string[..]).unwrap();
let mut f_time = "";
let mut f_type = "";
match caps.at(1) {
Some(time) => f_time = time,
None => panic!("Bad format"),
}
match caps.at(2) {
Some(t_type) => f_type = t_type,
None => panic!("Bad format"),
}
ParsedTimeStr {
finded_time: f_time.to_string(),
finded_type: f_type.to_string(),
}
}
pub fn to_seconds(&self) -> u32 {
let parsed_string = self.parse_str_time();
let i_time: u32 = parsed_string.finded_time.parse::<u32>().unwrap();
if parsed_string.finded_type == "h" {
return i_time * 60 * 60;
}
if parsed_string.finded_type == "m" {
return i_time * 60;
}
return i_time;
}
}
#[test]
fn test_hours() {
let p1 = TimeStr::new("1h");
assert_eq!(3600, p1.to_seconds());
let p2 = TimeStr::new("10h");
assert_eq!(36000, p2.to_seconds());
let p3 = TimeStr::new("5h");
assert_eq!(18000, p3.to_seconds());
}
#[test]
fn test_minutes() {
let p1 = TimeStr::new("1m");
assert_eq!(60, p1.to_seconds());
|
assert_eq!(600, p2.to_seconds());
let p3 = TimeStr::new("5m");
assert_eq!(300, p3.to_seconds());
}
#[test]
fn test_seconds() {
let p1 = TimeStr::new("1s");
assert_eq!(1, p1.to_seconds());
let p2 = TimeStr::new("10s");
assert_eq!(10, p2.to_seconds());
let p3 = TimeStr::new("5s");
assert_eq!(5, p3.to_seconds());
}
#[test]
#[should_panic]
fn test_bad_format() {
let p1 = TimeStr::new("100a");
p1.to_seconds();
}
|
let p2 = TimeStr::new("10m");
|
random_line_split
|
lib.rs
|
extern crate regex;
use regex::Regex;
struct ParsedTimeStr {
finded_time: String,
finded_type: String,
}
pub struct TimeStr {
time_string: String,
}
impl TimeStr {
pub fn new(t_str: &str) -> TimeStr {
TimeStr {
time_string: t_str.to_string(),
}
}
fn parse_str_time(&self) -> ParsedTimeStr {
let re = Regex::new(r"(\d+)(h|m|s)").unwrap();
let caps = re.captures(&self.time_string[..]).unwrap();
let mut f_time = "";
let mut f_type = "";
match caps.at(1) {
Some(time) => f_time = time,
None => panic!("Bad format"),
}
match caps.at(2) {
Some(t_type) => f_type = t_type,
None => panic!("Bad format"),
}
ParsedTimeStr {
finded_time: f_time.to_string(),
finded_type: f_type.to_string(),
}
}
pub fn to_seconds(&self) -> u32 {
let parsed_string = self.parse_str_time();
let i_time: u32 = parsed_string.finded_time.parse::<u32>().unwrap();
if parsed_string.finded_type == "h"
|
if parsed_string.finded_type == "m" {
return i_time * 60;
}
return i_time;
}
}
#[test]
fn test_hours() {
let p1 = TimeStr::new("1h");
assert_eq!(3600, p1.to_seconds());
let p2 = TimeStr::new("10h");
assert_eq!(36000, p2.to_seconds());
let p3 = TimeStr::new("5h");
assert_eq!(18000, p3.to_seconds());
}
#[test]
fn test_minutes() {
let p1 = TimeStr::new("1m");
assert_eq!(60, p1.to_seconds());
let p2 = TimeStr::new("10m");
assert_eq!(600, p2.to_seconds());
let p3 = TimeStr::new("5m");
assert_eq!(300, p3.to_seconds());
}
#[test]
fn test_seconds() {
let p1 = TimeStr::new("1s");
assert_eq!(1, p1.to_seconds());
let p2 = TimeStr::new("10s");
assert_eq!(10, p2.to_seconds());
let p3 = TimeStr::new("5s");
assert_eq!(5, p3.to_seconds());
}
#[test]
#[should_panic]
fn test_bad_format() {
let p1 = TimeStr::new("100a");
p1.to_seconds();
}
|
{
return i_time * 60 * 60;
}
|
conditional_block
|
det.rs
|
// Std imports
use num::traits::{Float, Signed, One};
// local imports
use sralgebra::{CommutativeRingPartial};
use srmatrix::api::{Matrix};
use srmatrix::api::{Shape, Extraction, MatrixBuffer, Search};
use srmatrix::api::eo_traits::ECO;
use srmatrix::api::SRError;
#[doc="Returns the determinant of a matrix.
# Remarks
This is a naive implementation of determinant based on the
definition of determinant. See det_float for better implementation
for floating point matrices.
Usually determinants based on
elimination or factorization are much faster.
The determinant is defined only for square matrices.
The determinant of an empty matrix is 1.
See http://en.wikipedia.org/wiki/Matrix_(mathematics)#Empty_matrices.
"]
pub fn det<T:CommutativeRingPartial+Signed>(m : &Matrix<T>)->Result<T,SRError>{
if!m.is_square(){
return Err(SRError::IsNotSquareMatrix);
}
if m.is_empty(){
return Ok(One::one());
}
Ok(det_naive(m))
}
#[doc="Returns the determinant of a matrix of floating point numbers.
# Remarks
"]
pub fn det_float<T:CommutativeRingPartial+Float+Signed>(m : &Matrix<T>)->Result<T,SRError>{
if!m.is_square(){
return Err(SRError::IsNotSquareMatrix);
}
if m.is_empty(){
return Ok(One::one());
}
Ok(det_ge(&mut m.clone()))
}
/// Private implementation of determinant.
/// Assumes that matrix is indeed square.
///
/// Recursively expands along the first row (cofactor expansion):
/// det(m) = sum over c of sign * m[0][c] * det(minor(0, c)).
/// The buffer `m2` is reused to hold each successive minor, so only one
/// (n-1)x(n-1) matrix is allocated per recursion level.
pub fn det_naive<T:CommutativeRingPartial>(m : &Matrix<T>)->T{
    // Base case: by convention the empty matrix has determinant one.
    if m.is_empty(){
        return One::one();
    }
    let a0 = m.get(0, 0).unwrap();
    //debug!("m: {}", m);
    // Base case: a 1x1 matrix is its own determinant.
    if m.is_scalar(){
        return a0;
    }
    let n = m.num_cols();
    // m2 initially holds the minor of element (0, 0): rows 1.., cols 1..
    let mut m2 = m.sub_matrix(1,1, n-1, n-1);
    let ps = m.as_ptr();
    let pd = m2.as_mut_ptr();
    let mut sign : T = One::one();
    // First term of the expansion: +a0 * det(minor(0, 0)).
    let mut result = sign * a0 * det_naive(&m2);
    sign = -sign;
    for c in 0..(n-1){
        // Overwrite column c of m2 with column c of m (rows 1..n),
        // turning m2 into the minor of element (0, c+1).
        for r in 1..n{
            //debug!("r : {}, c : {}", r, c);
            let src_offset = m.cell_to_offset(r, c);
            let dst_offset = m2.cell_to_offset(r - 1, c);
            //debug_assert!(src_offset < m.capacity() as int);
            //debug_assert!(dst_offset < m2.capacity() as int);
            // SAFETY(review): offsets come from cell_to_offset for
            // in-range cells; relies on Matrix buffer invariants —
            // confirm against srmatrix.
            unsafe {
                let v = *ps.offset(src_offset);
                //debug!("v = {}", v);
                *pd.offset(dst_offset) = v;
            }
        }
        let ai = m.get(0, c+1).unwrap();
        let ai_minor_det = det_naive(&m2);
        //debug!("sign: {}, ai: {}, Ai : {}", sign, ai, ai_minor_det);
        // Accumulate the alternating-sign cofactor term.
        result = result + sign * ai * ai_minor_det;
        sign = -sign;
    }
    result
}
#[doc="Computes determinant using Gaussian
elimination with partial pivoting
"]
pub fn det_ge<T:CommutativeRingPartial+Signed+Float+PartialOrd>(a : &mut Matrix<T>)->T{
assert!(a.is_square());
let o = One::one();
let mut result : T = o;
let n = a.num_cols();
// Iterate over rows
for k in 0..n{
// We are working on k-th row.
// Find the pivot position in the row
let (_, cc) = a.max_abs_scalar_in_row(k, k, n);
if cc > k {
// We need to exchange columns of the submatrix.
let mut l_tr = a.view(k, k, n - k, n - k);
l_tr.eco_switch(0, cc - k);
// The sign of determinant would change
// depending on whether the permutation is
// even or odd.
let diff = cc - k;
if (diff & 1)!= 0 {
// the gap in columns is odd.
// we should change the sign of determinant.
result = - result;
}
}
// The top right part of L matrix
let mut l_tr = a.view(k, k, n - k, n -k);
// Pick up the pivot
let pivot = l_tr.get(0, 0).unwrap();
if pivot.is_zero() {
// This is a singular matrix
return pivot;
}
// Update determinant
result = result * pivot;
// bring 1 in the diagonal
l_tr.eco_scale(0, o/pivot);
for c in 1..l_tr.num_cols(){
let first = l_tr.get(0, c).unwrap();
l_tr.eco_scale_add(c, 0, -first);
}
}
result
}
/******************************************************
*
* Unit tests
*
******************************************************/
#[cfg(test)]
mod test{
use super::*;
use srmatrix::api::*;
use matrix::mat_traits::*;
use testdata;
#[test]
fn test_det_0(){
let m = matrix_rw_f64(2,2, &[
1., 2.,
3., 4.]);
let d = det_naive(&m);
assert_eq!(d, -2.);
let d = det_ge(&mut m.clone());
assert_eq!(d, -2.);
}
#[test]
fn test_det_1(){
let m = matrix_rw_f64(3, 3, &[1., 2., 3.,
4., 5., 6.,
7., 8., 9.]);
let d = det_naive(&m);
assert_eq!(d, 0.);
let d = det_ge(&mut m.clone());
assert_eq!(d, 0.);
}
#[test]
fn test_det_hadamard(){
let m = hadamard(4).unwrap();
let d = det_naive(&m);
assert_eq!(d, 16.0);
let d = det_ge(&mut m.clone());
assert_eq!(d, 16.0);
let m = hadamard(8).unwrap();
let d = det_naive(&m);
assert_eq!(d, 4096.0);
let d = det_ge(&mut m.clone());
assert_eq!(d, 4096.0);
}
#[test]
fn test_det_hilbert()
|
#[test]
fn test_empty_mat_det(){
let m : MatrixI64 = Matrix::new_uninitialized(0, 0);
let d = m.det().unwrap();
assert_eq!(d, 1);
}
#[test]
fn test_examples_from_testdata(){
assert_eq!(testdata::matrix::square_0().det().unwrap(), -13.);
assert_eq!(testdata::matrix::square_1().det().unwrap(), 6.);
}
}
/******************************************************
*
* Benchmarks
*
******************************************************/
// Benchmarks are kept for reference but commented out: they depend on
// the unstable `test` crate (see the `extern crate test` line below).
#[cfg(test)]
mod bench{
    // extern crate test;
    // use self::test::Bencher;
    // use super::*;
    // use matrix::constructors::*;
    // #[bench]
    // fn bench_det_naive_hadamard_8 (b: &mut Bencher){
    //     let a = hadamard(8).unwrap();
    //     b.iter(|| {
    //         det_naive(&a);
    //     });
    // }
    // #[bench]
    // fn bench_det_ge_hadamard_8 (b: &mut Bencher){
    //     let a = hadamard(8).unwrap();
    //     b.iter(|| {
    //         det_ge(&mut a.clone());
    //     });
    // }
    // #[bench]
    // #[ignore]
    // fn bench_det_naive_hadamard_16 (b: &mut Bencher){
    //     let a = hadamard(16).unwrap();
    //     b.iter(|| {
    //         det_naive(&a);
    //     });
    // }
}
|
{
let sizes = vec![1, 2, 4, 8];
let determinants = vec![1.0, 0.083333333333333,
1.653439153439264e-07, 2.737050310006999e-33];
let threshold = 1e-10;
for (size, expected_value) in sizes.iter().zip(determinants.iter()){
let m = hilbert(*size);
let d = det_naive(&m);
assert!((d - *expected_value).abs() < threshold);
let d = det_ge(&mut m.clone());
assert!((d - *expected_value).abs() < threshold);
}
}
|
identifier_body
|
det.rs
|
// Std imports
use num::traits::{Float, Signed, One};
// local imports
use sralgebra::{CommutativeRingPartial};
use srmatrix::api::{Matrix};
use srmatrix::api::{Shape, Extraction, MatrixBuffer, Search};
use srmatrix::api::eo_traits::ECO;
use srmatrix::api::SRError;
#[doc="Returns the determinant of a matrix.
# Remarks
This is a naive implementation of determinant based on the
definition of determinant. See det_float for better implementation
for floating point matrices.
Usually determinants based on
elimination or factorization are much faster.
The determinant is defined only for square matrices.
The determinant of an empty matrix is 1.
See http://en.wikipedia.org/wiki/Matrix_(mathematics)#Empty_matrices.
"]
pub fn det<T:CommutativeRingPartial+Signed>(m : &Matrix<T>)->Result<T,SRError>{
if!m.is_square(){
return Err(SRError::IsNotSquareMatrix);
}
if m.is_empty(){
return Ok(One::one());
}
Ok(det_naive(m))
}
#[doc="Returns the determinant of a matrix of floating point numbers.
# Remarks
"]
pub fn det_float<T:CommutativeRingPartial+Float+Signed>(m : &Matrix<T>)->Result<T,SRError>{
if!m.is_square(){
return Err(SRError::IsNotSquareMatrix);
}
if m.is_empty(){
return Ok(One::one());
}
Ok(det_ge(&mut m.clone()))
}
/// Private implementation of determinant
/// Assumes that matrix is indeed square.
pub fn det_naive<T:CommutativeRingPartial>(m : &Matrix<T>)->T{
if m.is_empty(){
return One::one();
}
let a0 = m.get(0, 0).unwrap();
//debug!("m: {}", m);
if m.is_scalar(){
return a0;
}
let n = m.num_cols();
let mut m2 = m.sub_matrix(1,1, n-1, n-1);
let ps = m.as_ptr();
let pd = m2.as_mut_ptr();
let mut sign : T = One::one();
let mut result = sign * a0 * det_naive(&m2);
sign = -sign;
for c in 0..(n-1){
for r in 1..n{
//debug!("r : {}, c : {}", r, c);
let src_offset = m.cell_to_offset(r, c);
let dst_offset = m2.cell_to_offset(r - 1, c);
//debug_assert!(src_offset < m.capacity() as int);
//debug_assert!(dst_offset < m2.capacity() as int);
unsafe {
let v = *ps.offset(src_offset);
//debug!("v = {}", v);
*pd.offset(dst_offset) = v;
}
}
let ai = m.get(0, c+1).unwrap();
let ai_minor_det = det_naive(&m2);
//debug!("sign: {}, ai: {}, Ai : {}", sign, ai, ai_minor_det);
result = result + sign * ai * ai_minor_det;
sign = -sign;
}
result
}
#[doc="Computes determinant using Gaussian
elimination with partial pivoting
"]
pub fn
|
<T:CommutativeRingPartial+Signed+Float+PartialOrd>(a : &mut Matrix<T>)->T{
assert!(a.is_square());
let o = One::one();
let mut result : T = o;
let n = a.num_cols();
// Iterate over rows
for k in 0..n{
// We are working on k-th row.
// Find the pivot position in the row
let (_, cc) = a.max_abs_scalar_in_row(k, k, n);
if cc > k {
// We need to exchange columns of the submatrix.
let mut l_tr = a.view(k, k, n - k, n - k);
l_tr.eco_switch(0, cc - k);
// The sign of determinant would change
// depending on whether the permutation is
// even or odd.
let diff = cc - k;
if (diff & 1)!= 0 {
// the gap in columns is odd.
// we should change the sign of determinant.
result = - result;
}
}
// The top right part of L matrix
let mut l_tr = a.view(k, k, n - k, n -k);
// Pick up the pivot
let pivot = l_tr.get(0, 0).unwrap();
if pivot.is_zero() {
// This is a singular matrix
return pivot;
}
// Update determinant
result = result * pivot;
// bring 1 in the diagonal
l_tr.eco_scale(0, o/pivot);
for c in 1..l_tr.num_cols(){
let first = l_tr.get(0, c).unwrap();
l_tr.eco_scale_add(c, 0, -first);
}
}
result
}
/******************************************************
*
* Unit tests
*
******************************************************/
#[cfg(test)]
mod test{
use super::*;
use srmatrix::api::*;
use matrix::mat_traits::*;
use testdata;
#[test]
fn test_det_0(){
let m = matrix_rw_f64(2,2, &[
1., 2.,
3., 4.]);
let d = det_naive(&m);
assert_eq!(d, -2.);
let d = det_ge(&mut m.clone());
assert_eq!(d, -2.);
}
#[test]
fn test_det_1(){
let m = matrix_rw_f64(3, 3, &[1., 2., 3.,
4., 5., 6.,
7., 8., 9.]);
let d = det_naive(&m);
assert_eq!(d, 0.);
let d = det_ge(&mut m.clone());
assert_eq!(d, 0.);
}
#[test]
fn test_det_hadamard(){
let m = hadamard(4).unwrap();
let d = det_naive(&m);
assert_eq!(d, 16.0);
let d = det_ge(&mut m.clone());
assert_eq!(d, 16.0);
let m = hadamard(8).unwrap();
let d = det_naive(&m);
assert_eq!(d, 4096.0);
let d = det_ge(&mut m.clone());
assert_eq!(d, 4096.0);
}
#[test]
fn test_det_hilbert(){
let sizes = vec![1, 2, 4, 8];
let determinants = vec![1.0, 0.083333333333333,
1.653439153439264e-07, 2.737050310006999e-33];
let threshold = 1e-10;
for (size, expected_value) in sizes.iter().zip(determinants.iter()){
let m = hilbert(*size);
let d = det_naive(&m);
assert!((d - *expected_value).abs() < threshold);
let d = det_ge(&mut m.clone());
assert!((d - *expected_value).abs() < threshold);
}
}
#[test]
fn test_empty_mat_det(){
let m : MatrixI64 = Matrix::new_uninitialized(0, 0);
let d = m.det().unwrap();
assert_eq!(d, 1);
}
#[test]
fn test_examples_from_testdata(){
assert_eq!(testdata::matrix::square_0().det().unwrap(), -13.);
assert_eq!(testdata::matrix::square_1().det().unwrap(), 6.);
}
}
/******************************************************
*
* Benchmarks
*
******************************************************/
#[cfg(test)]
mod bench{
// extern crate test;
// use self::test::Bencher;
// use super::*;
// use matrix::constructors::*;
// #[bench]
// fn bench_det_naive_hadamard_8 (b: &mut Bencher){
// let a = hadamard(8).unwrap();
// b.iter(|| {
// det_naive(&a);
// });
// }
// #[bench]
// fn bench_det_ge_hadamard_8 (b: &mut Bencher){
// let a = hadamard(8).unwrap();
// b.iter(|| {
// det_ge(&mut a.clone());
// });
// }
// #[bench]
// #[ignore]
// fn bench_det_naive_hadamard_16 (b: &mut Bencher){
// let a = hadamard(16).unwrap();
// b.iter(|| {
// det_naive(&a);
// });
// }
}
|
det_ge
|
identifier_name
|
det.rs
|
// Std imports
use num::traits::{Float, Signed, One};
// local imports
use sralgebra::{CommutativeRingPartial};
use srmatrix::api::{Matrix};
use srmatrix::api::{Shape, Extraction, MatrixBuffer, Search};
use srmatrix::api::eo_traits::ECO;
use srmatrix::api::SRError;
#[doc="Returns the determinant of a matrix.
# Remarks
This is a naive implementation of determinant based on the
definition of determinant. See det_float for better implementation
for floating point matrices.
Usually determinants based on
elimination or factorization are much faster.
The determinant is defined only for square matrices.
The determinant of an empty matrix is 1.
See http://en.wikipedia.org/wiki/Matrix_(mathematics)#Empty_matrices.
"]
pub fn det<T:CommutativeRingPartial+Signed>(m : &Matrix<T>)->Result<T,SRError>{
if!m.is_square(){
return Err(SRError::IsNotSquareMatrix);
}
if m.is_empty(){
return Ok(One::one());
}
Ok(det_naive(m))
}
#[doc="Returns the determinant of a matrix of floating point numbers.
# Remarks
"]
pub fn det_float<T:CommutativeRingPartial+Float+Signed>(m : &Matrix<T>)->Result<T,SRError>{
if!m.is_square(){
return Err(SRError::IsNotSquareMatrix);
}
if m.is_empty(){
return Ok(One::one());
}
Ok(det_ge(&mut m.clone()))
}
/// Private implementation of determinant
/// Assumes that matrix is indeed square.
pub fn det_naive<T:CommutativeRingPartial>(m : &Matrix<T>)->T{
if m.is_empty()
|
let a0 = m.get(0, 0).unwrap();
//debug!("m: {}", m);
if m.is_scalar(){
return a0;
}
let n = m.num_cols();
let mut m2 = m.sub_matrix(1,1, n-1, n-1);
let ps = m.as_ptr();
let pd = m2.as_mut_ptr();
let mut sign : T = One::one();
let mut result = sign * a0 * det_naive(&m2);
sign = -sign;
for c in 0..(n-1){
for r in 1..n{
//debug!("r : {}, c : {}", r, c);
let src_offset = m.cell_to_offset(r, c);
let dst_offset = m2.cell_to_offset(r - 1, c);
//debug_assert!(src_offset < m.capacity() as int);
//debug_assert!(dst_offset < m2.capacity() as int);
unsafe {
let v = *ps.offset(src_offset);
//debug!("v = {}", v);
*pd.offset(dst_offset) = v;
}
}
let ai = m.get(0, c+1).unwrap();
let ai_minor_det = det_naive(&m2);
//debug!("sign: {}, ai: {}, Ai : {}", sign, ai, ai_minor_det);
result = result + sign * ai * ai_minor_det;
sign = -sign;
}
result
}
#[doc="Computes determinant using Gaussian
elimination with partial pivoting
"]
pub fn det_ge<T:CommutativeRingPartial+Signed+Float+PartialOrd>(a : &mut Matrix<T>)->T{
assert!(a.is_square());
let o = One::one();
let mut result : T = o;
let n = a.num_cols();
// Iterate over rows
for k in 0..n{
// We are working on k-th row.
// Find the pivot position in the row
let (_, cc) = a.max_abs_scalar_in_row(k, k, n);
if cc > k {
// We need to exchange columns of the submatrix.
let mut l_tr = a.view(k, k, n - k, n - k);
l_tr.eco_switch(0, cc - k);
// The sign of determinant would change
// depending on whether the permutation is
// even or odd.
let diff = cc - k;
if (diff & 1)!= 0 {
// the gap in columns is odd.
// we should change the sign of determinant.
result = - result;
}
}
// The top right part of L matrix
let mut l_tr = a.view(k, k, n - k, n -k);
// Pick up the pivot
let pivot = l_tr.get(0, 0).unwrap();
if pivot.is_zero() {
// This is a singular matrix
return pivot;
}
// Update determinant
result = result * pivot;
// bring 1 in the diagonal
l_tr.eco_scale(0, o/pivot);
for c in 1..l_tr.num_cols(){
let first = l_tr.get(0, c).unwrap();
l_tr.eco_scale_add(c, 0, -first);
}
}
result
}
/******************************************************
*
* Unit tests
*
******************************************************/
#[cfg(test)]
mod test{
use super::*;
use srmatrix::api::*;
use matrix::mat_traits::*;
use testdata;
#[test]
fn test_det_0(){
let m = matrix_rw_f64(2,2, &[
1., 2.,
3., 4.]);
let d = det_naive(&m);
assert_eq!(d, -2.);
let d = det_ge(&mut m.clone());
assert_eq!(d, -2.);
}
#[test]
fn test_det_1(){
let m = matrix_rw_f64(3, 3, &[1., 2., 3.,
4., 5., 6.,
7., 8., 9.]);
let d = det_naive(&m);
assert_eq!(d, 0.);
let d = det_ge(&mut m.clone());
assert_eq!(d, 0.);
}
#[test]
fn test_det_hadamard(){
let m = hadamard(4).unwrap();
let d = det_naive(&m);
assert_eq!(d, 16.0);
let d = det_ge(&mut m.clone());
assert_eq!(d, 16.0);
let m = hadamard(8).unwrap();
let d = det_naive(&m);
assert_eq!(d, 4096.0);
let d = det_ge(&mut m.clone());
assert_eq!(d, 4096.0);
}
#[test]
fn test_det_hilbert(){
let sizes = vec![1, 2, 4, 8];
let determinants = vec![1.0, 0.083333333333333,
1.653439153439264e-07, 2.737050310006999e-33];
let threshold = 1e-10;
for (size, expected_value) in sizes.iter().zip(determinants.iter()){
let m = hilbert(*size);
let d = det_naive(&m);
assert!((d - *expected_value).abs() < threshold);
let d = det_ge(&mut m.clone());
assert!((d - *expected_value).abs() < threshold);
}
}
#[test]
fn test_empty_mat_det(){
let m : MatrixI64 = Matrix::new_uninitialized(0, 0);
let d = m.det().unwrap();
assert_eq!(d, 1);
}
#[test]
fn test_examples_from_testdata(){
assert_eq!(testdata::matrix::square_0().det().unwrap(), -13.);
assert_eq!(testdata::matrix::square_1().det().unwrap(), 6.);
}
}
/******************************************************
*
* Benchmarks
*
******************************************************/
#[cfg(test)]
mod bench{
// extern crate test;
// use self::test::Bencher;
// use super::*;
// use matrix::constructors::*;
// #[bench]
// fn bench_det_naive_hadamard_8 (b: &mut Bencher){
// let a = hadamard(8).unwrap();
// b.iter(|| {
// det_naive(&a);
// });
// }
// #[bench]
// fn bench_det_ge_hadamard_8 (b: &mut Bencher){
// let a = hadamard(8).unwrap();
// b.iter(|| {
// det_ge(&mut a.clone());
// });
// }
// #[bench]
// #[ignore]
// fn bench_det_naive_hadamard_16 (b: &mut Bencher){
// let a = hadamard(16).unwrap();
// b.iter(|| {
// det_naive(&a);
// });
// }
}
|
{
return One::one();
}
|
conditional_block
|
det.rs
|
// Std imports
use num::traits::{Float, Signed, One};
// local imports
use sralgebra::{CommutativeRingPartial};
use srmatrix::api::{Matrix};
use srmatrix::api::{Shape, Extraction, MatrixBuffer, Search};
use srmatrix::api::eo_traits::ECO;
use srmatrix::api::SRError;
#[doc="Returns the determinant of a matrix.
# Remarks
This is a naive implementation of determinant based on the
definition of determinant. See det_float for better implementation
for floating point matrices.
Usually determinants based on
elimination or factorization are much faster.
The determinant is defined only for square matrices.
The determinant of an empty matrix is 1.
See http://en.wikipedia.org/wiki/Matrix_(mathematics)#Empty_matrices.
"]
pub fn det<T:CommutativeRingPartial+Signed>(m : &Matrix<T>)->Result<T,SRError>{
if!m.is_square(){
return Err(SRError::IsNotSquareMatrix);
}
if m.is_empty(){
return Ok(One::one());
}
Ok(det_naive(m))
}
#[doc="Returns the determinant of a matrix of floating point numbers.
# Remarks
"]
pub fn det_float<T:CommutativeRingPartial+Float+Signed>(m : &Matrix<T>)->Result<T,SRError>{
if!m.is_square(){
return Err(SRError::IsNotSquareMatrix);
}
if m.is_empty(){
return Ok(One::one());
}
Ok(det_ge(&mut m.clone()))
}
/// Private implementation of determinant
/// Assumes that matrix is indeed square.
pub fn det_naive<T:CommutativeRingPartial>(m : &Matrix<T>)->T{
if m.is_empty(){
return One::one();
}
let a0 = m.get(0, 0).unwrap();
//debug!("m: {}", m);
if m.is_scalar(){
return a0;
}
let n = m.num_cols();
let mut m2 = m.sub_matrix(1,1, n-1, n-1);
let ps = m.as_ptr();
let pd = m2.as_mut_ptr();
let mut sign : T = One::one();
let mut result = sign * a0 * det_naive(&m2);
sign = -sign;
for c in 0..(n-1){
for r in 1..n{
//debug!("r : {}, c : {}", r, c);
let src_offset = m.cell_to_offset(r, c);
let dst_offset = m2.cell_to_offset(r - 1, c);
//debug_assert!(src_offset < m.capacity() as int);
//debug_assert!(dst_offset < m2.capacity() as int);
unsafe {
let v = *ps.offset(src_offset);
//debug!("v = {}", v);
*pd.offset(dst_offset) = v;
}
}
let ai = m.get(0, c+1).unwrap();
let ai_minor_det = det_naive(&m2);
//debug!("sign: {}, ai: {}, Ai : {}", sign, ai, ai_minor_det);
result = result + sign * ai * ai_minor_det;
sign = -sign;
}
result
}
#[doc="Computes determinant using Gaussian
elimination with partial pivoting
"]
pub fn det_ge<T:CommutativeRingPartial+Signed+Float+PartialOrd>(a : &mut Matrix<T>)->T{
assert!(a.is_square());
let o = One::one();
let mut result : T = o;
let n = a.num_cols();
// Iterate over rows
for k in 0..n{
// We are working on k-th row.
// Find the pivot position in the row
let (_, cc) = a.max_abs_scalar_in_row(k, k, n);
if cc > k {
// We need to exchange columns of the submatrix.
let mut l_tr = a.view(k, k, n - k, n - k);
l_tr.eco_switch(0, cc - k);
// The sign of determinant would change
// depending on whether the permutation is
// even or odd.
let diff = cc - k;
if (diff & 1)!= 0 {
// the gap in columns is odd.
// we should change the sign of determinant.
result = - result;
}
}
// The top right part of L matrix
let mut l_tr = a.view(k, k, n - k, n -k);
// Pick up the pivot
let pivot = l_tr.get(0, 0).unwrap();
if pivot.is_zero() {
// This is a singular matrix
return pivot;
}
// Update determinant
result = result * pivot;
// bring 1 in the diagonal
l_tr.eco_scale(0, o/pivot);
for c in 1..l_tr.num_cols(){
let first = l_tr.get(0, c).unwrap();
l_tr.eco_scale_add(c, 0, -first);
}
}
result
}
|
/******************************************************
*
* Unit tests
*
******************************************************/
#[cfg(test)]
mod test{
use super::*;
use srmatrix::api::*;
use matrix::mat_traits::*;
use testdata;
#[test]
fn test_det_0(){
let m = matrix_rw_f64(2,2, &[
1., 2.,
3., 4.]);
let d = det_naive(&m);
assert_eq!(d, -2.);
let d = det_ge(&mut m.clone());
assert_eq!(d, -2.);
}
#[test]
fn test_det_1(){
let m = matrix_rw_f64(3, 3, &[1., 2., 3.,
4., 5., 6.,
7., 8., 9.]);
let d = det_naive(&m);
assert_eq!(d, 0.);
let d = det_ge(&mut m.clone());
assert_eq!(d, 0.);
}
#[test]
fn test_det_hadamard(){
let m = hadamard(4).unwrap();
let d = det_naive(&m);
assert_eq!(d, 16.0);
let d = det_ge(&mut m.clone());
assert_eq!(d, 16.0);
let m = hadamard(8).unwrap();
let d = det_naive(&m);
assert_eq!(d, 4096.0);
let d = det_ge(&mut m.clone());
assert_eq!(d, 4096.0);
}
#[test]
fn test_det_hilbert(){
let sizes = vec![1, 2, 4, 8];
let determinants = vec![1.0, 0.083333333333333,
1.653439153439264e-07, 2.737050310006999e-33];
let threshold = 1e-10;
for (size, expected_value) in sizes.iter().zip(determinants.iter()){
let m = hilbert(*size);
let d = det_naive(&m);
assert!((d - *expected_value).abs() < threshold);
let d = det_ge(&mut m.clone());
assert!((d - *expected_value).abs() < threshold);
}
}
#[test]
fn test_empty_mat_det(){
let m : MatrixI64 = Matrix::new_uninitialized(0, 0);
let d = m.det().unwrap();
assert_eq!(d, 1);
}
#[test]
fn test_examples_from_testdata(){
assert_eq!(testdata::matrix::square_0().det().unwrap(), -13.);
assert_eq!(testdata::matrix::square_1().det().unwrap(), 6.);
}
}
/******************************************************
*
* Benchmarks
*
******************************************************/
#[cfg(test)]
mod bench{
// extern crate test;
// use self::test::Bencher;
// use super::*;
// use matrix::constructors::*;
// #[bench]
// fn bench_det_naive_hadamard_8 (b: &mut Bencher){
// let a = hadamard(8).unwrap();
// b.iter(|| {
// det_naive(&a);
// });
// }
// #[bench]
// fn bench_det_ge_hadamard_8 (b: &mut Bencher){
// let a = hadamard(8).unwrap();
// b.iter(|| {
// det_ge(&mut a.clone());
// });
// }
// #[bench]
// #[ignore]
// fn bench_det_naive_hadamard_16 (b: &mut Bencher){
// let a = hadamard(16).unwrap();
// b.iter(|| {
// det_naive(&a);
// });
// }
}
|
random_line_split
|
|
thread.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use alloc::boxed::FnBox;
use io;
use libc::{self, c_void, DWORD};
use mem;
use ptr;
use sys::c;
use sys::handle::Handle;
use sys_common::thread::*;
use time::Duration;
pub struct Thread {
handle: Handle
}
impl Thread {
pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
-> io::Result<Thread> {
let p = box p;
// FIXME On UNIX, we guard against stack sizes that are too small but
// that's because pthreads enforces that stacks are at least
// PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
// just that below a certain threshold you can't do anything useful.
// That threshold is application and architecture-specific, however.
// Round up to the next 64 kB because that's what the NT kernel does,
// might as well make it explicit.
let stack_size = (stack + 0xfffe) & (!0xfffe);
let ret = c::CreateThread(ptr::null_mut(), stack_size as libc::size_t,
thread_start, &*p as *const _ as *mut _,
0, ptr::null_mut());
return if ret as usize == 0 {
Err(io::Error::last_os_error())
} else {
mem::forget(p); // ownership passed to CreateThread
Ok(Thread { handle: Handle::new(ret) })
|
unsafe { start_thread(main); }
0
}
}
pub fn set_name(_name: &str) {
// Windows threads are nameless
// The names in MSVC debugger are obtained using a "magic" exception,
// which requires a use of MS C++ extensions.
// See https://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
}
pub fn join(self) {
use libc::consts::os::extra::INFINITE;
unsafe { c::WaitForSingleObject(self.handle.raw(), INFINITE); }
}
pub fn yield_now() {
// This function will return 0 if there are no other threads to execute,
// but this also means that the yield was useless so this isn't really a
// case that needs to be worried about.
unsafe { c::SwitchToThread(); }
}
pub fn sleep(dur: Duration) {
unsafe {
c::Sleep(super::dur2timeout(dur))
}
}
}
pub mod guard {
pub unsafe fn current() -> Option<usize> { None }
pub unsafe fn init() -> Option<usize> { None }
}
|
};
extern "system" fn thread_start(main: *mut libc::c_void) -> DWORD {
|
random_line_split
|
thread.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use alloc::boxed::FnBox;
use io;
use libc::{self, c_void, DWORD};
use mem;
use ptr;
use sys::c;
use sys::handle::Handle;
use sys_common::thread::*;
use time::Duration;
pub struct Thread {
handle: Handle
}
impl Thread {
pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
-> io::Result<Thread> {
let p = box p;
// FIXME On UNIX, we guard against stack sizes that are too small but
// that's because pthreads enforces that stacks are at least
// PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
// just that below a certain threshold you can't do anything useful.
// That threshold is application and architecture-specific, however.
// Round up to the next 64 kB because that's what the NT kernel does,
// might as well make it explicit.
let stack_size = (stack + 0xfffe) & (!0xfffe);
let ret = c::CreateThread(ptr::null_mut(), stack_size as libc::size_t,
thread_start, &*p as *const _ as *mut _,
0, ptr::null_mut());
return if ret as usize == 0 {
Err(io::Error::last_os_error())
} else
|
;
extern "system" fn thread_start(main: *mut libc::c_void) -> DWORD {
unsafe { start_thread(main); }
0
}
}
pub fn set_name(_name: &str) {
// Windows threads are nameless
// The names in MSVC debugger are obtained using a "magic" exception,
// which requires a use of MS C++ extensions.
// See https://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
}
pub fn join(self) {
use libc::consts::os::extra::INFINITE;
unsafe { c::WaitForSingleObject(self.handle.raw(), INFINITE); }
}
pub fn yield_now() {
// This function will return 0 if there are no other threads to execute,
// but this also means that the yield was useless so this isn't really a
// case that needs to be worried about.
unsafe { c::SwitchToThread(); }
}
pub fn sleep(dur: Duration) {
unsafe {
c::Sleep(super::dur2timeout(dur))
}
}
}
pub mod guard {
pub unsafe fn current() -> Option<usize> { None }
pub unsafe fn init() -> Option<usize> { None }
}
|
{
mem::forget(p); // ownership passed to CreateThread
Ok(Thread { handle: Handle::new(ret) })
}
|
conditional_block
|
thread.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use alloc::boxed::FnBox;
use io;
use libc::{self, c_void, DWORD};
use mem;
use ptr;
use sys::c;
use sys::handle::Handle;
use sys_common::thread::*;
use time::Duration;
pub struct Thread {
handle: Handle
}
impl Thread {
pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
-> io::Result<Thread> {
let p = box p;
// FIXME On UNIX, we guard against stack sizes that are too small but
// that's because pthreads enforces that stacks are at least
// PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
// just that below a certain threshold you can't do anything useful.
// That threshold is application and architecture-specific, however.
// Round up to the next 64 kB because that's what the NT kernel does,
// might as well make it explicit.
let stack_size = (stack + 0xfffe) & (!0xfffe);
let ret = c::CreateThread(ptr::null_mut(), stack_size as libc::size_t,
thread_start, &*p as *const _ as *mut _,
0, ptr::null_mut());
return if ret as usize == 0 {
Err(io::Error::last_os_error())
} else {
mem::forget(p); // ownership passed to CreateThread
Ok(Thread { handle: Handle::new(ret) })
};
extern "system" fn thread_start(main: *mut libc::c_void) -> DWORD {
unsafe { start_thread(main); }
0
}
}
pub fn set_name(_name: &str) {
// Windows threads are nameless
// The names in MSVC debugger are obtained using a "magic" exception,
// which requires a use of MS C++ extensions.
// See https://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
}
pub fn
|
(self) {
use libc::consts::os::extra::INFINITE;
unsafe { c::WaitForSingleObject(self.handle.raw(), INFINITE); }
}
pub fn yield_now() {
// This function will return 0 if there are no other threads to execute,
// but this also means that the yield was useless so this isn't really a
// case that needs to be worried about.
unsafe { c::SwitchToThread(); }
}
pub fn sleep(dur: Duration) {
unsafe {
c::Sleep(super::dur2timeout(dur))
}
}
}
pub mod guard {
pub unsafe fn current() -> Option<usize> { None }
pub unsafe fn init() -> Option<usize> { None }
}
|
join
|
identifier_name
|
thread.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use alloc::boxed::FnBox;
use io;
use libc::{self, c_void, DWORD};
use mem;
use ptr;
use sys::c;
use sys::handle::Handle;
use sys_common::thread::*;
use time::Duration;
pub struct Thread {
handle: Handle
}
impl Thread {
pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
-> io::Result<Thread> {
let p = box p;
// FIXME On UNIX, we guard against stack sizes that are too small but
// that's because pthreads enforces that stacks are at least
// PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
// just that below a certain threshold you can't do anything useful.
// That threshold is application and architecture-specific, however.
// Round up to the next 64 kB because that's what the NT kernel does,
// might as well make it explicit.
let stack_size = (stack + 0xfffe) & (!0xfffe);
let ret = c::CreateThread(ptr::null_mut(), stack_size as libc::size_t,
thread_start, &*p as *const _ as *mut _,
0, ptr::null_mut());
return if ret as usize == 0 {
Err(io::Error::last_os_error())
} else {
mem::forget(p); // ownership passed to CreateThread
Ok(Thread { handle: Handle::new(ret) })
};
extern "system" fn thread_start(main: *mut libc::c_void) -> DWORD {
unsafe { start_thread(main); }
0
}
}
pub fn set_name(_name: &str) {
// Windows threads are nameless
// The names in MSVC debugger are obtained using a "magic" exception,
// which requires a use of MS C++ extensions.
// See https://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
}
pub fn join(self) {
use libc::consts::os::extra::INFINITE;
unsafe { c::WaitForSingleObject(self.handle.raw(), INFINITE); }
}
pub fn yield_now() {
// This function will return 0 if there are no other threads to execute,
// but this also means that the yield was useless so this isn't really a
// case that needs to be worried about.
unsafe { c::SwitchToThread(); }
}
pub fn sleep(dur: Duration) {
unsafe {
c::Sleep(super::dur2timeout(dur))
}
}
}
pub mod guard {
pub unsafe fn current() -> Option<usize> { None }
pub unsafe fn init() -> Option<usize>
|
}
|
{ None }
|
identifier_body
|
lint-unused-unsafe.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Exercise the unused_unsafe attribute in some positive and negative cases
#[allow(dead_code)];
#[deny(unused_unsafe)];
mod foo {
extern {
pub fn bar();
}
}
fn callback<T>(_f: || -> T) -> T { fail!() }
unsafe fn unsf() {}
fn bad1() { unsafe {} } //~ ERROR: unnecessary `unsafe` block
fn bad2() { unsafe { bad1() } } //~ ERROR: unnecessary `unsafe` block
unsafe fn bad3() { unsafe {} } //~ ERROR: unnecessary `unsafe` block
fn bad4() { unsafe { callback(||{}) } } //~ ERROR: unnecessary `unsafe` block
unsafe fn bad5() { unsafe { unsf() } } //~ ERROR: unnecessary `unsafe` block
fn bad6() {
unsafe { // don't put the warning here
unsafe { //~ ERROR: unnecessary `unsafe` block
|
unsafe { //~ ERROR: unnecessary `unsafe` block
unsafe { //~ ERROR: unnecessary `unsafe` block
unsf()
}
}
}
unsafe fn good0() { unsf() }
fn good1() { unsafe { unsf() } }
fn good2() {
/* bug uncovered when implementing warning about unused unsafe blocks. Be
sure that when purity is inherited that the source of the unsafe-ness
is tracked correctly */
unsafe {
unsafe fn what() -> ~[~str] { fail!() }
callback(|| {
what();
});
}
}
unsafe fn good3() { foo::bar() }
fn good4() { unsafe { foo::bar() } }
#[allow(unused_unsafe)] fn allowed() { unsafe {} }
fn main() {}
|
unsf()
}
}
}
unsafe fn bad7() {
|
random_line_split
|
lint-unused-unsafe.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Exercise the unused_unsafe attribute in some positive and negative cases
#[allow(dead_code)];
#[deny(unused_unsafe)];
mod foo {
extern {
pub fn bar();
}
}
fn callback<T>(_f: || -> T) -> T
|
unsafe fn unsf() {}
fn bad1() { unsafe {} } //~ ERROR: unnecessary `unsafe` block
fn bad2() { unsafe { bad1() } } //~ ERROR: unnecessary `unsafe` block
unsafe fn bad3() { unsafe {} } //~ ERROR: unnecessary `unsafe` block
fn bad4() { unsafe { callback(||{}) } } //~ ERROR: unnecessary `unsafe` block
unsafe fn bad5() { unsafe { unsf() } } //~ ERROR: unnecessary `unsafe` block
fn bad6() {
unsafe { // don't put the warning here
unsafe { //~ ERROR: unnecessary `unsafe` block
unsf()
}
}
}
unsafe fn bad7() {
unsafe { //~ ERROR: unnecessary `unsafe` block
unsafe { //~ ERROR: unnecessary `unsafe` block
unsf()
}
}
}
unsafe fn good0() { unsf() }
fn good1() { unsafe { unsf() } }
fn good2() {
/* bug uncovered when implementing warning about unused unsafe blocks. Be
sure that when purity is inherited that the source of the unsafe-ness
is tracked correctly */
unsafe {
unsafe fn what() -> ~[~str] { fail!() }
callback(|| {
what();
});
}
}
unsafe fn good3() { foo::bar() }
fn good4() { unsafe { foo::bar() } }
#[allow(unused_unsafe)] fn allowed() { unsafe {} }
fn main() {}
|
{ fail!() }
|
identifier_body
|
lint-unused-unsafe.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Exercise the unused_unsafe attribute in some positive and negative cases
#[allow(dead_code)];
#[deny(unused_unsafe)];
mod foo {
extern {
pub fn bar();
}
}
fn callback<T>(_f: || -> T) -> T { fail!() }
unsafe fn unsf() {}
fn bad1() { unsafe {} } //~ ERROR: unnecessary `unsafe` block
fn bad2() { unsafe { bad1() } } //~ ERROR: unnecessary `unsafe` block
unsafe fn bad3() { unsafe {} } //~ ERROR: unnecessary `unsafe` block
fn bad4() { unsafe { callback(||{}) } } //~ ERROR: unnecessary `unsafe` block
unsafe fn bad5() { unsafe { unsf() } } //~ ERROR: unnecessary `unsafe` block
fn bad6() {
unsafe { // don't put the warning here
unsafe { //~ ERROR: unnecessary `unsafe` block
unsf()
}
}
}
unsafe fn bad7() {
unsafe { //~ ERROR: unnecessary `unsafe` block
unsafe { //~ ERROR: unnecessary `unsafe` block
unsf()
}
}
}
unsafe fn good0() { unsf() }
fn
|
() { unsafe { unsf() } }
fn good2() {
/* bug uncovered when implementing warning about unused unsafe blocks. Be
sure that when purity is inherited that the source of the unsafe-ness
is tracked correctly */
unsafe {
unsafe fn what() -> ~[~str] { fail!() }
callback(|| {
what();
});
}
}
unsafe fn good3() { foo::bar() }
fn good4() { unsafe { foo::bar() } }
#[allow(unused_unsafe)] fn allowed() { unsafe {} }
fn main() {}
|
good1
|
identifier_name
|
dbus.rs
|
use chan::{Sender, Receiver};
use dbus::{self, BusType, Connection, Message, MessageItem, NameFlag, Signature};
use dbus::arg::{Arg, ArgType, Get, Iter};
use dbus::tree::{Argument, Factory};
use std::thread;
use std::convert::From;
use std::str::FromStr;
use uuid::Uuid;
use datatype::{Command, DBusConfig, Event, InstalledFirmware, InstalledPackage,
InstallResult, InstalledSoftware, InstallReport};
use gateway::Gateway;
use interpreter::CommandExec;
/// The `DBus` gateway is used with the RVI module for communicating with the
/// system session bus.
#[derive(Clone)]
pub struct DBus {
pub cfg: DBusConfig
}
impl Gateway for DBus {
fn start(&mut self, ctx: Sender<CommandExec>, erx: Receiver<Event>) {
info!("Starting DBus gateway.");
let cfg = self.cfg.clone();
let conn = Connection::get_private(BusType::Session).expect("couldn't get dbus session bus");
conn.register_name(&cfg.name, NameFlag::ReplaceExisting as u32).expect("couldn't register name");
let arg0 = Argument::new(Some("update_id".into()), Signature::new("s").expect("arg1 signature"));
let arg1 = arg0.clone();
let arg2 = Argument::new(Some("operations_results".into()), Signature::new("aa{sv}").expect("arg2 signature"));
let ctx1 = ctx.clone();
let ctx2 = ctx.clone();
let fact = Factory::new_fn::<()>();
let tree = fact.tree(()).add(
fact.object_path(cfg.path, ()).introspectable().add(
fact.interface(cfg.interface, ())
.add_m(fact.method("initiateDownload", (), move |info| {
debug!("dbus initiateDownload called: {:?}", info);
let uuid = Uuid::from_str(info.msg.read1()?)
.map_err(|err| dbus::Error::new_custom("read1", &format!("{}", err)))?;
ctx1.send(CommandExec { cmd: Command::StartDownload(uuid), etx: None });
Ok(Vec::new())
}).in_arg(arg0))
.add_m(fact.method("updateReport", (), move |info| {
debug!("dbus updateReport called: {:?}", info);
let (id, res): (String, Vec<InstallResult>) = info.msg.read2()?;
let report = InstallReport::new(id, res);
ctx2.send(CommandExec { cmd: Command::SendInstallReport(report), etx: None });
Ok(Vec::new())
}).in_arg(arg1).in_arg(arg2))));
let session_cfg = self.cfg.clone();
let session_ctx = ctx.clone();
thread::spawn(move || {
let session = Session::new(session_ctx, session_cfg);
loop {
session.handle_event(erx.recv().expect("dbus etx closed"))
}
});
tree.set_registered(&conn, true).expect("couldn't set registered");
for _ in tree.run(&conn, conn.iter(1000)) {}
}
}
struct Session {
conn: Connection,
ctx: Sender<CommandExec>,
dest: String,
path: String,
iface: String,
timeout: i32,
}
impl Session {
fn new(ctx: Sender<CommandExec>, cfg: DBusConfig) -> Self {
Session {
conn: Connection::get_private(BusType::Session).expect("couldn't get session bus"),
ctx: ctx,
dest: cfg.software_manager.clone(),
path: cfg.software_manager_path.clone(),
iface: cfg.software_manager.clone(),
timeout: cfg.timeout,
}
}
fn send_async(&self, msg: Message) {
let _ = self.conn.send(msg).map_err(|err| error!("couldn't send dbus message: {:?}", err));
}
fn send_sync(&self, msg: Message) -> Result<Message, dbus::Error> {
self.conn.send_with_reply_and_block(msg, self.timeout)
}
fn send_internal(&self, cmd: Command) {
self.ctx.send(CommandExec { cmd: cmd, etx: None });
}
fn
|
(&self, method: &str, args: &[MessageItem]) -> Message {
let mut msg = Message::new_method_call(&self.dest, &self.path, &self.iface, method).expect("new dbus message");
msg.append_items(args);
msg
}
fn handle_event(&self, event: Event) {
match event {
Event::UpdateAvailable(avail) => {
let msg = self.new_message("updateAvailable", &[
MessageItem::from(avail.update_id),
MessageItem::from(avail.signature),
MessageItem::from(avail.description),
MessageItem::from(avail.request_confirmation)
]);
self.send_async(msg);
}
Event::DownloadComplete(comp) => {
let msg = self.new_message("downloadComplete", &[
MessageItem::from(comp.update_image),
MessageItem::from(comp.signature)
]);
self.send_async(msg);
}
Event::InstalledSoftwareNeeded => {
let msg = self.new_message("getInstalledPackages", &[
MessageItem::from(true), // include packages?
MessageItem::from(false) // include firmware?
]);
self.send_sync(msg)
.map(|reply| reply.read2()
.map(|(pkgs, firms): (Vec<InstalledPackage>, Vec<InstalledFirmware>)| {
let inst = InstalledSoftware::new(pkgs, firms);
self.send_internal(Command::SendInstalledSoftware(inst));
})
.unwrap_or_else(|err| error!("failed to send SendInstalledSoftware: {}", err))
)
.unwrap_or_else(|err| error!("failed to send InstalledSoftwareNeeded: {}", err));
}
_ => ()
}
}
}
// FIXME: parsing implementations
impl Arg for InstallResult {
fn arg_type() -> ArgType { ArgType::Variant }
fn signature() -> Signature<'static> { unsafe { Signature::from_slice_unchecked(b"v\0") } }
}
impl Arg for InstalledPackage {
fn arg_type() -> ArgType { ArgType::Variant }
fn signature() -> Signature<'static> { unsafe { Signature::from_slice_unchecked(b"v\0") } }
}
impl Arg for InstalledFirmware {
fn arg_type() -> ArgType { ArgType::Variant }
fn signature() -> Signature<'static> { unsafe { Signature::from_slice_unchecked(b"v\0") } }
}
impl<'a> Get<'a> for InstallResult {
fn get(_: &mut Iter<'a>) -> Option<Self> {
None
}
}
impl<'a> Get<'a> for InstalledPackage {
fn get(_: &mut Iter<'a>) -> Option<Self> {
None
}
}
impl<'a> Get<'a> for InstalledFirmware {
fn get(_: &mut Iter<'a>) -> Option<Self> {
None
}
}
|
new_message
|
identifier_name
|
dbus.rs
|
use chan::{Sender, Receiver};
use dbus::{self, BusType, Connection, Message, MessageItem, NameFlag, Signature};
use dbus::arg::{Arg, ArgType, Get, Iter};
use dbus::tree::{Argument, Factory};
use std::thread;
use std::convert::From;
use std::str::FromStr;
use uuid::Uuid;
use datatype::{Command, DBusConfig, Event, InstalledFirmware, InstalledPackage,
InstallResult, InstalledSoftware, InstallReport};
use gateway::Gateway;
use interpreter::CommandExec;
/// The `DBus` gateway is used with the RVI module for communicating with the
/// system session bus.
#[derive(Clone)]
pub struct DBus {
pub cfg: DBusConfig
}
impl Gateway for DBus {
fn start(&mut self, ctx: Sender<CommandExec>, erx: Receiver<Event>) {
info!("Starting DBus gateway.");
let cfg = self.cfg.clone();
let conn = Connection::get_private(BusType::Session).expect("couldn't get dbus session bus");
conn.register_name(&cfg.name, NameFlag::ReplaceExisting as u32).expect("couldn't register name");
let arg0 = Argument::new(Some("update_id".into()), Signature::new("s").expect("arg1 signature"));
let arg1 = arg0.clone();
let arg2 = Argument::new(Some("operations_results".into()), Signature::new("aa{sv}").expect("arg2 signature"));
let ctx1 = ctx.clone();
let ctx2 = ctx.clone();
let fact = Factory::new_fn::<()>();
let tree = fact.tree(()).add(
fact.object_path(cfg.path, ()).introspectable().add(
fact.interface(cfg.interface, ())
.add_m(fact.method("initiateDownload", (), move |info| {
debug!("dbus initiateDownload called: {:?}", info);
let uuid = Uuid::from_str(info.msg.read1()?)
.map_err(|err| dbus::Error::new_custom("read1", &format!("{}", err)))?;
ctx1.send(CommandExec { cmd: Command::StartDownload(uuid), etx: None });
Ok(Vec::new())
}).in_arg(arg0))
.add_m(fact.method("updateReport", (), move |info| {
debug!("dbus updateReport called: {:?}", info);
let (id, res): (String, Vec<InstallResult>) = info.msg.read2()?;
let report = InstallReport::new(id, res);
ctx2.send(CommandExec { cmd: Command::SendInstallReport(report), etx: None });
Ok(Vec::new())
}).in_arg(arg1).in_arg(arg2))));
let session_cfg = self.cfg.clone();
let session_ctx = ctx.clone();
thread::spawn(move || {
let session = Session::new(session_ctx, session_cfg);
loop {
session.handle_event(erx.recv().expect("dbus etx closed"))
}
});
tree.set_registered(&conn, true).expect("couldn't set registered");
for _ in tree.run(&conn, conn.iter(1000)) {}
}
}
struct Session {
conn: Connection,
ctx: Sender<CommandExec>,
dest: String,
path: String,
iface: String,
timeout: i32,
}
impl Session {
fn new(ctx: Sender<CommandExec>, cfg: DBusConfig) -> Self {
Session {
conn: Connection::get_private(BusType::Session).expect("couldn't get session bus"),
ctx: ctx,
dest: cfg.software_manager.clone(),
path: cfg.software_manager_path.clone(),
iface: cfg.software_manager.clone(),
timeout: cfg.timeout,
}
}
fn send_async(&self, msg: Message) {
let _ = self.conn.send(msg).map_err(|err| error!("couldn't send dbus message: {:?}", err));
}
fn send_sync(&self, msg: Message) -> Result<Message, dbus::Error> {
self.conn.send_with_reply_and_block(msg, self.timeout)
}
fn send_internal(&self, cmd: Command) {
self.ctx.send(CommandExec { cmd: cmd, etx: None });
}
fn new_message(&self, method: &str, args: &[MessageItem]) -> Message {
let mut msg = Message::new_method_call(&self.dest, &self.path, &self.iface, method).expect("new dbus message");
msg.append_items(args);
msg
}
fn handle_event(&self, event: Event) {
match event {
Event::UpdateAvailable(avail) => {
let msg = self.new_message("updateAvailable", &[
MessageItem::from(avail.update_id),
MessageItem::from(avail.signature),
MessageItem::from(avail.description),
MessageItem::from(avail.request_confirmation)
]);
self.send_async(msg);
}
|
MessageItem::from(comp.update_image),
MessageItem::from(comp.signature)
]);
self.send_async(msg);
}
Event::InstalledSoftwareNeeded => {
let msg = self.new_message("getInstalledPackages", &[
MessageItem::from(true), // include packages?
MessageItem::from(false) // include firmware?
]);
self.send_sync(msg)
.map(|reply| reply.read2()
.map(|(pkgs, firms): (Vec<InstalledPackage>, Vec<InstalledFirmware>)| {
let inst = InstalledSoftware::new(pkgs, firms);
self.send_internal(Command::SendInstalledSoftware(inst));
})
.unwrap_or_else(|err| error!("failed to send SendInstalledSoftware: {}", err))
)
.unwrap_or_else(|err| error!("failed to send InstalledSoftwareNeeded: {}", err));
}
_ => ()
}
}
}
// FIXME: parsing implementations
impl Arg for InstallResult {
fn arg_type() -> ArgType { ArgType::Variant }
fn signature() -> Signature<'static> { unsafe { Signature::from_slice_unchecked(b"v\0") } }
}
impl Arg for InstalledPackage {
fn arg_type() -> ArgType { ArgType::Variant }
fn signature() -> Signature<'static> { unsafe { Signature::from_slice_unchecked(b"v\0") } }
}
impl Arg for InstalledFirmware {
fn arg_type() -> ArgType { ArgType::Variant }
fn signature() -> Signature<'static> { unsafe { Signature::from_slice_unchecked(b"v\0") } }
}
impl<'a> Get<'a> for InstallResult {
fn get(_: &mut Iter<'a>) -> Option<Self> {
None
}
}
impl<'a> Get<'a> for InstalledPackage {
fn get(_: &mut Iter<'a>) -> Option<Self> {
None
}
}
impl<'a> Get<'a> for InstalledFirmware {
fn get(_: &mut Iter<'a>) -> Option<Self> {
None
}
}
|
Event::DownloadComplete(comp) => {
let msg = self.new_message("downloadComplete", &[
|
random_line_split
|
dbus.rs
|
use chan::{Sender, Receiver};
use dbus::{self, BusType, Connection, Message, MessageItem, NameFlag, Signature};
use dbus::arg::{Arg, ArgType, Get, Iter};
use dbus::tree::{Argument, Factory};
use std::thread;
use std::convert::From;
use std::str::FromStr;
use uuid::Uuid;
use datatype::{Command, DBusConfig, Event, InstalledFirmware, InstalledPackage,
InstallResult, InstalledSoftware, InstallReport};
use gateway::Gateway;
use interpreter::CommandExec;
/// The `DBus` gateway is used with the RVI module for communicating with the
/// system session bus.
#[derive(Clone)]
pub struct DBus {
pub cfg: DBusConfig
}
impl Gateway for DBus {
fn start(&mut self, ctx: Sender<CommandExec>, erx: Receiver<Event>) {
info!("Starting DBus gateway.");
let cfg = self.cfg.clone();
let conn = Connection::get_private(BusType::Session).expect("couldn't get dbus session bus");
conn.register_name(&cfg.name, NameFlag::ReplaceExisting as u32).expect("couldn't register name");
let arg0 = Argument::new(Some("update_id".into()), Signature::new("s").expect("arg1 signature"));
let arg1 = arg0.clone();
let arg2 = Argument::new(Some("operations_results".into()), Signature::new("aa{sv}").expect("arg2 signature"));
let ctx1 = ctx.clone();
let ctx2 = ctx.clone();
let fact = Factory::new_fn::<()>();
let tree = fact.tree(()).add(
fact.object_path(cfg.path, ()).introspectable().add(
fact.interface(cfg.interface, ())
.add_m(fact.method("initiateDownload", (), move |info| {
debug!("dbus initiateDownload called: {:?}", info);
let uuid = Uuid::from_str(info.msg.read1()?)
.map_err(|err| dbus::Error::new_custom("read1", &format!("{}", err)))?;
ctx1.send(CommandExec { cmd: Command::StartDownload(uuid), etx: None });
Ok(Vec::new())
}).in_arg(arg0))
.add_m(fact.method("updateReport", (), move |info| {
debug!("dbus updateReport called: {:?}", info);
let (id, res): (String, Vec<InstallResult>) = info.msg.read2()?;
let report = InstallReport::new(id, res);
ctx2.send(CommandExec { cmd: Command::SendInstallReport(report), etx: None });
Ok(Vec::new())
}).in_arg(arg1).in_arg(arg2))));
let session_cfg = self.cfg.clone();
let session_ctx = ctx.clone();
thread::spawn(move || {
let session = Session::new(session_ctx, session_cfg);
loop {
session.handle_event(erx.recv().expect("dbus etx closed"))
}
});
tree.set_registered(&conn, true).expect("couldn't set registered");
for _ in tree.run(&conn, conn.iter(1000)) {}
}
}
struct Session {
conn: Connection,
ctx: Sender<CommandExec>,
dest: String,
path: String,
iface: String,
timeout: i32,
}
impl Session {
fn new(ctx: Sender<CommandExec>, cfg: DBusConfig) -> Self {
Session {
conn: Connection::get_private(BusType::Session).expect("couldn't get session bus"),
ctx: ctx,
dest: cfg.software_manager.clone(),
path: cfg.software_manager_path.clone(),
iface: cfg.software_manager.clone(),
timeout: cfg.timeout,
}
}
fn send_async(&self, msg: Message) {
let _ = self.conn.send(msg).map_err(|err| error!("couldn't send dbus message: {:?}", err));
}
fn send_sync(&self, msg: Message) -> Result<Message, dbus::Error> {
self.conn.send_with_reply_and_block(msg, self.timeout)
}
fn send_internal(&self, cmd: Command) {
self.ctx.send(CommandExec { cmd: cmd, etx: None });
}
fn new_message(&self, method: &str, args: &[MessageItem]) -> Message {
let mut msg = Message::new_method_call(&self.dest, &self.path, &self.iface, method).expect("new dbus message");
msg.append_items(args);
msg
}
fn handle_event(&self, event: Event) {
match event {
Event::UpdateAvailable(avail) => {
let msg = self.new_message("updateAvailable", &[
MessageItem::from(avail.update_id),
MessageItem::from(avail.signature),
MessageItem::from(avail.description),
MessageItem::from(avail.request_confirmation)
]);
self.send_async(msg);
}
Event::DownloadComplete(comp) => {
let msg = self.new_message("downloadComplete", &[
MessageItem::from(comp.update_image),
MessageItem::from(comp.signature)
]);
self.send_async(msg);
}
Event::InstalledSoftwareNeeded => {
let msg = self.new_message("getInstalledPackages", &[
MessageItem::from(true), // include packages?
MessageItem::from(false) // include firmware?
]);
self.send_sync(msg)
.map(|reply| reply.read2()
.map(|(pkgs, firms): (Vec<InstalledPackage>, Vec<InstalledFirmware>)| {
let inst = InstalledSoftware::new(pkgs, firms);
self.send_internal(Command::SendInstalledSoftware(inst));
})
.unwrap_or_else(|err| error!("failed to send SendInstalledSoftware: {}", err))
)
.unwrap_or_else(|err| error!("failed to send InstalledSoftwareNeeded: {}", err));
}
_ => ()
}
}
}
// FIXME: parsing implementations
impl Arg for InstallResult {
fn arg_type() -> ArgType { ArgType::Variant }
fn signature() -> Signature<'static> { unsafe { Signature::from_slice_unchecked(b"v\0") } }
}
impl Arg for InstalledPackage {
fn arg_type() -> ArgType { ArgType::Variant }
fn signature() -> Signature<'static> { unsafe { Signature::from_slice_unchecked(b"v\0") } }
}
impl Arg for InstalledFirmware {
fn arg_type() -> ArgType { ArgType::Variant }
fn signature() -> Signature<'static> { unsafe { Signature::from_slice_unchecked(b"v\0") } }
}
impl<'a> Get<'a> for InstallResult {
fn get(_: &mut Iter<'a>) -> Option<Self> {
None
}
}
impl<'a> Get<'a> for InstalledPackage {
fn get(_: &mut Iter<'a>) -> Option<Self> {
None
}
}
impl<'a> Get<'a> for InstalledFirmware {
fn get(_: &mut Iter<'a>) -> Option<Self>
|
}
|
{
None
}
|
identifier_body
|
api.rs
|
use std::fmt;
use std::ops::Deref;
use std::marker::PhantomData;
use std::error::Error as StdError;
use std::result::Result as StdResult;
use serde::de;
use serde_json::{self, Error as JsonError};
use hyper::client::Client as HttpClient;
use hyper::Error as HttpError;
use url::{self, ParseError as UrlError, Url};
use oauth2::token::Token;
use auth::{AccessToken, OAuth, Permissions};
pub const VK_DOMAIN: &'static str = "api.vk.com";
pub const VK_PATH: &'static str = "method";
#[cfg(feature = "unstable")]
include!("api.rs.in");
#[cfg(not(feature = "unstable"))]
include!(concat!(env!("OUT_DIR"), "/api.rs"));
pub type OwnerId = i64;
pub type Id = u64;
pub type Timestamp = u64;
pub type Duration = u32;
pub type Bool = u8;
#[derive(Copy, Eq, Clone, PartialEq, Debug, Default)]
pub struct FullId(pub OwnerId, pub Id);
impl fmt::Display for FullId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}_{}", self.0, self.1)
}
}
impl From<(OwnerId, Id)> for FullId {
fn from(pair: (OwnerId, Id)) -> FullId {
FullId(pair.0, pair.1)
}
}
impl Into<(OwnerId, Id)> for FullId {
fn into(self) -> (OwnerId, Id) {
(self.0, self.1)
}
}
pub struct Client {
client: HttpClient,
}
#[derive(Debug)]
pub enum Error {
Api(ApiError),
Http(HttpError),
Json(JsonError),
}
impl ::std::fmt::Display for Error {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match *self {
Error::Api(ref err) => err.fmt(f),
Error::Http(ref err) => err.fmt(f),
Error::Json(ref err) => err.fmt(f),
}
}
}
impl From<ApiError> for Error {
fn from(err: ApiError) -> Error {
Error::Api(err)
}
}
impl From<HttpError> for Error {
fn from(err: HttpError) -> Error {
Error::Http(err)
}
}
impl From<JsonError> for Error {
fn from(err: JsonError) -> Error {
Error::Json(err)
}
}
impl From<UrlError> for Error {
fn
|
(err: UrlError) -> Error {
Error::Http(HttpError::Uri(err))
}
}
pub type Result<T> = StdResult<T, Error>;
impl Client {
pub fn auth<K, S>(&self, key: K, secret: S) -> OAuth
where K: Into<String>,
S: Into<String>
{
OAuth::new(&self.client, key.into(), secret.into())
}
pub fn new() -> Client {
Client { client: HttpClient::new() }
}
pub fn get<T: Request>(&self, token: Option<&AccessToken>, req: &T) -> Result<T::Response> {
let url = req.to_url();
let mut query = req.to_query_string();
if let Some(ref token) = token {
query.push_str("&access_token=");
query.push_str(token.access_token());
}
self.client
.post(url)
.body(&query)
.send()
.map_err(Error::Http)
.and_then(|resp| serde_json::from_reader::<_, ApiResult<T::Response>>(resp).map_err(Error::Json))
.and_then(|vkres| vkres.0.map_err(Error::Api))
}
}
/// Trait for things that can be posted to VK API directly
pub trait Request {
type Response: de::Deserialize;
fn method_name() -> &'static str;
fn to_query_string(&self) -> String;
fn permissions() -> Permissions {
Permissions::new(0)
}
fn to_url(&self) -> Url {
Url {
scheme: String::from("https"),
scheme_data: url::SchemeData::Relative(url::RelativeSchemeData {
username: String::new(),
password: None,
host: url::Host::Domain(String::from(VK_DOMAIN)),
port: Some(443),
default_port: None,
path: vec![String::from(VK_PATH), String::from(Self::method_name())],
}),
query: None,
fragment: None,
}
}
}
#[derive(Debug)]
pub struct ApiResult<T>(pub StdResult<T, ApiError>);
impl<T> Deref for ApiResult<T> {
type Target = StdResult<T, ApiError>;
fn deref(&self) -> &StdResult<T, ApiError> {
&self.0
}
}
enum ApiResultField {
Response,
Error,
}
impl de::Deserialize for ApiResultField {
fn deserialize<D: de::Deserializer>(d: &mut D) -> StdResult<ApiResultField, D::Error> {
struct ApiResultFieldVisitor;
impl de::Visitor for ApiResultFieldVisitor {
type Value = ApiResultField;
fn visit_str<E: de::Error>(&mut self, value: &str) -> StdResult<ApiResultField, E> {
match value {
"response" => Ok(ApiResultField::Response),
"error" => Ok(ApiResultField::Error),
_ => Err(de::Error::syntax("expected response or error")),
}
}
}
d.visit(ApiResultFieldVisitor)
}
}
impl<T: de::Deserialize> de::Deserialize for ApiResult<T> {
fn deserialize<D: de::Deserializer>(d: &mut D) -> StdResult<ApiResult<T>, D::Error> {
struct ApiResultVisitor<T: de::Deserialize>(PhantomData<T>);
impl<T: de::Deserialize> de::Visitor for ApiResultVisitor<T> {
type Value = ApiResult<T>;
fn visit_map<V: de::MapVisitor>(&mut self, mut v: V) -> StdResult<ApiResult<T>, V::Error> {
v.visit_key()
.and_then(|k| {
k.map(|k| {
match k {
ApiResultField::Response => v.visit_value::<T>().map(Ok),
ApiResultField::Error => v.visit_value::<ApiError>().map(Err),
}
})
.unwrap_or_else(|| v.missing_field("response or error"))
})
.and_then(|res| v.end().map(|_| res))
.map(ApiResult)
}
}
d.visit_map(ApiResultVisitor(PhantomData::<T>))
}
}
impl Into<(String, String)> for KeyVal {
fn into(self) -> (String, String) {
(self.key, self.value)
}
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum ErrorCode {
General, // 1
Database, // 2
Unauthorized, // 5
Signature, // 10
Request, // 11
ScriptCompileError, // 12
ScriptRuntimeError, // 13
Banned, // 18
Blocked, // 19
GoodsNotFound, // 20
GoodsUnvailable, // 21
UserNotFound, // 22
RequiredParameterMissing, // 100
InvalidHash, // 121
InvalidAudio, // 123
UserMenuAccessDenied, // 148
AccessDenied, // 204
AccessToWallPostDenied, // 210
PostAddAccessDenied, // 214
AdsPostWasRecentlyAdded, // 219,
TooManyRecipients, // 220,
HyperlinksForbidden, // 222
UserDisabledTrackBroadcast, // 221
CopyrightedObjectRemoved, // 270
InvalidFilename, // 301
SizeLimitReached, // 302
VideoAlreadyAdded, // 800
VideoCommentsClosed, // 801
App(u32), // 100-999
Unknown(u32), // other
}
impl From<u32> for ErrorCode {
fn from(value: u32) -> ErrorCode {
use self::ErrorCode::*;
match value {
1 => General,
2 => Database,
5 => Unauthorized,
10 => Signature,
11 => Request,
12 => ScriptCompileError,
13 => ScriptRuntimeError,
18 => Banned,
19 => Blocked,
20 => GoodsNotFound,
21 => GoodsUnvailable,
22 => UserNotFound,
100 => RequiredParameterMissing,
121 => InvalidHash,
123 => InvalidAudio,
148 => UserMenuAccessDenied,
204 => AccessDenied,
210 => AccessToWallPostDenied,
214 => PostAddAccessDenied,
219 => AdsPostWasRecentlyAdded,
220 => TooManyRecipients,
222 => HyperlinksForbidden,
221 => UserDisabledTrackBroadcast,
270 => CopyrightedObjectRemoved,
301 => InvalidFilename,
302 => SizeLimitReached,
800 => VideoAlreadyAdded,
801 => VideoCommentsClosed,
v @ 100...999 => App(v),
v @ _ => Unknown(v),
}
}
}
impl Into<u32> for ErrorCode {
fn into(self) -> u32 {
use self::ErrorCode::*;
match self {
General => 1,
Database => 2,
Unauthorized => 5,
Signature => 10,
Request => 11,
ScriptCompileError => 12,
ScriptRuntimeError => 13,
Banned => 18,
Blocked => 19,
GoodsNotFound => 20,
GoodsUnvailable => 21,
UserNotFound => 22,
RequiredParameterMissing => 100,
InvalidHash => 121,
InvalidAudio => 123,
UserMenuAccessDenied => 148,
AccessDenied => 204,
AccessToWallPostDenied => 210,
PostAddAccessDenied => 214,
AdsPostWasRecentlyAdded => 219,
TooManyRecipients => 220,
HyperlinksForbidden => 222,
UserDisabledTrackBroadcast => 221,
CopyrightedObjectRemoved => 270,
InvalidFilename => 301,
SizeLimitReached => 302,
VideoAlreadyAdded => 800,
VideoCommentsClosed => 801,
App(v) => v,
Unknown(v) => v,
}
}
}
impl fmt::Display for ErrorCode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::ErrorCode::*;
match *self {
General => f.write_str("general error"),
Database => f.write_str("database error"),
Unauthorized => f.write_str("unauthorized"),
Signature => f.write_str("invalid signature"),
Request => f.write_str("invalid request"),
ScriptCompileError => f.write_str("compile script error"),
ScriptRuntimeError => f.write_str("runtime script error"),
Banned => f.write_str("banned or deleted"),
Blocked => f.write_str("content blocked"),
GoodsNotFound => f.write_str("goods not found"),
GoodsUnvailable => f.write_str("goods unavailable"),
UserNotFound => f.write_str("user not found"),
RequiredParameterMissing => f.write_str("one of required parameters is missing"),
InvalidHash => f.write_str("invalid hash"),
InvalidAudio => f.write_str("invalid audio"),
UserMenuAccessDenied => f.write_str("access to the menu of the user denied"),
AccessDenied => f.write_str("access denied"),
AccessToWallPostDenied => f.write_str("access to wall's post denied"),
PostAddAccessDenied => f.write_str("access to adding post denied"),
AdsPostWasRecentlyAdded => f.write_str("ads post was recently added"),
TooManyRecipients => f.write_str("too many recipients"),
HyperlinksForbidden => f.write_str("hyperlinks are forbidden"),
UserDisabledTrackBroadcast => f.write_str("user disabled track name broadcast"),
CopyrightedObjectRemoved => f.write_str("object was removed by copyright holder request"),
InvalidFilename => f.write_str("invalid filename"),
SizeLimitReached => f.write_str("object size limit is reached"),
VideoAlreadyAdded => f.write_str("video is already added"),
VideoCommentsClosed => f.write_str("comments for this video are closed"),
App(v) => write!(f, "application error #{}", v),
Unknown(v) => write!(f, "unknown error #{}", v),
}
}
}
impl de::Deserialize for ErrorCode {
fn deserialize<D: de::Deserializer>(d: &mut D) -> StdResult<ErrorCode, D::Error> {
u32::deserialize(d).map(From::from)
}
}
impl StdError for ApiError {
fn description(&self) -> &str {
&*self.error_msg
}
}
impl fmt::Display for ApiError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}: {}", self.error_code, self.error_msg)
}
}
pub enum Privacy {
All,
Friends,
FriendsOfFriends,
FriendsOfFriendsOnly,
Nobody,
OnlyMe,
List(u64),
OnlyList(u64),
User(u64),
OnlyUser(u64),
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum Sort {
DateAdded = 0,
Length = 1,
Popularity = 2,
}
impl AsRef<str> for Sort {
fn as_ref(&self) -> &str {
use self::Sort::*;
match *self {
DateAdded => "0",
Length => "1",
Popularity => "2",
}
}
}
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
#[repr(u8)]
pub enum ReportReason {
Spam = 0,
ChildPorn = 1,
Extremism = 2,
Violence = 3,
Drugs = 4,
AdultOnly = 5,
Offence = 6,
}
impl AsRef<str> for ReportReason {
fn as_ref(&self) -> &str {
use self::ReportReason::*;
match *self {
Spam => "0",
ChildPorn => "1",
Extremism => "2",
Violence => "3",
Drugs => "4",
AdultOnly => "5",
Offence => "6",
}
}
}
|
from
|
identifier_name
|
api.rs
|
use std::fmt;
use std::ops::Deref;
use std::marker::PhantomData;
use std::error::Error as StdError;
use std::result::Result as StdResult;
use serde::de;
use serde_json::{self, Error as JsonError};
use hyper::client::Client as HttpClient;
use hyper::Error as HttpError;
use url::{self, ParseError as UrlError, Url};
use oauth2::token::Token;
use auth::{AccessToken, OAuth, Permissions};
pub const VK_DOMAIN: &'static str = "api.vk.com";
pub const VK_PATH: &'static str = "method";
#[cfg(feature = "unstable")]
include!("api.rs.in");
#[cfg(not(feature = "unstable"))]
include!(concat!(env!("OUT_DIR"), "/api.rs"));
pub type OwnerId = i64;
pub type Id = u64;
pub type Timestamp = u64;
pub type Duration = u32;
pub type Bool = u8;
#[derive(Copy, Eq, Clone, PartialEq, Debug, Default)]
pub struct FullId(pub OwnerId, pub Id);
impl fmt::Display for FullId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
|
}
impl From<(OwnerId, Id)> for FullId {
fn from(pair: (OwnerId, Id)) -> FullId {
FullId(pair.0, pair.1)
}
}
impl Into<(OwnerId, Id)> for FullId {
fn into(self) -> (OwnerId, Id) {
(self.0, self.1)
}
}
pub struct Client {
client: HttpClient,
}
#[derive(Debug)]
pub enum Error {
Api(ApiError),
Http(HttpError),
Json(JsonError),
}
impl ::std::fmt::Display for Error {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match *self {
Error::Api(ref err) => err.fmt(f),
Error::Http(ref err) => err.fmt(f),
Error::Json(ref err) => err.fmt(f),
}
}
}
impl From<ApiError> for Error {
fn from(err: ApiError) -> Error {
Error::Api(err)
}
}
impl From<HttpError> for Error {
fn from(err: HttpError) -> Error {
Error::Http(err)
}
}
impl From<JsonError> for Error {
fn from(err: JsonError) -> Error {
Error::Json(err)
}
}
impl From<UrlError> for Error {
fn from(err: UrlError) -> Error {
Error::Http(HttpError::Uri(err))
}
}
pub type Result<T> = StdResult<T, Error>;
impl Client {
pub fn auth<K, S>(&self, key: K, secret: S) -> OAuth
where K: Into<String>,
S: Into<String>
{
OAuth::new(&self.client, key.into(), secret.into())
}
pub fn new() -> Client {
Client { client: HttpClient::new() }
}
pub fn get<T: Request>(&self, token: Option<&AccessToken>, req: &T) -> Result<T::Response> {
let url = req.to_url();
let mut query = req.to_query_string();
if let Some(ref token) = token {
query.push_str("&access_token=");
query.push_str(token.access_token());
}
self.client
.post(url)
.body(&query)
.send()
.map_err(Error::Http)
.and_then(|resp| serde_json::from_reader::<_, ApiResult<T::Response>>(resp).map_err(Error::Json))
.and_then(|vkres| vkres.0.map_err(Error::Api))
}
}
/// Trait for things that can be posted to VK API directly
pub trait Request {
type Response: de::Deserialize;
fn method_name() -> &'static str;
fn to_query_string(&self) -> String;
fn permissions() -> Permissions {
Permissions::new(0)
}
fn to_url(&self) -> Url {
Url {
scheme: String::from("https"),
scheme_data: url::SchemeData::Relative(url::RelativeSchemeData {
username: String::new(),
password: None,
host: url::Host::Domain(String::from(VK_DOMAIN)),
port: Some(443),
default_port: None,
path: vec![String::from(VK_PATH), String::from(Self::method_name())],
}),
query: None,
fragment: None,
}
}
}
#[derive(Debug)]
pub struct ApiResult<T>(pub StdResult<T, ApiError>);
impl<T> Deref for ApiResult<T> {
type Target = StdResult<T, ApiError>;
fn deref(&self) -> &StdResult<T, ApiError> {
&self.0
}
}
enum ApiResultField {
Response,
Error,
}
impl de::Deserialize for ApiResultField {
fn deserialize<D: de::Deserializer>(d: &mut D) -> StdResult<ApiResultField, D::Error> {
struct ApiResultFieldVisitor;
impl de::Visitor for ApiResultFieldVisitor {
type Value = ApiResultField;
fn visit_str<E: de::Error>(&mut self, value: &str) -> StdResult<ApiResultField, E> {
match value {
"response" => Ok(ApiResultField::Response),
"error" => Ok(ApiResultField::Error),
_ => Err(de::Error::syntax("expected response or error")),
}
}
}
d.visit(ApiResultFieldVisitor)
}
}
impl<T: de::Deserialize> de::Deserialize for ApiResult<T> {
fn deserialize<D: de::Deserializer>(d: &mut D) -> StdResult<ApiResult<T>, D::Error> {
struct ApiResultVisitor<T: de::Deserialize>(PhantomData<T>);
impl<T: de::Deserialize> de::Visitor for ApiResultVisitor<T> {
type Value = ApiResult<T>;
fn visit_map<V: de::MapVisitor>(&mut self, mut v: V) -> StdResult<ApiResult<T>, V::Error> {
v.visit_key()
.and_then(|k| {
k.map(|k| {
match k {
ApiResultField::Response => v.visit_value::<T>().map(Ok),
ApiResultField::Error => v.visit_value::<ApiError>().map(Err),
}
})
.unwrap_or_else(|| v.missing_field("response or error"))
})
.and_then(|res| v.end().map(|_| res))
.map(ApiResult)
}
}
d.visit_map(ApiResultVisitor(PhantomData::<T>))
}
}
impl Into<(String, String)> for KeyVal {
fn into(self) -> (String, String) {
(self.key, self.value)
}
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum ErrorCode {
General, // 1
Database, // 2
Unauthorized, // 5
Signature, // 10
Request, // 11
ScriptCompileError, // 12
ScriptRuntimeError, // 13
Banned, // 18
Blocked, // 19
GoodsNotFound, // 20
GoodsUnvailable, // 21
UserNotFound, // 22
RequiredParameterMissing, // 100
InvalidHash, // 121
InvalidAudio, // 123
UserMenuAccessDenied, // 148
AccessDenied, // 204
AccessToWallPostDenied, // 210
PostAddAccessDenied, // 214
AdsPostWasRecentlyAdded, // 219,
TooManyRecipients, // 220,
HyperlinksForbidden, // 222
UserDisabledTrackBroadcast, // 221
CopyrightedObjectRemoved, // 270
InvalidFilename, // 301
SizeLimitReached, // 302
VideoAlreadyAdded, // 800
VideoCommentsClosed, // 801
App(u32), // 100-999
Unknown(u32), // other
}
impl From<u32> for ErrorCode {
fn from(value: u32) -> ErrorCode {
use self::ErrorCode::*;
match value {
1 => General,
2 => Database,
5 => Unauthorized,
10 => Signature,
11 => Request,
12 => ScriptCompileError,
13 => ScriptRuntimeError,
18 => Banned,
19 => Blocked,
20 => GoodsNotFound,
21 => GoodsUnvailable,
22 => UserNotFound,
100 => RequiredParameterMissing,
121 => InvalidHash,
123 => InvalidAudio,
148 => UserMenuAccessDenied,
204 => AccessDenied,
210 => AccessToWallPostDenied,
214 => PostAddAccessDenied,
219 => AdsPostWasRecentlyAdded,
220 => TooManyRecipients,
222 => HyperlinksForbidden,
221 => UserDisabledTrackBroadcast,
270 => CopyrightedObjectRemoved,
301 => InvalidFilename,
302 => SizeLimitReached,
800 => VideoAlreadyAdded,
801 => VideoCommentsClosed,
v @ 100...999 => App(v),
v @ _ => Unknown(v),
}
}
}
impl Into<u32> for ErrorCode {
fn into(self) -> u32 {
use self::ErrorCode::*;
match self {
General => 1,
Database => 2,
Unauthorized => 5,
Signature => 10,
Request => 11,
ScriptCompileError => 12,
ScriptRuntimeError => 13,
Banned => 18,
Blocked => 19,
GoodsNotFound => 20,
GoodsUnvailable => 21,
UserNotFound => 22,
RequiredParameterMissing => 100,
InvalidHash => 121,
InvalidAudio => 123,
UserMenuAccessDenied => 148,
AccessDenied => 204,
AccessToWallPostDenied => 210,
PostAddAccessDenied => 214,
AdsPostWasRecentlyAdded => 219,
TooManyRecipients => 220,
HyperlinksForbidden => 222,
UserDisabledTrackBroadcast => 221,
CopyrightedObjectRemoved => 270,
InvalidFilename => 301,
SizeLimitReached => 302,
VideoAlreadyAdded => 800,
VideoCommentsClosed => 801,
App(v) => v,
Unknown(v) => v,
}
}
}
impl fmt::Display for ErrorCode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::ErrorCode::*;
match *self {
General => f.write_str("general error"),
Database => f.write_str("database error"),
Unauthorized => f.write_str("unauthorized"),
Signature => f.write_str("invalid signature"),
Request => f.write_str("invalid request"),
ScriptCompileError => f.write_str("compile script error"),
ScriptRuntimeError => f.write_str("runtime script error"),
Banned => f.write_str("banned or deleted"),
Blocked => f.write_str("content blocked"),
GoodsNotFound => f.write_str("goods not found"),
GoodsUnvailable => f.write_str("goods unavailable"),
UserNotFound => f.write_str("user not found"),
RequiredParameterMissing => f.write_str("one of required parameters is missing"),
InvalidHash => f.write_str("invalid hash"),
InvalidAudio => f.write_str("invalid audio"),
UserMenuAccessDenied => f.write_str("access to the menu of the user denied"),
AccessDenied => f.write_str("access denied"),
AccessToWallPostDenied => f.write_str("access to wall's post denied"),
PostAddAccessDenied => f.write_str("access to adding post denied"),
AdsPostWasRecentlyAdded => f.write_str("ads post was recently added"),
TooManyRecipients => f.write_str("too many recipients"),
HyperlinksForbidden => f.write_str("hyperlinks are forbidden"),
UserDisabledTrackBroadcast => f.write_str("user disabled track name broadcast"),
CopyrightedObjectRemoved => f.write_str("object was removed by copyright holder request"),
InvalidFilename => f.write_str("invalid filename"),
SizeLimitReached => f.write_str("object size limit is reached"),
VideoAlreadyAdded => f.write_str("video is already added"),
VideoCommentsClosed => f.write_str("comments for this video are closed"),
App(v) => write!(f, "application error #{}", v),
Unknown(v) => write!(f, "unknown error #{}", v),
}
}
}
impl de::Deserialize for ErrorCode {
fn deserialize<D: de::Deserializer>(d: &mut D) -> StdResult<ErrorCode, D::Error> {
u32::deserialize(d).map(From::from)
}
}
impl StdError for ApiError {
fn description(&self) -> &str {
&*self.error_msg
}
}
impl fmt::Display for ApiError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}: {}", self.error_code, self.error_msg)
}
}
pub enum Privacy {
All,
Friends,
FriendsOfFriends,
FriendsOfFriendsOnly,
Nobody,
OnlyMe,
List(u64),
OnlyList(u64),
User(u64),
OnlyUser(u64),
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum Sort {
DateAdded = 0,
Length = 1,
Popularity = 2,
}
impl AsRef<str> for Sort {
fn as_ref(&self) -> &str {
use self::Sort::*;
match *self {
DateAdded => "0",
Length => "1",
Popularity => "2",
}
}
}
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
#[repr(u8)]
pub enum ReportReason {
Spam = 0,
ChildPorn = 1,
Extremism = 2,
Violence = 3,
Drugs = 4,
AdultOnly = 5,
Offence = 6,
}
impl AsRef<str> for ReportReason {
fn as_ref(&self) -> &str {
use self::ReportReason::*;
match *self {
Spam => "0",
ChildPorn => "1",
Extremism => "2",
Violence => "3",
Drugs => "4",
AdultOnly => "5",
Offence => "6",
}
}
}
|
{
write!(f, "{}_{}", self.0, self.1)
}
|
identifier_body
|
api.rs
|
use std::fmt;
use std::ops::Deref;
use std::marker::PhantomData;
use std::error::Error as StdError;
use std::result::Result as StdResult;
use serde::de;
use serde_json::{self, Error as JsonError};
use hyper::client::Client as HttpClient;
use hyper::Error as HttpError;
use url::{self, ParseError as UrlError, Url};
use oauth2::token::Token;
use auth::{AccessToken, OAuth, Permissions};
pub const VK_DOMAIN: &'static str = "api.vk.com";
pub const VK_PATH: &'static str = "method";
#[cfg(feature = "unstable")]
include!("api.rs.in");
#[cfg(not(feature = "unstable"))]
include!(concat!(env!("OUT_DIR"), "/api.rs"));
pub type OwnerId = i64;
pub type Id = u64;
pub type Timestamp = u64;
pub type Duration = u32;
pub type Bool = u8;
#[derive(Copy, Eq, Clone, PartialEq, Debug, Default)]
pub struct FullId(pub OwnerId, pub Id);
impl fmt::Display for FullId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}_{}", self.0, self.1)
}
}
impl From<(OwnerId, Id)> for FullId {
fn from(pair: (OwnerId, Id)) -> FullId {
FullId(pair.0, pair.1)
}
}
impl Into<(OwnerId, Id)> for FullId {
fn into(self) -> (OwnerId, Id) {
(self.0, self.1)
}
}
pub struct Client {
client: HttpClient,
}
#[derive(Debug)]
pub enum Error {
Api(ApiError),
Http(HttpError),
Json(JsonError),
}
impl ::std::fmt::Display for Error {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match *self {
Error::Api(ref err) => err.fmt(f),
Error::Http(ref err) => err.fmt(f),
Error::Json(ref err) => err.fmt(f),
}
}
}
impl From<ApiError> for Error {
fn from(err: ApiError) -> Error {
Error::Api(err)
}
}
impl From<HttpError> for Error {
fn from(err: HttpError) -> Error {
Error::Http(err)
}
}
impl From<JsonError> for Error {
fn from(err: JsonError) -> Error {
|
fn from(err: UrlError) -> Error {
Error::Http(HttpError::Uri(err))
}
}
pub type Result<T> = StdResult<T, Error>;
impl Client {
pub fn auth<K, S>(&self, key: K, secret: S) -> OAuth
where K: Into<String>,
S: Into<String>
{
OAuth::new(&self.client, key.into(), secret.into())
}
pub fn new() -> Client {
Client { client: HttpClient::new() }
}
pub fn get<T: Request>(&self, token: Option<&AccessToken>, req: &T) -> Result<T::Response> {
let url = req.to_url();
let mut query = req.to_query_string();
if let Some(ref token) = token {
query.push_str("&access_token=");
query.push_str(token.access_token());
}
self.client
.post(url)
.body(&query)
.send()
.map_err(Error::Http)
.and_then(|resp| serde_json::from_reader::<_, ApiResult<T::Response>>(resp).map_err(Error::Json))
.and_then(|vkres| vkres.0.map_err(Error::Api))
}
}
/// Trait for things that can be posted to VK API directly
pub trait Request {
type Response: de::Deserialize;
fn method_name() -> &'static str;
fn to_query_string(&self) -> String;
fn permissions() -> Permissions {
Permissions::new(0)
}
fn to_url(&self) -> Url {
Url {
scheme: String::from("https"),
scheme_data: url::SchemeData::Relative(url::RelativeSchemeData {
username: String::new(),
password: None,
host: url::Host::Domain(String::from(VK_DOMAIN)),
port: Some(443),
default_port: None,
path: vec![String::from(VK_PATH), String::from(Self::method_name())],
}),
query: None,
fragment: None,
}
}
}
#[derive(Debug)]
pub struct ApiResult<T>(pub StdResult<T, ApiError>);
impl<T> Deref for ApiResult<T> {
type Target = StdResult<T, ApiError>;
fn deref(&self) -> &StdResult<T, ApiError> {
&self.0
}
}
enum ApiResultField {
Response,
Error,
}
impl de::Deserialize for ApiResultField {
fn deserialize<D: de::Deserializer>(d: &mut D) -> StdResult<ApiResultField, D::Error> {
struct ApiResultFieldVisitor;
impl de::Visitor for ApiResultFieldVisitor {
type Value = ApiResultField;
fn visit_str<E: de::Error>(&mut self, value: &str) -> StdResult<ApiResultField, E> {
match value {
"response" => Ok(ApiResultField::Response),
"error" => Ok(ApiResultField::Error),
_ => Err(de::Error::syntax("expected response or error")),
}
}
}
d.visit(ApiResultFieldVisitor)
}
}
impl<T: de::Deserialize> de::Deserialize for ApiResult<T> {
fn deserialize<D: de::Deserializer>(d: &mut D) -> StdResult<ApiResult<T>, D::Error> {
struct ApiResultVisitor<T: de::Deserialize>(PhantomData<T>);
impl<T: de::Deserialize> de::Visitor for ApiResultVisitor<T> {
type Value = ApiResult<T>;
fn visit_map<V: de::MapVisitor>(&mut self, mut v: V) -> StdResult<ApiResult<T>, V::Error> {
v.visit_key()
.and_then(|k| {
k.map(|k| {
match k {
ApiResultField::Response => v.visit_value::<T>().map(Ok),
ApiResultField::Error => v.visit_value::<ApiError>().map(Err),
}
})
.unwrap_or_else(|| v.missing_field("response or error"))
})
.and_then(|res| v.end().map(|_| res))
.map(ApiResult)
}
}
d.visit_map(ApiResultVisitor(PhantomData::<T>))
}
}
impl Into<(String, String)> for KeyVal {
fn into(self) -> (String, String) {
(self.key, self.value)
}
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum ErrorCode {
General, // 1
Database, // 2
Unauthorized, // 5
Signature, // 10
Request, // 11
ScriptCompileError, // 12
ScriptRuntimeError, // 13
Banned, // 18
Blocked, // 19
GoodsNotFound, // 20
GoodsUnvailable, // 21
UserNotFound, // 22
RequiredParameterMissing, // 100
InvalidHash, // 121
InvalidAudio, // 123
UserMenuAccessDenied, // 148
AccessDenied, // 204
AccessToWallPostDenied, // 210
PostAddAccessDenied, // 214
AdsPostWasRecentlyAdded, // 219,
TooManyRecipients, // 220,
HyperlinksForbidden, // 222
UserDisabledTrackBroadcast, // 221
CopyrightedObjectRemoved, // 270
InvalidFilename, // 301
SizeLimitReached, // 302
VideoAlreadyAdded, // 800
VideoCommentsClosed, // 801
App(u32), // 100-999
Unknown(u32), // other
}
impl From<u32> for ErrorCode {
fn from(value: u32) -> ErrorCode {
use self::ErrorCode::*;
match value {
1 => General,
2 => Database,
5 => Unauthorized,
10 => Signature,
11 => Request,
12 => ScriptCompileError,
13 => ScriptRuntimeError,
18 => Banned,
19 => Blocked,
20 => GoodsNotFound,
21 => GoodsUnvailable,
22 => UserNotFound,
100 => RequiredParameterMissing,
121 => InvalidHash,
123 => InvalidAudio,
148 => UserMenuAccessDenied,
204 => AccessDenied,
210 => AccessToWallPostDenied,
214 => PostAddAccessDenied,
219 => AdsPostWasRecentlyAdded,
220 => TooManyRecipients,
222 => HyperlinksForbidden,
221 => UserDisabledTrackBroadcast,
270 => CopyrightedObjectRemoved,
301 => InvalidFilename,
302 => SizeLimitReached,
800 => VideoAlreadyAdded,
801 => VideoCommentsClosed,
v @ 100...999 => App(v),
v @ _ => Unknown(v),
}
}
}
impl Into<u32> for ErrorCode {
fn into(self) -> u32 {
use self::ErrorCode::*;
match self {
General => 1,
Database => 2,
Unauthorized => 5,
Signature => 10,
Request => 11,
ScriptCompileError => 12,
ScriptRuntimeError => 13,
Banned => 18,
Blocked => 19,
GoodsNotFound => 20,
GoodsUnvailable => 21,
UserNotFound => 22,
RequiredParameterMissing => 100,
InvalidHash => 121,
InvalidAudio => 123,
UserMenuAccessDenied => 148,
AccessDenied => 204,
AccessToWallPostDenied => 210,
PostAddAccessDenied => 214,
AdsPostWasRecentlyAdded => 219,
TooManyRecipients => 220,
HyperlinksForbidden => 222,
UserDisabledTrackBroadcast => 221,
CopyrightedObjectRemoved => 270,
InvalidFilename => 301,
SizeLimitReached => 302,
VideoAlreadyAdded => 800,
VideoCommentsClosed => 801,
App(v) => v,
Unknown(v) => v,
}
}
}
impl fmt::Display for ErrorCode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::ErrorCode::*;
match *self {
General => f.write_str("general error"),
Database => f.write_str("database error"),
Unauthorized => f.write_str("unauthorized"),
Signature => f.write_str("invalid signature"),
Request => f.write_str("invalid request"),
ScriptCompileError => f.write_str("compile script error"),
ScriptRuntimeError => f.write_str("runtime script error"),
Banned => f.write_str("banned or deleted"),
Blocked => f.write_str("content blocked"),
GoodsNotFound => f.write_str("goods not found"),
GoodsUnvailable => f.write_str("goods unavailable"),
UserNotFound => f.write_str("user not found"),
RequiredParameterMissing => f.write_str("one of required parameters is missing"),
InvalidHash => f.write_str("invalid hash"),
InvalidAudio => f.write_str("invalid audio"),
UserMenuAccessDenied => f.write_str("access to the menu of the user denied"),
AccessDenied => f.write_str("access denied"),
AccessToWallPostDenied => f.write_str("access to wall's post denied"),
PostAddAccessDenied => f.write_str("access to adding post denied"),
AdsPostWasRecentlyAdded => f.write_str("ads post was recently added"),
TooManyRecipients => f.write_str("too many recipients"),
HyperlinksForbidden => f.write_str("hyperlinks are forbidden"),
UserDisabledTrackBroadcast => f.write_str("user disabled track name broadcast"),
CopyrightedObjectRemoved => f.write_str("object was removed by copyright holder request"),
InvalidFilename => f.write_str("invalid filename"),
SizeLimitReached => f.write_str("object size limit is reached"),
VideoAlreadyAdded => f.write_str("video is already added"),
VideoCommentsClosed => f.write_str("comments for this video are closed"),
App(v) => write!(f, "application error #{}", v),
Unknown(v) => write!(f, "unknown error #{}", v),
}
}
}
impl de::Deserialize for ErrorCode {
fn deserialize<D: de::Deserializer>(d: &mut D) -> StdResult<ErrorCode, D::Error> {
u32::deserialize(d).map(From::from)
}
}
impl StdError for ApiError {
fn description(&self) -> &str {
&*self.error_msg
}
}
impl fmt::Display for ApiError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}: {}", self.error_code, self.error_msg)
}
}
pub enum Privacy {
All,
Friends,
FriendsOfFriends,
FriendsOfFriendsOnly,
Nobody,
OnlyMe,
List(u64),
OnlyList(u64),
User(u64),
OnlyUser(u64),
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum Sort {
DateAdded = 0,
Length = 1,
Popularity = 2,
}
impl AsRef<str> for Sort {
fn as_ref(&self) -> &str {
use self::Sort::*;
match *self {
DateAdded => "0",
Length => "1",
Popularity => "2",
}
}
}
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
#[repr(u8)]
pub enum ReportReason {
Spam = 0,
ChildPorn = 1,
Extremism = 2,
Violence = 3,
Drugs = 4,
AdultOnly = 5,
Offence = 6,
}
impl AsRef<str> for ReportReason {
fn as_ref(&self) -> &str {
use self::ReportReason::*;
match *self {
Spam => "0",
ChildPorn => "1",
Extremism => "2",
Violence => "3",
Drugs => "4",
AdultOnly => "5",
Offence => "6",
}
}
}
|
Error::Json(err)
}
}
impl From<UrlError> for Error {
|
random_line_split
|
test.rs
|
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Based off of libcss's examples/example1.c
mod example1 {
use CssResult;
use CssProperty;
use types::*;
use hint::*;
use select::*;
use util::VoidPtrLike;
use wapcaplet::LwcString;
use std::libc;
use std::cast;
struct MyDomNode {
name: @LwcString
}
impl VoidPtrLike for MyDomNode {
fn from_void_ptr(node: *libc::c_void) -> MyDomNode {
assert!(node.is_not_null());
MyDomNode {
name: unsafe {
let box = cast::transmute(node);
cast::bump_box_refcount(box);
box
}
}
}
fn to_void_ptr(&self) -> *libc::c_void {
unsafe { cast::transmute(self.name) }
}
}
#[test]
fn run() {
use super::super::stylesheet::{CssUrlResolutionFn, CssStylesheetParams, CssStylesheet,
css_stylesheet_create, CssStylesheetParamsVersion1};
use super::super::computed::CssComputedStyle;
use super::super::values::{CssColorColor, CssColorInherit};
use super::super::ll::types::{CSS_ORIGIN_AUTHOR, CSS_MEDIA_ALL, CSS_MEDIA_SCREEN};
use super::super::conversions::ToLl;
use wapcaplet::{LwcString, from_rust_string};
let data = "h1 { color: red; }\
h4 { color: #321; }\
h4, h5 { color: #123456; }";
let resolve: CssUrlResolutionFn = |a,b| resolve_url(a, b);
let params: CssStylesheetParams = CssStylesheetParams {
params_version: CssStylesheetParamsVersion1,
level: CssLevel21,
charset: ~"UTF-8",
url: ~"foo",
title: ~"foo",
allow_quirks: false,
inline_style: false,
resolve: Some(resolve),
import: None,
color: None,
font: None,
};
let mut sheet: CssStylesheet = css_stylesheet_create(¶ms);
debug!("stylesheet: %?", sheet);
debug!("stylesheet size: %?", sheet.size());
sheet.append_data(data.as_bytes().to_owned());
sheet.data_done();
debug!("stylesheet size: %?", sheet.size());
let mut select_ctx: CssSelectCtx = css_select_ctx_create();
assert!(select_ctx.count_sheets() == 0);
select_ctx.append_sheet(sheet, CSS_ORIGIN_AUTHOR, CSS_MEDIA_ALL);
debug!("count sheets: %?", select_ctx.count_sheets());
assert!(select_ctx.count_sheets() == 1);
for hh in range(1u, 7u) {
let element = fmt!("h%u", hh);
let element_name: @LwcString = @from_rust_string(element);
let node = MyDomNode { name: element_name };
let select_handler = SelectHandler { bogus: () };
let style: CssSelectResults = select_ctx.select_style(&node,
CSS_MEDIA_SCREEN,
None,
&select_handler);
let computed: CssComputedStyle = style.computed_style(CssPseudoElementNone);
match computed.color() {
CssColorInherit => {
debug!("color of h%u is 'inherit'", hh);
}
CssColorColor(color) => {
debug!("color of h%u is %x", hh, color.to_ll() as uint);
}
}
}
}
fn resolve_url(_base: &str, _rel: &LwcString) -> CssResult<LwcString> {
fail!(~"resolving url");
}
struct SelectHandler {
bogus: ()
}
impl CssSelectHandler<MyDomNode> for SelectHandler {
fn node_name(&self, node: &MyDomNode) -> CssQName {
debug!("HL node_name!");
debug!("SS %?", (*node.name).to_str());
CssQName {
ns: None,
name: (*node.name).clone()
}
}
fn node_classes(&self, _node: &MyDomNode) -> Option<~[LwcString]> { None }
fn node_id(&self, _node: &MyDomNode) -> Option<LwcString> { None }
fn named_parent_node(&self, _node: &MyDomNode, _qname: &CssQName) -> Option<MyDomNode> {
None
}
fn parent_node(&self, _node: &MyDomNode) -> Option<MyDomNode> {
None
}
fn node_has_class(&self, _node: &MyDomNode, _name: LwcString) -> bool { false }
fn node_has_id(&self, _node: &MyDomNode, _name: LwcString) -> bool { false }
fn named_ancestor_node(&self, _node: &MyDomNode, _qname: &CssQName) -> Option<MyDomNode> {
None
}
fn node_is_root(&self, _node: &MyDomNode) -> bool { false }
fn node_is_link(&self, _node: &MyDomNode) -> bool { false }
fn node_is_visited(&self, _node: &MyDomNode) -> bool { false }
fn ua_default_for_property(&self, property: CssProperty) -> CssHint {
match property {
_ => CssHintDefault
}
}
}
}
#[test]
fn
|
() {
use extra::arc::Arc;
use stylesheet::*;
use types::CssLevel21;
use wapcaplet::LwcString;
use super::CssResult;
let resolve: CssUrlResolutionFn = |a,b| resolve_url(a, b);
let params: CssStylesheetParams = CssStylesheetParams {
params_version: CssStylesheetParamsVersion1,
level: CssLevel21,
charset: ~"UTF-8",
url: ~"foo",
title: ~"foo",
allow_quirks: false,
inline_style: false,
resolve: Some(resolve),
import: None,
color: None,
font: None,
};
let sheet: CssStylesheet = css_stylesheet_create(¶ms);
let _arc = Arc::new(sheet);
fn resolve_url(_base: &str, _rel: &LwcString) -> CssResult<LwcString> {
fail!(~"resolving url");
}
}
|
test_arc
|
identifier_name
|
test.rs
|
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Based off of libcss's examples/example1.c
mod example1 {
use CssResult;
use CssProperty;
use types::*;
use hint::*;
use select::*;
use util::VoidPtrLike;
use wapcaplet::LwcString;
use std::libc;
use std::cast;
struct MyDomNode {
name: @LwcString
}
impl VoidPtrLike for MyDomNode {
fn from_void_ptr(node: *libc::c_void) -> MyDomNode {
assert!(node.is_not_null());
MyDomNode {
name: unsafe {
let box = cast::transmute(node);
cast::bump_box_refcount(box);
box
}
}
}
fn to_void_ptr(&self) -> *libc::c_void {
unsafe { cast::transmute(self.name) }
}
}
#[test]
fn run() {
use super::super::stylesheet::{CssUrlResolutionFn, CssStylesheetParams, CssStylesheet,
css_stylesheet_create, CssStylesheetParamsVersion1};
use super::super::computed::CssComputedStyle;
use super::super::values::{CssColorColor, CssColorInherit};
use super::super::ll::types::{CSS_ORIGIN_AUTHOR, CSS_MEDIA_ALL, CSS_MEDIA_SCREEN};
use super::super::conversions::ToLl;
use wapcaplet::{LwcString, from_rust_string};
let data = "h1 { color: red; }\
h4 { color: #321; }\
h4, h5 { color: #123456; }";
let resolve: CssUrlResolutionFn = |a,b| resolve_url(a, b);
let params: CssStylesheetParams = CssStylesheetParams {
params_version: CssStylesheetParamsVersion1,
level: CssLevel21,
charset: ~"UTF-8",
url: ~"foo",
title: ~"foo",
allow_quirks: false,
inline_style: false,
|
color: None,
font: None,
};
let mut sheet: CssStylesheet = css_stylesheet_create(¶ms);
debug!("stylesheet: %?", sheet);
debug!("stylesheet size: %?", sheet.size());
sheet.append_data(data.as_bytes().to_owned());
sheet.data_done();
debug!("stylesheet size: %?", sheet.size());
let mut select_ctx: CssSelectCtx = css_select_ctx_create();
assert!(select_ctx.count_sheets() == 0);
select_ctx.append_sheet(sheet, CSS_ORIGIN_AUTHOR, CSS_MEDIA_ALL);
debug!("count sheets: %?", select_ctx.count_sheets());
assert!(select_ctx.count_sheets() == 1);
for hh in range(1u, 7u) {
let element = fmt!("h%u", hh);
let element_name: @LwcString = @from_rust_string(element);
let node = MyDomNode { name: element_name };
let select_handler = SelectHandler { bogus: () };
let style: CssSelectResults = select_ctx.select_style(&node,
CSS_MEDIA_SCREEN,
None,
&select_handler);
let computed: CssComputedStyle = style.computed_style(CssPseudoElementNone);
match computed.color() {
CssColorInherit => {
debug!("color of h%u is 'inherit'", hh);
}
CssColorColor(color) => {
debug!("color of h%u is %x", hh, color.to_ll() as uint);
}
}
}
}
fn resolve_url(_base: &str, _rel: &LwcString) -> CssResult<LwcString> {
fail!(~"resolving url");
}
struct SelectHandler {
bogus: ()
}
impl CssSelectHandler<MyDomNode> for SelectHandler {
fn node_name(&self, node: &MyDomNode) -> CssQName {
debug!("HL node_name!");
debug!("SS %?", (*node.name).to_str());
CssQName {
ns: None,
name: (*node.name).clone()
}
}
fn node_classes(&self, _node: &MyDomNode) -> Option<~[LwcString]> { None }
fn node_id(&self, _node: &MyDomNode) -> Option<LwcString> { None }
fn named_parent_node(&self, _node: &MyDomNode, _qname: &CssQName) -> Option<MyDomNode> {
None
}
fn parent_node(&self, _node: &MyDomNode) -> Option<MyDomNode> {
None
}
fn node_has_class(&self, _node: &MyDomNode, _name: LwcString) -> bool { false }
fn node_has_id(&self, _node: &MyDomNode, _name: LwcString) -> bool { false }
fn named_ancestor_node(&self, _node: &MyDomNode, _qname: &CssQName) -> Option<MyDomNode> {
None
}
fn node_is_root(&self, _node: &MyDomNode) -> bool { false }
fn node_is_link(&self, _node: &MyDomNode) -> bool { false }
fn node_is_visited(&self, _node: &MyDomNode) -> bool { false }
fn ua_default_for_property(&self, property: CssProperty) -> CssHint {
match property {
_ => CssHintDefault
}
}
}
}
#[test]
fn test_arc() {
use extra::arc::Arc;
use stylesheet::*;
use types::CssLevel21;
use wapcaplet::LwcString;
use super::CssResult;
let resolve: CssUrlResolutionFn = |a,b| resolve_url(a, b);
let params: CssStylesheetParams = CssStylesheetParams {
params_version: CssStylesheetParamsVersion1,
level: CssLevel21,
charset: ~"UTF-8",
url: ~"foo",
title: ~"foo",
allow_quirks: false,
inline_style: false,
resolve: Some(resolve),
import: None,
color: None,
font: None,
};
let sheet: CssStylesheet = css_stylesheet_create(¶ms);
let _arc = Arc::new(sheet);
fn resolve_url(_base: &str, _rel: &LwcString) -> CssResult<LwcString> {
fail!(~"resolving url");
}
}
|
resolve: Some(resolve),
import: None,
|
random_line_split
|
test.rs
|
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Based off of libcss's examples/example1.c
mod example1 {
use CssResult;
use CssProperty;
use types::*;
use hint::*;
use select::*;
use util::VoidPtrLike;
use wapcaplet::LwcString;
use std::libc;
use std::cast;
struct MyDomNode {
name: @LwcString
}
impl VoidPtrLike for MyDomNode {
fn from_void_ptr(node: *libc::c_void) -> MyDomNode {
assert!(node.is_not_null());
MyDomNode {
name: unsafe {
let box = cast::transmute(node);
cast::bump_box_refcount(box);
box
}
}
}
fn to_void_ptr(&self) -> *libc::c_void
|
}
#[test]
fn run() {
use super::super::stylesheet::{CssUrlResolutionFn, CssStylesheetParams, CssStylesheet,
css_stylesheet_create, CssStylesheetParamsVersion1};
use super::super::computed::CssComputedStyle;
use super::super::values::{CssColorColor, CssColorInherit};
use super::super::ll::types::{CSS_ORIGIN_AUTHOR, CSS_MEDIA_ALL, CSS_MEDIA_SCREEN};
use super::super::conversions::ToLl;
use wapcaplet::{LwcString, from_rust_string};
let data = "h1 { color: red; }\
h4 { color: #321; }\
h4, h5 { color: #123456; }";
let resolve: CssUrlResolutionFn = |a,b| resolve_url(a, b);
let params: CssStylesheetParams = CssStylesheetParams {
params_version: CssStylesheetParamsVersion1,
level: CssLevel21,
charset: ~"UTF-8",
url: ~"foo",
title: ~"foo",
allow_quirks: false,
inline_style: false,
resolve: Some(resolve),
import: None,
color: None,
font: None,
};
let mut sheet: CssStylesheet = css_stylesheet_create(¶ms);
debug!("stylesheet: %?", sheet);
debug!("stylesheet size: %?", sheet.size());
sheet.append_data(data.as_bytes().to_owned());
sheet.data_done();
debug!("stylesheet size: %?", sheet.size());
let mut select_ctx: CssSelectCtx = css_select_ctx_create();
assert!(select_ctx.count_sheets() == 0);
select_ctx.append_sheet(sheet, CSS_ORIGIN_AUTHOR, CSS_MEDIA_ALL);
debug!("count sheets: %?", select_ctx.count_sheets());
assert!(select_ctx.count_sheets() == 1);
for hh in range(1u, 7u) {
let element = fmt!("h%u", hh);
let element_name: @LwcString = @from_rust_string(element);
let node = MyDomNode { name: element_name };
let select_handler = SelectHandler { bogus: () };
let style: CssSelectResults = select_ctx.select_style(&node,
CSS_MEDIA_SCREEN,
None,
&select_handler);
let computed: CssComputedStyle = style.computed_style(CssPseudoElementNone);
match computed.color() {
CssColorInherit => {
debug!("color of h%u is 'inherit'", hh);
}
CssColorColor(color) => {
debug!("color of h%u is %x", hh, color.to_ll() as uint);
}
}
}
}
fn resolve_url(_base: &str, _rel: &LwcString) -> CssResult<LwcString> {
fail!(~"resolving url");
}
struct SelectHandler {
bogus: ()
}
impl CssSelectHandler<MyDomNode> for SelectHandler {
fn node_name(&self, node: &MyDomNode) -> CssQName {
debug!("HL node_name!");
debug!("SS %?", (*node.name).to_str());
CssQName {
ns: None,
name: (*node.name).clone()
}
}
fn node_classes(&self, _node: &MyDomNode) -> Option<~[LwcString]> { None }
fn node_id(&self, _node: &MyDomNode) -> Option<LwcString> { None }
fn named_parent_node(&self, _node: &MyDomNode, _qname: &CssQName) -> Option<MyDomNode> {
None
}
fn parent_node(&self, _node: &MyDomNode) -> Option<MyDomNode> {
None
}
fn node_has_class(&self, _node: &MyDomNode, _name: LwcString) -> bool { false }
fn node_has_id(&self, _node: &MyDomNode, _name: LwcString) -> bool { false }
fn named_ancestor_node(&self, _node: &MyDomNode, _qname: &CssQName) -> Option<MyDomNode> {
None
}
fn node_is_root(&self, _node: &MyDomNode) -> bool { false }
fn node_is_link(&self, _node: &MyDomNode) -> bool { false }
fn node_is_visited(&self, _node: &MyDomNode) -> bool { false }
fn ua_default_for_property(&self, property: CssProperty) -> CssHint {
match property {
_ => CssHintDefault
}
}
}
}
#[test]
fn test_arc() {
use extra::arc::Arc;
use stylesheet::*;
use types::CssLevel21;
use wapcaplet::LwcString;
use super::CssResult;
let resolve: CssUrlResolutionFn = |a,b| resolve_url(a, b);
let params: CssStylesheetParams = CssStylesheetParams {
params_version: CssStylesheetParamsVersion1,
level: CssLevel21,
charset: ~"UTF-8",
url: ~"foo",
title: ~"foo",
allow_quirks: false,
inline_style: false,
resolve: Some(resolve),
import: None,
color: None,
font: None,
};
let sheet: CssStylesheet = css_stylesheet_create(¶ms);
let _arc = Arc::new(sheet);
fn resolve_url(_base: &str, _rel: &LwcString) -> CssResult<LwcString> {
fail!(~"resolving url");
}
}
|
{
unsafe { cast::transmute(self.name) }
}
|
identifier_body
|
udp_stream.rs
|
// Copyright 2015-2018 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::io;
use std::marker::PhantomData;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
use std::pin::Pin;
use std::task::{Context, Poll};
use async_trait::async_trait;
use futures_channel::mpsc::{unbounded, UnboundedReceiver};
use futures_util::stream::{Fuse, Peekable, Stream, StreamExt};
use futures_util::{future::Future, ready, FutureExt, TryFutureExt};
use log::debug;
use rand;
use rand::distributions::{uniform::Uniform, Distribution};
use crate::xfer::{BufStreamHandle, SerialMessage};
/// Trait for UdpSocket
#[async_trait]
pub trait UdpSocket
where
Self: Sized + Unpin,
{
/// UdpSocket
async fn bind(addr: &SocketAddr) -> io::Result<Self>;
/// Receive data from the socket and returns the number of bytes read and the address from
/// where the data came on success.
async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)>;
/// Send data to the given address.
async fn send_to(&mut self, buf: &[u8], target: &SocketAddr) -> io::Result<usize>;
}
/// A UDP stream of DNS binary packets
#[must_use = "futures do nothing unless polled"]
pub struct UdpStream<S: Send> {
socket: S,
outbound_messages: Peekable<Fuse<UnboundedReceiver<SerialMessage>>>,
}
impl<S: UdpSocket + Send +'static> UdpStream<S> {
/// This method is intended for client connections, see `with_bound` for a method better for
/// straight listening. It is expected that the resolver wrapper will be responsible for
/// creating and managing new UdpStreams such that each new client would have a random port
/// (reduce chance of cache poisoning). This will return a randomly assigned local port.
///
/// # Arguments
///
/// * `name_server` - socket address for the remote server (used to determine IPv4 or IPv6)
///
/// # Return
///
/// a tuple of a Future Stream which will handle sending and receiving messages, and a
/// handle which can be used to send messages into the stream.
#[allow(clippy::type_complexity)]
pub fn new(
name_server: SocketAddr,
) -> (
Box<dyn Future<Output = Result<UdpStream<S>, io::Error>> + Send + Unpin>,
BufStreamHandle,
) {
let (message_sender, outbound_messages) = unbounded();
let message_sender = BufStreamHandle::new(message_sender);
// TODO: allow the bind address to be specified...
// constructs a future for getting the next randomly bound port to a UdpSocket
let next_socket = NextRandomUdpSocket::new(&name_server);
// This set of futures collapses the next udp socket into a stream which can be used for
// sending and receiving udp packets.
let stream = Box::new(next_socket.map_ok(move |socket| UdpStream {
socket,
outbound_messages: outbound_messages.fuse().peekable(),
}));
(stream, message_sender)
}
/// Initialize the Stream with an already bound socket. Generally this should be only used for
/// server listening sockets. See `new` for a client oriented socket. Specifically, this there
/// is already a bound socket in this context, whereas `new` makes sure to randomize ports
/// for additional cache poison prevention.
///
/// # Arguments
///
/// * `socket` - an already bound UDP socket
///
/// # Return
///
/// a tuple of a Future Stream which will handle sending and receiving messsages, and a
/// handle which can be used to send messages into the stream.
pub fn with_bound(socket: S) -> (Self, BufStreamHandle) {
let (message_sender, outbound_messages) = unbounded();
let message_sender = BufStreamHandle::new(message_sender);
let stream = UdpStream {
socket,
outbound_messages: outbound_messages.fuse().peekable(),
};
(stream, message_sender)
}
#[allow(unused)]
pub(crate) fn from_parts(
socket: S,
outbound_messages: UnboundedReceiver<SerialMessage>,
) -> Self {
UdpStream {
socket,
outbound_messages: outbound_messages.fuse().peekable(),
}
}
}
impl<S: Send> UdpStream<S> {
#[allow(clippy::type_complexity)]
fn pollable_split(
|
(&mut self.socket, &mut self.outbound_messages)
}
}
impl<S: UdpSocket + Send +'static> Stream for UdpStream<S> {
type Item = Result<SerialMessage, io::Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let (socket, outbound_messages) = self.pollable_split();
let mut socket = Pin::new(socket);
let mut outbound_messages = Pin::new(outbound_messages);
// this will not accept incoming data while there is data to send
// makes this self throttling.
while let Poll::Ready(Some(message)) = outbound_messages.as_mut().poll_peek(cx) {
// first try to send
let addr = &message.addr();
// this wiil return if not ready,
// meaning that sending will be prefered over receiving...
// TODO: shouldn't this return the error to send to the sender?
ready!(socket.send_to(message.bytes(), addr).poll_unpin(cx))?;
// message sent, need to pop the message
assert!(outbound_messages.as_mut().poll_next(cx).is_ready());
}
// For QoS, this will only accept one message and output that
// receive all inbound messages
// TODO: this should match edns settings
let mut buf = [0u8; 4096];
let (len, src) = ready!(socket.recv_from(&mut buf).poll_unpin(cx))?;
Poll::Ready(Some(Ok(SerialMessage::new(
buf.iter().take(len).cloned().collect(),
src,
))))
}
}
#[must_use = "futures do nothing unless polled"]
pub(crate) struct NextRandomUdpSocket<S> {
bind_address: IpAddr,
marker: PhantomData<S>,
}
impl<S: UdpSocket> NextRandomUdpSocket<S> {
/// Creates a future for randomly binding to a local socket address for client connections.
pub(crate) fn new(name_server: &SocketAddr) -> NextRandomUdpSocket<S> {
let zero_addr: IpAddr = match *name_server {
SocketAddr::V4(..) => IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
SocketAddr::V6(..) => IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)),
};
NextRandomUdpSocket {
bind_address: zero_addr,
marker: PhantomData,
}
}
async fn bind(zero_addr: SocketAddr) -> Result<S, io::Error> {
S::bind(&zero_addr).await
}
}
impl<S: UdpSocket> Future for NextRandomUdpSocket<S> {
type Output = Result<S, io::Error>;
/// polls until there is an available next random UDP port.
///
/// if there is no port available after 10 attempts, returns NotReady
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let rand_port_range = Uniform::new_inclusive(1025_u16, u16::max_value());
let mut rand = rand::thread_rng();
for attempt in 0..10 {
let port = rand_port_range.sample(&mut rand); // the range is [0... u16::max]
let zero_addr = SocketAddr::new(self.bind_address, port);
// TODO: allow TTL to be adjusted...
// TODO: this immediate poll might be wrong in some cases...
match Box::pin(Self::bind(zero_addr)).as_mut().poll(cx) {
Poll::Ready(Ok(socket)) => {
debug!("created socket successfully");
return Poll::Ready(Ok(socket));
}
Poll::Ready(Err(err)) => {
debug!("unable to bind port, attempt: {}: {}", attempt, err)
}
Poll::Pending => debug!("unable to bind port, attempt: {}", attempt),
}
}
debug!("could not get next random port, delaying");
// TODO: because no interest is registered anywhere, we must awake.
cx.waker().wake_by_ref();
// returning NotReady here, perhaps the next poll there will be some more socket available.
Poll::Pending
}
}
#[cfg(feature = "tokio-runtime")]
#[async_trait]
impl UdpSocket for tokio::net::UdpSocket {
async fn bind(addr: &SocketAddr) -> io::Result<Self> {
tokio::net::UdpSocket::bind(addr).await
}
async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.recv_from(buf).await
}
async fn send_to(&mut self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
self.send_to(buf, target).await
}
}
#[cfg(test)]
#[cfg(feature = "tokio-runtime")]
mod tests {
#[cfg(not(target_os = "linux"))] // ignored until Travis-CI fixes IPv6
use std::net::Ipv6Addr;
use std::net::{IpAddr, Ipv4Addr};
use tokio::{net::UdpSocket as TokioUdpSocket, runtime::Runtime};
#[test]
fn test_next_random_socket() {
use crate::tests::next_random_socket_test;
let io_loop = Runtime::new().expect("failed to create tokio runtime");
next_random_socket_test::<TokioUdpSocket, Runtime>(io_loop)
}
#[test]
fn test_udp_stream_ipv4() {
use crate::tests::udp_stream_test;
let io_loop = Runtime::new().expect("failed to create tokio runtime");
udp_stream_test::<TokioUdpSocket, Runtime>(
IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
io_loop,
);
}
#[test]
#[cfg(not(target_os = "linux"))] // ignored until Travis-CI fixes IPv6
fn test_udp_stream_ipv6() {
use crate::tests::udp_stream_test;
let io_loop = Runtime::new().expect("failed to create tokio runtime");
udp_stream_test::<TokioUdpSocket, Runtime>(
IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)),
io_loop,
);
}
}
|
&mut self,
) -> (
&mut S,
&mut Peekable<Fuse<UnboundedReceiver<SerialMessage>>>,
) {
|
random_line_split
|
udp_stream.rs
|
// Copyright 2015-2018 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::io;
use std::marker::PhantomData;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
use std::pin::Pin;
use std::task::{Context, Poll};
use async_trait::async_trait;
use futures_channel::mpsc::{unbounded, UnboundedReceiver};
use futures_util::stream::{Fuse, Peekable, Stream, StreamExt};
use futures_util::{future::Future, ready, FutureExt, TryFutureExt};
use log::debug;
use rand;
use rand::distributions::{uniform::Uniform, Distribution};
use crate::xfer::{BufStreamHandle, SerialMessage};
/// Trait for UdpSocket
#[async_trait]
pub trait UdpSocket
where
Self: Sized + Unpin,
{
/// UdpSocket
async fn bind(addr: &SocketAddr) -> io::Result<Self>;
/// Receive data from the socket and returns the number of bytes read and the address from
/// where the data came on success.
async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)>;
/// Send data to the given address.
async fn send_to(&mut self, buf: &[u8], target: &SocketAddr) -> io::Result<usize>;
}
/// A UDP stream of DNS binary packets
#[must_use = "futures do nothing unless polled"]
pub struct UdpStream<S: Send> {
socket: S,
outbound_messages: Peekable<Fuse<UnboundedReceiver<SerialMessage>>>,
}
impl<S: UdpSocket + Send +'static> UdpStream<S> {
/// This method is intended for client connections, see `with_bound` for a method better for
/// straight listening. It is expected that the resolver wrapper will be responsible for
/// creating and managing new UdpStreams such that each new client would have a random port
/// (reduce chance of cache poisoning). This will return a randomly assigned local port.
///
/// # Arguments
///
/// * `name_server` - socket address for the remote server (used to determine IPv4 or IPv6)
///
/// # Return
///
/// a tuple of a Future Stream which will handle sending and receiving messages, and a
/// handle which can be used to send messages into the stream.
#[allow(clippy::type_complexity)]
pub fn new(
name_server: SocketAddr,
) -> (
Box<dyn Future<Output = Result<UdpStream<S>, io::Error>> + Send + Unpin>,
BufStreamHandle,
) {
let (message_sender, outbound_messages) = unbounded();
let message_sender = BufStreamHandle::new(message_sender);
// TODO: allow the bind address to be specified...
// constructs a future for getting the next randomly bound port to a UdpSocket
let next_socket = NextRandomUdpSocket::new(&name_server);
// This set of futures collapses the next udp socket into a stream which can be used for
// sending and receiving udp packets.
let stream = Box::new(next_socket.map_ok(move |socket| UdpStream {
socket,
outbound_messages: outbound_messages.fuse().peekable(),
}));
(stream, message_sender)
}
/// Initialize the Stream with an already bound socket. Generally this should be only used for
/// server listening sockets. See `new` for a client oriented socket. Specifically, this there
/// is already a bound socket in this context, whereas `new` makes sure to randomize ports
/// for additional cache poison prevention.
///
/// # Arguments
///
/// * `socket` - an already bound UDP socket
///
/// # Return
///
/// a tuple of a Future Stream which will handle sending and receiving messsages, and a
/// handle which can be used to send messages into the stream.
pub fn with_bound(socket: S) -> (Self, BufStreamHandle) {
let (message_sender, outbound_messages) = unbounded();
let message_sender = BufStreamHandle::new(message_sender);
let stream = UdpStream {
socket,
outbound_messages: outbound_messages.fuse().peekable(),
};
(stream, message_sender)
}
#[allow(unused)]
pub(crate) fn from_parts(
socket: S,
outbound_messages: UnboundedReceiver<SerialMessage>,
) -> Self {
UdpStream {
socket,
outbound_messages: outbound_messages.fuse().peekable(),
}
}
}
impl<S: Send> UdpStream<S> {
#[allow(clippy::type_complexity)]
fn pollable_split(
&mut self,
) -> (
&mut S,
&mut Peekable<Fuse<UnboundedReceiver<SerialMessage>>>,
) {
(&mut self.socket, &mut self.outbound_messages)
}
}
impl<S: UdpSocket + Send +'static> Stream for UdpStream<S> {
type Item = Result<SerialMessage, io::Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let (socket, outbound_messages) = self.pollable_split();
let mut socket = Pin::new(socket);
let mut outbound_messages = Pin::new(outbound_messages);
// this will not accept incoming data while there is data to send
// makes this self throttling.
while let Poll::Ready(Some(message)) = outbound_messages.as_mut().poll_peek(cx) {
// first try to send
let addr = &message.addr();
// this wiil return if not ready,
// meaning that sending will be prefered over receiving...
// TODO: shouldn't this return the error to send to the sender?
ready!(socket.send_to(message.bytes(), addr).poll_unpin(cx))?;
// message sent, need to pop the message
assert!(outbound_messages.as_mut().poll_next(cx).is_ready());
}
// For QoS, this will only accept one message and output that
// receive all inbound messages
// TODO: this should match edns settings
let mut buf = [0u8; 4096];
let (len, src) = ready!(socket.recv_from(&mut buf).poll_unpin(cx))?;
Poll::Ready(Some(Ok(SerialMessage::new(
buf.iter().take(len).cloned().collect(),
src,
))))
}
}
#[must_use = "futures do nothing unless polled"]
pub(crate) struct NextRandomUdpSocket<S> {
bind_address: IpAddr,
marker: PhantomData<S>,
}
impl<S: UdpSocket> NextRandomUdpSocket<S> {
/// Creates a future for randomly binding to a local socket address for client connections.
pub(crate) fn new(name_server: &SocketAddr) -> NextRandomUdpSocket<S> {
let zero_addr: IpAddr = match *name_server {
SocketAddr::V4(..) => IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
SocketAddr::V6(..) => IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)),
};
NextRandomUdpSocket {
bind_address: zero_addr,
marker: PhantomData,
}
}
async fn bind(zero_addr: SocketAddr) -> Result<S, io::Error> {
S::bind(&zero_addr).await
}
}
impl<S: UdpSocket> Future for NextRandomUdpSocket<S> {
type Output = Result<S, io::Error>;
/// polls until there is an available next random UDP port.
///
/// if there is no port available after 10 attempts, returns NotReady
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let rand_port_range = Uniform::new_inclusive(1025_u16, u16::max_value());
let mut rand = rand::thread_rng();
for attempt in 0..10 {
let port = rand_port_range.sample(&mut rand); // the range is [0... u16::max]
let zero_addr = SocketAddr::new(self.bind_address, port);
// TODO: allow TTL to be adjusted...
// TODO: this immediate poll might be wrong in some cases...
match Box::pin(Self::bind(zero_addr)).as_mut().poll(cx) {
Poll::Ready(Ok(socket)) => {
debug!("created socket successfully");
return Poll::Ready(Ok(socket));
}
Poll::Ready(Err(err)) => {
debug!("unable to bind port, attempt: {}: {}", attempt, err)
}
Poll::Pending => debug!("unable to bind port, attempt: {}", attempt),
}
}
debug!("could not get next random port, delaying");
// TODO: because no interest is registered anywhere, we must awake.
cx.waker().wake_by_ref();
// returning NotReady here, perhaps the next poll there will be some more socket available.
Poll::Pending
}
}
#[cfg(feature = "tokio-runtime")]
#[async_trait]
impl UdpSocket for tokio::net::UdpSocket {
async fn bind(addr: &SocketAddr) -> io::Result<Self> {
tokio::net::UdpSocket::bind(addr).await
}
async fn
|
(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.recv_from(buf).await
}
async fn send_to(&mut self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
self.send_to(buf, target).await
}
}
#[cfg(test)]
#[cfg(feature = "tokio-runtime")]
mod tests {
#[cfg(not(target_os = "linux"))] // ignored until Travis-CI fixes IPv6
use std::net::Ipv6Addr;
use std::net::{IpAddr, Ipv4Addr};
use tokio::{net::UdpSocket as TokioUdpSocket, runtime::Runtime};
#[test]
fn test_next_random_socket() {
use crate::tests::next_random_socket_test;
let io_loop = Runtime::new().expect("failed to create tokio runtime");
next_random_socket_test::<TokioUdpSocket, Runtime>(io_loop)
}
#[test]
fn test_udp_stream_ipv4() {
use crate::tests::udp_stream_test;
let io_loop = Runtime::new().expect("failed to create tokio runtime");
udp_stream_test::<TokioUdpSocket, Runtime>(
IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
io_loop,
);
}
#[test]
#[cfg(not(target_os = "linux"))] // ignored until Travis-CI fixes IPv6
fn test_udp_stream_ipv6() {
use crate::tests::udp_stream_test;
let io_loop = Runtime::new().expect("failed to create tokio runtime");
udp_stream_test::<TokioUdpSocket, Runtime>(
IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)),
io_loop,
);
}
}
|
recv_from
|
identifier_name
|
udp_stream.rs
|
// Copyright 2015-2018 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::io;
use std::marker::PhantomData;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
use std::pin::Pin;
use std::task::{Context, Poll};
use async_trait::async_trait;
use futures_channel::mpsc::{unbounded, UnboundedReceiver};
use futures_util::stream::{Fuse, Peekable, Stream, StreamExt};
use futures_util::{future::Future, ready, FutureExt, TryFutureExt};
use log::debug;
use rand;
use rand::distributions::{uniform::Uniform, Distribution};
use crate::xfer::{BufStreamHandle, SerialMessage};
/// Trait for UdpSocket
#[async_trait]
pub trait UdpSocket
where
Self: Sized + Unpin,
{
/// UdpSocket
async fn bind(addr: &SocketAddr) -> io::Result<Self>;
/// Receive data from the socket and returns the number of bytes read and the address from
/// where the data came on success.
async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)>;
/// Send data to the given address.
async fn send_to(&mut self, buf: &[u8], target: &SocketAddr) -> io::Result<usize>;
}
/// A UDP stream of DNS binary packets
#[must_use = "futures do nothing unless polled"]
pub struct UdpStream<S: Send> {
socket: S,
outbound_messages: Peekable<Fuse<UnboundedReceiver<SerialMessage>>>,
}
impl<S: UdpSocket + Send +'static> UdpStream<S> {
/// This method is intended for client connections, see `with_bound` for a method better for
/// straight listening. It is expected that the resolver wrapper will be responsible for
/// creating and managing new UdpStreams such that each new client would have a random port
/// (reduce chance of cache poisoning). This will return a randomly assigned local port.
///
/// # Arguments
///
/// * `name_server` - socket address for the remote server (used to determine IPv4 or IPv6)
///
/// # Return
///
/// a tuple of a Future Stream which will handle sending and receiving messages, and a
/// handle which can be used to send messages into the stream.
#[allow(clippy::type_complexity)]
pub fn new(
name_server: SocketAddr,
) -> (
Box<dyn Future<Output = Result<UdpStream<S>, io::Error>> + Send + Unpin>,
BufStreamHandle,
) {
let (message_sender, outbound_messages) = unbounded();
let message_sender = BufStreamHandle::new(message_sender);
// TODO: allow the bind address to be specified...
// constructs a future for getting the next randomly bound port to a UdpSocket
let next_socket = NextRandomUdpSocket::new(&name_server);
// This set of futures collapses the next udp socket into a stream which can be used for
// sending and receiving udp packets.
let stream = Box::new(next_socket.map_ok(move |socket| UdpStream {
socket,
outbound_messages: outbound_messages.fuse().peekable(),
}));
(stream, message_sender)
}
/// Initialize the Stream with an already bound socket. Generally this should be only used for
/// server listening sockets. See `new` for a client oriented socket. Specifically, this there
/// is already a bound socket in this context, whereas `new` makes sure to randomize ports
/// for additional cache poison prevention.
///
/// # Arguments
///
/// * `socket` - an already bound UDP socket
///
/// # Return
///
/// a tuple of a Future Stream which will handle sending and receiving messsages, and a
/// handle which can be used to send messages into the stream.
pub fn with_bound(socket: S) -> (Self, BufStreamHandle) {
let (message_sender, outbound_messages) = unbounded();
let message_sender = BufStreamHandle::new(message_sender);
let stream = UdpStream {
socket,
outbound_messages: outbound_messages.fuse().peekable(),
};
(stream, message_sender)
}
#[allow(unused)]
pub(crate) fn from_parts(
socket: S,
outbound_messages: UnboundedReceiver<SerialMessage>,
) -> Self
|
}
impl<S: Send> UdpStream<S> {
#[allow(clippy::type_complexity)]
fn pollable_split(
&mut self,
) -> (
&mut S,
&mut Peekable<Fuse<UnboundedReceiver<SerialMessage>>>,
) {
(&mut self.socket, &mut self.outbound_messages)
}
}
impl<S: UdpSocket + Send +'static> Stream for UdpStream<S> {
type Item = Result<SerialMessage, io::Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let (socket, outbound_messages) = self.pollable_split();
let mut socket = Pin::new(socket);
let mut outbound_messages = Pin::new(outbound_messages);
// this will not accept incoming data while there is data to send
// makes this self throttling.
while let Poll::Ready(Some(message)) = outbound_messages.as_mut().poll_peek(cx) {
// first try to send
let addr = &message.addr();
// this wiil return if not ready,
// meaning that sending will be prefered over receiving...
// TODO: shouldn't this return the error to send to the sender?
ready!(socket.send_to(message.bytes(), addr).poll_unpin(cx))?;
// message sent, need to pop the message
assert!(outbound_messages.as_mut().poll_next(cx).is_ready());
}
// For QoS, this will only accept one message and output that
// receive all inbound messages
// TODO: this should match edns settings
let mut buf = [0u8; 4096];
let (len, src) = ready!(socket.recv_from(&mut buf).poll_unpin(cx))?;
Poll::Ready(Some(Ok(SerialMessage::new(
buf.iter().take(len).cloned().collect(),
src,
))))
}
}
#[must_use = "futures do nothing unless polled"]
pub(crate) struct NextRandomUdpSocket<S> {
bind_address: IpAddr,
marker: PhantomData<S>,
}
impl<S: UdpSocket> NextRandomUdpSocket<S> {
/// Creates a future for randomly binding to a local socket address for client connections.
pub(crate) fn new(name_server: &SocketAddr) -> NextRandomUdpSocket<S> {
let zero_addr: IpAddr = match *name_server {
SocketAddr::V4(..) => IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
SocketAddr::V6(..) => IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)),
};
NextRandomUdpSocket {
bind_address: zero_addr,
marker: PhantomData,
}
}
async fn bind(zero_addr: SocketAddr) -> Result<S, io::Error> {
S::bind(&zero_addr).await
}
}
impl<S: UdpSocket> Future for NextRandomUdpSocket<S> {
type Output = Result<S, io::Error>;
/// polls until there is an available next random UDP port.
///
/// if there is no port available after 10 attempts, returns NotReady
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let rand_port_range = Uniform::new_inclusive(1025_u16, u16::max_value());
let mut rand = rand::thread_rng();
for attempt in 0..10 {
let port = rand_port_range.sample(&mut rand); // the range is [0... u16::max]
let zero_addr = SocketAddr::new(self.bind_address, port);
// TODO: allow TTL to be adjusted...
// TODO: this immediate poll might be wrong in some cases...
match Box::pin(Self::bind(zero_addr)).as_mut().poll(cx) {
Poll::Ready(Ok(socket)) => {
debug!("created socket successfully");
return Poll::Ready(Ok(socket));
}
Poll::Ready(Err(err)) => {
debug!("unable to bind port, attempt: {}: {}", attempt, err)
}
Poll::Pending => debug!("unable to bind port, attempt: {}", attempt),
}
}
debug!("could not get next random port, delaying");
// TODO: because no interest is registered anywhere, we must awake.
cx.waker().wake_by_ref();
// returning NotReady here, perhaps the next poll there will be some more socket available.
Poll::Pending
}
}
#[cfg(feature = "tokio-runtime")]
#[async_trait]
impl UdpSocket for tokio::net::UdpSocket {
async fn bind(addr: &SocketAddr) -> io::Result<Self> {
tokio::net::UdpSocket::bind(addr).await
}
async fn recv_from(&mut self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.recv_from(buf).await
}
async fn send_to(&mut self, buf: &[u8], target: &SocketAddr) -> io::Result<usize> {
self.send_to(buf, target).await
}
}
#[cfg(test)]
#[cfg(feature = "tokio-runtime")]
mod tests {
#[cfg(not(target_os = "linux"))] // ignored until Travis-CI fixes IPv6
use std::net::Ipv6Addr;
use std::net::{IpAddr, Ipv4Addr};
use tokio::{net::UdpSocket as TokioUdpSocket, runtime::Runtime};
#[test]
fn test_next_random_socket() {
use crate::tests::next_random_socket_test;
let io_loop = Runtime::new().expect("failed to create tokio runtime");
next_random_socket_test::<TokioUdpSocket, Runtime>(io_loop)
}
#[test]
fn test_udp_stream_ipv4() {
use crate::tests::udp_stream_test;
let io_loop = Runtime::new().expect("failed to create tokio runtime");
udp_stream_test::<TokioUdpSocket, Runtime>(
IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
io_loop,
);
}
#[test]
#[cfg(not(target_os = "linux"))] // ignored until Travis-CI fixes IPv6
fn test_udp_stream_ipv6() {
use crate::tests::udp_stream_test;
let io_loop = Runtime::new().expect("failed to create tokio runtime");
udp_stream_test::<TokioUdpSocket, Runtime>(
IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)),
io_loop,
);
}
}
|
{
UdpStream {
socket,
outbound_messages: outbound_messages.fuse().peekable(),
}
}
|
identifier_body
|
window.rs
|
use std::rc::Rc;
use super::*;
use crate::{proc_macros::*, widget_base::*};
/// The enumeration of valid window events.
#[derive(Clone, Event)]
pub enum WindowEvent {
Resize { width: f64, height: f64 },
ActiveChanged(bool),
None,
}
pub type WindowHandlerFn = dyn Fn(&mut StatesContext, WindowEvent) -> bool +'static;
/// The structure handling windows events.
#[derive(IntoHandler)]
pub struct
|
{
/// A reference counted handler.
pub handler: Rc<WindowHandlerFn>,
}
impl EventHandler for WindowEventHandler {
fn handle_event(&self, states: &mut StatesContext, event: &EventBox) -> bool {
if let Ok(event) = event.downcast_ref::<WindowEvent>() {
return (self.handler)(states, event.clone());
}
false
}
fn handles_event(&self, event: &EventBox) -> bool {
event.is_type::<WindowEvent>()
}
}
|
WindowEventHandler
|
identifier_name
|
window.rs
|
use std::rc::Rc;
use super::*;
use crate::{proc_macros::*, widget_base::*};
/// The enumeration of valid window events.
#[derive(Clone, Event)]
pub enum WindowEvent {
Resize { width: f64, height: f64 },
ActiveChanged(bool),
None,
}
pub type WindowHandlerFn = dyn Fn(&mut StatesContext, WindowEvent) -> bool +'static;
/// The structure handling windows events.
#[derive(IntoHandler)]
pub struct WindowEventHandler {
|
/// A reference counted handler.
pub handler: Rc<WindowHandlerFn>,
}
impl EventHandler for WindowEventHandler {
fn handle_event(&self, states: &mut StatesContext, event: &EventBox) -> bool {
if let Ok(event) = event.downcast_ref::<WindowEvent>() {
return (self.handler)(states, event.clone());
}
false
}
fn handles_event(&self, event: &EventBox) -> bool {
event.is_type::<WindowEvent>()
}
}
|
random_line_split
|
|
window.rs
|
use std::rc::Rc;
use super::*;
use crate::{proc_macros::*, widget_base::*};
/// The enumeration of valid window events.
#[derive(Clone, Event)]
pub enum WindowEvent {
Resize { width: f64, height: f64 },
ActiveChanged(bool),
None,
}
pub type WindowHandlerFn = dyn Fn(&mut StatesContext, WindowEvent) -> bool +'static;
/// The structure handling windows events.
#[derive(IntoHandler)]
pub struct WindowEventHandler {
/// A reference counted handler.
pub handler: Rc<WindowHandlerFn>,
}
impl EventHandler for WindowEventHandler {
fn handle_event(&self, states: &mut StatesContext, event: &EventBox) -> bool {
if let Ok(event) = event.downcast_ref::<WindowEvent>()
|
false
}
fn handles_event(&self, event: &EventBox) -> bool {
event.is_type::<WindowEvent>()
}
}
|
{
return (self.handler)(states, event.clone());
}
|
conditional_block
|
error.rs
|
use crate::error::{Error, ErrorKind, ExpectedNumberOfArgs};
use std::error;
use std::fmt;
use std::io;
#[derive(Debug)]
struct DummyError;
impl fmt::Display for DummyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "dummy")
}
}
impl error::Error for DummyError {}
fn dummy_io_error() -> Box<Error>
|
#[test]
fn error_message_formating() {
let err = Error::new(ErrorKind::WrongNumberOfArgs {
expected: ExpectedNumberOfArgs::Range(0, 1),
actual: 2,
kind: "diff".to_string(),
});
assert!(matches!(err.kind(), ErrorKind::WrongNumberOfArgs {.. }));
let msg = format!("{}", err);
assert!(msg.contains("Invalid number of arguments"), "{:?}", msg);
}
#[test]
fn print_error_to_stderr() {
dummy_io_error().eprintln();
}
#[test]
fn inner_error_as_source() {
use error::Error;
let err = crate::error::Error::new(ErrorKind::DiffDotsNotFound);
assert!(err.source().is_none());
let err = dummy_io_error();
let inner = err.source().unwrap();
assert!(format!("{}", inner).contains("dummy"), "{:?}", inner);
}
|
{
let inner = DummyError;
let io_err = io::Error::new(io::ErrorKind::Other, inner);
Error::new(ErrorKind::IoError(io_err))
}
|
identifier_body
|
error.rs
|
use crate::error::{Error, ErrorKind, ExpectedNumberOfArgs};
use std::error;
use std::fmt;
use std::io;
#[derive(Debug)]
struct DummyError;
impl fmt::Display for DummyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "dummy")
}
}
impl error::Error for DummyError {}
fn dummy_io_error() -> Box<Error> {
let inner = DummyError;
let io_err = io::Error::new(io::ErrorKind::Other, inner);
Error::new(ErrorKind::IoError(io_err))
}
#[test]
fn error_message_formating() {
let err = Error::new(ErrorKind::WrongNumberOfArgs {
expected: ExpectedNumberOfArgs::Range(0, 1),
actual: 2,
kind: "diff".to_string(),
});
assert!(matches!(err.kind(), ErrorKind::WrongNumberOfArgs {.. }));
let msg = format!("{}", err);
assert!(msg.contains("Invalid number of arguments"), "{:?}", msg);
}
#[test]
fn print_error_to_stderr() {
dummy_io_error().eprintln();
}
#[test]
fn
|
() {
use error::Error;
let err = crate::error::Error::new(ErrorKind::DiffDotsNotFound);
assert!(err.source().is_none());
let err = dummy_io_error();
let inner = err.source().unwrap();
assert!(format!("{}", inner).contains("dummy"), "{:?}", inner);
}
|
inner_error_as_source
|
identifier_name
|
error.rs
|
use crate::error::{Error, ErrorKind, ExpectedNumberOfArgs};
use std::error;
use std::fmt;
use std::io;
#[derive(Debug)]
struct DummyError;
impl fmt::Display for DummyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "dummy")
}
}
impl error::Error for DummyError {}
fn dummy_io_error() -> Box<Error> {
let inner = DummyError;
let io_err = io::Error::new(io::ErrorKind::Other, inner);
Error::new(ErrorKind::IoError(io_err))
}
#[test]
fn error_message_formating() {
let err = Error::new(ErrorKind::WrongNumberOfArgs {
expected: ExpectedNumberOfArgs::Range(0, 1),
actual: 2,
kind: "diff".to_string(),
});
assert!(matches!(err.kind(), ErrorKind::WrongNumberOfArgs {.. }));
let msg = format!("{}", err);
assert!(msg.contains("Invalid number of arguments"), "{:?}", msg);
}
#[test]
fn print_error_to_stderr() {
dummy_io_error().eprintln();
}
#[test]
fn inner_error_as_source() {
use error::Error;
let err = crate::error::Error::new(ErrorKind::DiffDotsNotFound);
assert!(err.source().is_none());
let err = dummy_io_error();
let inner = err.source().unwrap();
assert!(format!("{}", inner).contains("dummy"), "{:?}", inner);
|
}
|
random_line_split
|
|
ref_mode.rs
|
use super::{c_type::is_mut_ptr, record_type::RecordType};
use crate::{config::gobjects::GObject, env, library};
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum RefMode {
None,
ByRef,
ByRefMut,
ByRefImmut, //immutable reference with mutable pointer in sys
ByRefConst, //instance parameters in trait function with const pointer in sys
ByRefFake,
}
impl RefMode {
#[inline]
pub fn of(
env: &env::Env,
tid: library::TypeId,
direction: library::ParameterDirection,
) -> RefMode {
let library = &env.library;
if let Some(&GObject {
ref_mode: Some(ref_mode),
..
}) = env.config.objects.get(&tid.full_name(library))
{
if direction == library::ParameterDirection::In {
return ref_mode;
} else {
return RefMode::None;
}
}
use crate::library::Type::*;
match *library.type_(tid) {
Fundamental(library::Fundamental::Utf8)
| Fundamental(library::Fundamental::Filename)
| Fundamental(library::Fundamental::OsString)
| Class(..)
| Interface(..)
| List(..)
| SList(..)
| PtrArray(..)
| CArray(..) => {
if direction == library::ParameterDirection::In {
RefMode::ByRef
} else {
RefMode::None
}
}
Record(ref record) => {
if direction == library::ParameterDirection::In {
if let RecordType::Refcounted = RecordType::of(record) {
RefMode::ByRef
} else {
RefMode::ByRefMut
}
} else {
RefMode::None
}
|
if direction == library::ParameterDirection::In {
RefMode::ByRefMut
} else {
RefMode::None
}
}
Alias(ref alias) => RefMode::of(env, alias.typ, direction),
_ => RefMode::None,
}
}
pub fn without_unneeded_mut(
env: &env::Env,
par: &library::Parameter,
immutable: bool,
self_in_trait: bool,
) -> RefMode {
use self::RefMode::*;
let ref_mode = RefMode::of(env, par.typ, par.direction);
match ref_mode {
ByRefMut if!is_mut_ptr(&*par.c_type) => ByRef,
ByRefMut if immutable => ByRefImmut,
ByRef if self_in_trait &&!is_mut_ptr(&*par.c_type) => ByRefConst,
ref_mode => ref_mode,
}
}
pub fn is_ref(self) -> bool {
use self::RefMode::*;
match self {
None => false,
ByRef => true,
ByRefMut => true,
ByRefImmut => true,
ByRefConst => true,
ByRefFake => true,
}
}
}
|
}
Union(..) => {
|
random_line_split
|
ref_mode.rs
|
use super::{c_type::is_mut_ptr, record_type::RecordType};
use crate::{config::gobjects::GObject, env, library};
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum
|
{
None,
ByRef,
ByRefMut,
ByRefImmut, //immutable reference with mutable pointer in sys
ByRefConst, //instance parameters in trait function with const pointer in sys
ByRefFake,
}
impl RefMode {
#[inline]
pub fn of(
env: &env::Env,
tid: library::TypeId,
direction: library::ParameterDirection,
) -> RefMode {
let library = &env.library;
if let Some(&GObject {
ref_mode: Some(ref_mode),
..
}) = env.config.objects.get(&tid.full_name(library))
{
if direction == library::ParameterDirection::In {
return ref_mode;
} else {
return RefMode::None;
}
}
use crate::library::Type::*;
match *library.type_(tid) {
Fundamental(library::Fundamental::Utf8)
| Fundamental(library::Fundamental::Filename)
| Fundamental(library::Fundamental::OsString)
| Class(..)
| Interface(..)
| List(..)
| SList(..)
| PtrArray(..)
| CArray(..) => {
if direction == library::ParameterDirection::In {
RefMode::ByRef
} else {
RefMode::None
}
}
Record(ref record) => {
if direction == library::ParameterDirection::In {
if let RecordType::Refcounted = RecordType::of(record) {
RefMode::ByRef
} else {
RefMode::ByRefMut
}
} else {
RefMode::None
}
}
Union(..) => {
if direction == library::ParameterDirection::In {
RefMode::ByRefMut
} else {
RefMode::None
}
}
Alias(ref alias) => RefMode::of(env, alias.typ, direction),
_ => RefMode::None,
}
}
pub fn without_unneeded_mut(
env: &env::Env,
par: &library::Parameter,
immutable: bool,
self_in_trait: bool,
) -> RefMode {
use self::RefMode::*;
let ref_mode = RefMode::of(env, par.typ, par.direction);
match ref_mode {
ByRefMut if!is_mut_ptr(&*par.c_type) => ByRef,
ByRefMut if immutable => ByRefImmut,
ByRef if self_in_trait &&!is_mut_ptr(&*par.c_type) => ByRefConst,
ref_mode => ref_mode,
}
}
pub fn is_ref(self) -> bool {
use self::RefMode::*;
match self {
None => false,
ByRef => true,
ByRefMut => true,
ByRefImmut => true,
ByRefConst => true,
ByRefFake => true,
}
}
}
|
RefMode
|
identifier_name
|
ref_mode.rs
|
use super::{c_type::is_mut_ptr, record_type::RecordType};
use crate::{config::gobjects::GObject, env, library};
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum RefMode {
None,
ByRef,
ByRefMut,
ByRefImmut, //immutable reference with mutable pointer in sys
ByRefConst, //instance parameters in trait function with const pointer in sys
ByRefFake,
}
impl RefMode {
#[inline]
pub fn of(
env: &env::Env,
tid: library::TypeId,
direction: library::ParameterDirection,
) -> RefMode {
let library = &env.library;
if let Some(&GObject {
ref_mode: Some(ref_mode),
..
}) = env.config.objects.get(&tid.full_name(library))
{
if direction == library::ParameterDirection::In {
return ref_mode;
} else {
return RefMode::None;
}
}
use crate::library::Type::*;
match *library.type_(tid) {
Fundamental(library::Fundamental::Utf8)
| Fundamental(library::Fundamental::Filename)
| Fundamental(library::Fundamental::OsString)
| Class(..)
| Interface(..)
| List(..)
| SList(..)
| PtrArray(..)
| CArray(..) => {
if direction == library::ParameterDirection::In {
RefMode::ByRef
} else {
RefMode::None
}
}
Record(ref record) =>
|
Union(..) => {
if direction == library::ParameterDirection::In {
RefMode::ByRefMut
} else {
RefMode::None
}
}
Alias(ref alias) => RefMode::of(env, alias.typ, direction),
_ => RefMode::None,
}
}
pub fn without_unneeded_mut(
env: &env::Env,
par: &library::Parameter,
immutable: bool,
self_in_trait: bool,
) -> RefMode {
use self::RefMode::*;
let ref_mode = RefMode::of(env, par.typ, par.direction);
match ref_mode {
ByRefMut if!is_mut_ptr(&*par.c_type) => ByRef,
ByRefMut if immutable => ByRefImmut,
ByRef if self_in_trait &&!is_mut_ptr(&*par.c_type) => ByRefConst,
ref_mode => ref_mode,
}
}
pub fn is_ref(self) -> bool {
use self::RefMode::*;
match self {
None => false,
ByRef => true,
ByRefMut => true,
ByRefImmut => true,
ByRefConst => true,
ByRefFake => true,
}
}
}
|
{
if direction == library::ParameterDirection::In {
if let RecordType::Refcounted = RecordType::of(record) {
RefMode::ByRef
} else {
RefMode::ByRefMut
}
} else {
RefMode::None
}
}
|
conditional_block
|
arc_slice.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! A thin atomically-reference-counted slice.
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use servo_arc::ThinArc;
use std::ops::Deref;
use std::ptr::NonNull;
use std::{iter, mem};
/// A canary that we stash in ArcSlices.
///
/// Given we cannot use a zero-sized-type for the header, since well, C++
/// doesn't have zsts, and we want to use cbindgen for this type, we may as well
/// assert some sanity at runtime.
///
/// We use an u64, to guarantee that we can use a single singleton for every
/// empty slice, even if the types they hold are aligned differently.
const ARC_SLICE_CANARY: u64 = 0xf3f3f3f3f3f3f3f3;
/// A wrapper type for a refcounted slice using ThinArc.
///
/// cbindgen:derive-eq=false
/// cbindgen:derive-neq=false
#[repr(C)]
#[derive(Debug, Eq, PartialEq, ToShmem)]
pub struct ArcSlice<T>(#[shmem(field_bound)] ThinArc<u64, T>);
impl<T> Deref for ArcSlice<T> {
type Target = [T];
#[inline]
fn deref(&self) -> &Self::Target {
debug_assert_eq!(self.0.header.header, ARC_SLICE_CANARY);
&self.0.slice
}
}
impl<T> Clone for ArcSlice<T> {
fn clone(&self) -> Self {
ArcSlice(self.0.clone())
}
}
lazy_static! {
// ThinArc doesn't support alignments greater than align_of::<u64>.
static ref EMPTY_ARC_SLICE: ArcSlice<u64> = {
ArcSlice::from_iter_leaked(iter::empty())
};
}
impl<T> Default for ArcSlice<T> {
#[allow(unsafe_code)]
fn
|
() -> Self {
debug_assert!(
mem::align_of::<T>() <= mem::align_of::<u64>(),
"Need to increase the alignment of EMPTY_ARC_SLICE"
);
unsafe {
let empty: ArcSlice<_> = EMPTY_ARC_SLICE.clone();
let empty: Self = mem::transmute(empty);
debug_assert_eq!(empty.len(), 0);
empty
}
}
}
impl<T: Serialize> Serialize for ArcSlice<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.deref().serialize(serializer)
}
}
impl<'de, T: Deserialize<'de>> Deserialize<'de> for ArcSlice<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let r = Vec::deserialize(deserializer)?;
Ok(ArcSlice::from_iter(r.into_iter()))
}
}
impl<T> ArcSlice<T> {
/// Creates an Arc for a slice using the given iterator to generate the
/// slice.
#[inline]
pub fn from_iter<I>(items: I) -> Self
where
I: Iterator<Item = T> + ExactSizeIterator,
{
if items.len() == 0 {
return Self::default();
}
ArcSlice(ThinArc::from_header_and_iter(ARC_SLICE_CANARY, items))
}
/// Creates an Arc for a slice using the given iterator to generate the
/// slice, and marks the arc as intentionally leaked from the refcount
/// logging point of view.
#[inline]
pub fn from_iter_leaked<I>(items: I) -> Self
where
I: Iterator<Item = T> + ExactSizeIterator,
{
let thin_arc = ThinArc::from_header_and_iter(ARC_SLICE_CANARY, items);
thin_arc.with_arc(|a| a.mark_as_intentionally_leaked());
ArcSlice(thin_arc)
}
/// Creates a value that can be passed via FFI, and forgets this value
/// altogether.
#[inline]
#[allow(unsafe_code)]
pub fn forget(self) -> ForgottenArcSlicePtr<T> {
let ret = unsafe {
ForgottenArcSlicePtr(NonNull::new_unchecked(self.0.ptr() as *const _ as *mut _))
};
mem::forget(self);
ret
}
/// Leaks an empty arc slice pointer, and returns it. Only to be used to
/// construct ArcSlices from FFI.
#[inline]
pub fn leaked_empty_ptr() -> *mut std::os::raw::c_void {
let empty: ArcSlice<_> = EMPTY_ARC_SLICE.clone();
let ptr = empty.0.ptr();
std::mem::forget(empty);
ptr as *mut _
}
}
/// The inner pointer of an ArcSlice<T>, to be sent via FFI.
/// The type of the pointer is a bit of a lie, we just want to preserve the type
/// but these pointers cannot be constructed outside of this crate, so we're
/// good.
#[repr(C)]
pub struct ForgottenArcSlicePtr<T>(NonNull<T>);
|
default
|
identifier_name
|
arc_slice.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! A thin atomically-reference-counted slice.
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use servo_arc::ThinArc;
use std::ops::Deref;
use std::ptr::NonNull;
use std::{iter, mem};
/// A canary that we stash in ArcSlices.
///
/// Given we cannot use a zero-sized-type for the header, since well, C++
/// doesn't have zsts, and we want to use cbindgen for this type, we may as well
/// assert some sanity at runtime.
///
/// We use an u64, to guarantee that we can use a single singleton for every
/// empty slice, even if the types they hold are aligned differently.
const ARC_SLICE_CANARY: u64 = 0xf3f3f3f3f3f3f3f3;
/// A wrapper type for a refcounted slice using ThinArc.
///
/// cbindgen:derive-eq=false
/// cbindgen:derive-neq=false
#[repr(C)]
#[derive(Debug, Eq, PartialEq, ToShmem)]
pub struct ArcSlice<T>(#[shmem(field_bound)] ThinArc<u64, T>);
impl<T> Deref for ArcSlice<T> {
type Target = [T];
#[inline]
fn deref(&self) -> &Self::Target {
debug_assert_eq!(self.0.header.header, ARC_SLICE_CANARY);
&self.0.slice
}
}
impl<T> Clone for ArcSlice<T> {
fn clone(&self) -> Self {
ArcSlice(self.0.clone())
}
}
lazy_static! {
// ThinArc doesn't support alignments greater than align_of::<u64>.
static ref EMPTY_ARC_SLICE: ArcSlice<u64> = {
ArcSlice::from_iter_leaked(iter::empty())
};
}
impl<T> Default for ArcSlice<T> {
#[allow(unsafe_code)]
fn default() -> Self {
debug_assert!(
mem::align_of::<T>() <= mem::align_of::<u64>(),
"Need to increase the alignment of EMPTY_ARC_SLICE"
);
unsafe {
let empty: ArcSlice<_> = EMPTY_ARC_SLICE.clone();
let empty: Self = mem::transmute(empty);
debug_assert_eq!(empty.len(), 0);
empty
}
}
}
impl<T: Serialize> Serialize for ArcSlice<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.deref().serialize(serializer)
}
}
impl<'de, T: Deserialize<'de>> Deserialize<'de> for ArcSlice<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let r = Vec::deserialize(deserializer)?;
Ok(ArcSlice::from_iter(r.into_iter()))
}
}
impl<T> ArcSlice<T> {
/// Creates an Arc for a slice using the given iterator to generate the
/// slice.
#[inline]
pub fn from_iter<I>(items: I) -> Self
where
I: Iterator<Item = T> + ExactSizeIterator,
|
/// Creates an Arc for a slice using the given iterator to generate the
/// slice, and marks the arc as intentionally leaked from the refcount
/// logging point of view.
#[inline]
pub fn from_iter_leaked<I>(items: I) -> Self
where
I: Iterator<Item = T> + ExactSizeIterator,
{
let thin_arc = ThinArc::from_header_and_iter(ARC_SLICE_CANARY, items);
thin_arc.with_arc(|a| a.mark_as_intentionally_leaked());
ArcSlice(thin_arc)
}
/// Creates a value that can be passed via FFI, and forgets this value
/// altogether.
#[inline]
#[allow(unsafe_code)]
pub fn forget(self) -> ForgottenArcSlicePtr<T> {
let ret = unsafe {
ForgottenArcSlicePtr(NonNull::new_unchecked(self.0.ptr() as *const _ as *mut _))
};
mem::forget(self);
ret
}
/// Leaks an empty arc slice pointer, and returns it. Only to be used to
/// construct ArcSlices from FFI.
#[inline]
pub fn leaked_empty_ptr() -> *mut std::os::raw::c_void {
let empty: ArcSlice<_> = EMPTY_ARC_SLICE.clone();
let ptr = empty.0.ptr();
std::mem::forget(empty);
ptr as *mut _
}
}
/// The inner pointer of an ArcSlice<T>, to be sent via FFI.
/// The type of the pointer is a bit of a lie, we just want to preserve the type
/// but these pointers cannot be constructed outside of this crate, so we're
/// good.
#[repr(C)]
pub struct ForgottenArcSlicePtr<T>(NonNull<T>);
|
{
if items.len() == 0 {
return Self::default();
}
ArcSlice(ThinArc::from_header_and_iter(ARC_SLICE_CANARY, items))
}
|
identifier_body
|
arc_slice.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! A thin atomically-reference-counted slice.
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use servo_arc::ThinArc;
use std::ops::Deref;
use std::ptr::NonNull;
use std::{iter, mem};
/// A canary that we stash in ArcSlices.
///
/// Given we cannot use a zero-sized-type for the header, since well, C++
/// doesn't have zsts, and we want to use cbindgen for this type, we may as well
/// assert some sanity at runtime.
///
/// We use an u64, to guarantee that we can use a single singleton for every
/// empty slice, even if the types they hold are aligned differently.
const ARC_SLICE_CANARY: u64 = 0xf3f3f3f3f3f3f3f3;
/// A wrapper type for a refcounted slice using ThinArc.
///
/// cbindgen:derive-eq=false
/// cbindgen:derive-neq=false
#[repr(C)]
#[derive(Debug, Eq, PartialEq, ToShmem)]
pub struct ArcSlice<T>(#[shmem(field_bound)] ThinArc<u64, T>);
impl<T> Deref for ArcSlice<T> {
type Target = [T];
#[inline]
fn deref(&self) -> &Self::Target {
debug_assert_eq!(self.0.header.header, ARC_SLICE_CANARY);
&self.0.slice
}
}
impl<T> Clone for ArcSlice<T> {
fn clone(&self) -> Self {
ArcSlice(self.0.clone())
}
}
lazy_static! {
// ThinArc doesn't support alignments greater than align_of::<u64>.
static ref EMPTY_ARC_SLICE: ArcSlice<u64> = {
ArcSlice::from_iter_leaked(iter::empty())
};
}
impl<T> Default for ArcSlice<T> {
#[allow(unsafe_code)]
fn default() -> Self {
debug_assert!(
mem::align_of::<T>() <= mem::align_of::<u64>(),
"Need to increase the alignment of EMPTY_ARC_SLICE"
);
unsafe {
let empty: ArcSlice<_> = EMPTY_ARC_SLICE.clone();
let empty: Self = mem::transmute(empty);
debug_assert_eq!(empty.len(), 0);
empty
}
}
}
impl<T: Serialize> Serialize for ArcSlice<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.deref().serialize(serializer)
}
}
impl<'de, T: Deserialize<'de>> Deserialize<'de> for ArcSlice<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let r = Vec::deserialize(deserializer)?;
Ok(ArcSlice::from_iter(r.into_iter()))
}
}
impl<T> ArcSlice<T> {
/// Creates an Arc for a slice using the given iterator to generate the
/// slice.
#[inline]
pub fn from_iter<I>(items: I) -> Self
where
I: Iterator<Item = T> + ExactSizeIterator,
{
if items.len() == 0
|
ArcSlice(ThinArc::from_header_and_iter(ARC_SLICE_CANARY, items))
}
/// Creates an Arc for a slice using the given iterator to generate the
/// slice, and marks the arc as intentionally leaked from the refcount
/// logging point of view.
#[inline]
pub fn from_iter_leaked<I>(items: I) -> Self
where
I: Iterator<Item = T> + ExactSizeIterator,
{
let thin_arc = ThinArc::from_header_and_iter(ARC_SLICE_CANARY, items);
thin_arc.with_arc(|a| a.mark_as_intentionally_leaked());
ArcSlice(thin_arc)
}
/// Creates a value that can be passed via FFI, and forgets this value
/// altogether.
#[inline]
#[allow(unsafe_code)]
pub fn forget(self) -> ForgottenArcSlicePtr<T> {
let ret = unsafe {
ForgottenArcSlicePtr(NonNull::new_unchecked(self.0.ptr() as *const _ as *mut _))
};
mem::forget(self);
ret
}
/// Leaks an empty arc slice pointer, and returns it. Only to be used to
/// construct ArcSlices from FFI.
#[inline]
pub fn leaked_empty_ptr() -> *mut std::os::raw::c_void {
let empty: ArcSlice<_> = EMPTY_ARC_SLICE.clone();
let ptr = empty.0.ptr();
std::mem::forget(empty);
ptr as *mut _
}
}
/// The inner pointer of an ArcSlice<T>, to be sent via FFI.
/// The type of the pointer is a bit of a lie, we just want to preserve the type
/// but these pointers cannot be constructed outside of this crate, so we're
/// good.
#[repr(C)]
pub struct ForgottenArcSlicePtr<T>(NonNull<T>);
|
{
return Self::default();
}
|
conditional_block
|
arc_slice.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! A thin atomically-reference-counted slice.
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use servo_arc::ThinArc;
use std::ops::Deref;
use std::ptr::NonNull;
use std::{iter, mem};
/// A canary that we stash in ArcSlices.
///
/// Given we cannot use a zero-sized-type for the header, since well, C++
/// doesn't have zsts, and we want to use cbindgen for this type, we may as well
/// assert some sanity at runtime.
///
/// We use an u64, to guarantee that we can use a single singleton for every
/// empty slice, even if the types they hold are aligned differently.
const ARC_SLICE_CANARY: u64 = 0xf3f3f3f3f3f3f3f3;
/// A wrapper type for a refcounted slice using ThinArc.
///
/// cbindgen:derive-eq=false
/// cbindgen:derive-neq=false
#[repr(C)]
#[derive(Debug, Eq, PartialEq, ToShmem)]
pub struct ArcSlice<T>(#[shmem(field_bound)] ThinArc<u64, T>);
impl<T> Deref for ArcSlice<T> {
type Target = [T];
#[inline]
fn deref(&self) -> &Self::Target {
|
}
impl<T> Clone for ArcSlice<T> {
fn clone(&self) -> Self {
ArcSlice(self.0.clone())
}
}
lazy_static! {
// ThinArc doesn't support alignments greater than align_of::<u64>.
static ref EMPTY_ARC_SLICE: ArcSlice<u64> = {
ArcSlice::from_iter_leaked(iter::empty())
};
}
impl<T> Default for ArcSlice<T> {
#[allow(unsafe_code)]
fn default() -> Self {
debug_assert!(
mem::align_of::<T>() <= mem::align_of::<u64>(),
"Need to increase the alignment of EMPTY_ARC_SLICE"
);
unsafe {
let empty: ArcSlice<_> = EMPTY_ARC_SLICE.clone();
let empty: Self = mem::transmute(empty);
debug_assert_eq!(empty.len(), 0);
empty
}
}
}
impl<T: Serialize> Serialize for ArcSlice<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.deref().serialize(serializer)
}
}
impl<'de, T: Deserialize<'de>> Deserialize<'de> for ArcSlice<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let r = Vec::deserialize(deserializer)?;
Ok(ArcSlice::from_iter(r.into_iter()))
}
}
impl<T> ArcSlice<T> {
/// Creates an Arc for a slice using the given iterator to generate the
/// slice.
#[inline]
pub fn from_iter<I>(items: I) -> Self
where
I: Iterator<Item = T> + ExactSizeIterator,
{
if items.len() == 0 {
return Self::default();
}
ArcSlice(ThinArc::from_header_and_iter(ARC_SLICE_CANARY, items))
}
/// Creates an Arc for a slice using the given iterator to generate the
/// slice, and marks the arc as intentionally leaked from the refcount
/// logging point of view.
#[inline]
pub fn from_iter_leaked<I>(items: I) -> Self
where
I: Iterator<Item = T> + ExactSizeIterator,
{
let thin_arc = ThinArc::from_header_and_iter(ARC_SLICE_CANARY, items);
thin_arc.with_arc(|a| a.mark_as_intentionally_leaked());
ArcSlice(thin_arc)
}
/// Creates a value that can be passed via FFI, and forgets this value
/// altogether.
#[inline]
#[allow(unsafe_code)]
pub fn forget(self) -> ForgottenArcSlicePtr<T> {
let ret = unsafe {
ForgottenArcSlicePtr(NonNull::new_unchecked(self.0.ptr() as *const _ as *mut _))
};
mem::forget(self);
ret
}
/// Leaks an empty arc slice pointer, and returns it. Only to be used to
/// construct ArcSlices from FFI.
#[inline]
pub fn leaked_empty_ptr() -> *mut std::os::raw::c_void {
let empty: ArcSlice<_> = EMPTY_ARC_SLICE.clone();
let ptr = empty.0.ptr();
std::mem::forget(empty);
ptr as *mut _
}
}
/// The inner pointer of an ArcSlice<T>, to be sent via FFI.
/// The type of the pointer is a bit of a lie, we just want to preserve the type
/// but these pointers cannot be constructed outside of this crate, so we're
/// good.
#[repr(C)]
pub struct ForgottenArcSlicePtr<T>(NonNull<T>);
|
debug_assert_eq!(self.0.header.header, ARC_SLICE_CANARY);
&self.0.slice
}
|
random_line_split
|
sendfn-spawn-with-fn-arg.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::Cell;
use std::task;
pub fn main() { test05(); }
fn test05_start(f: ~fn(int)) {
f(22);
}
fn test05() {
let three = ~3;
|
};
let fn_to_send = Cell::new(fn_to_send);
task::spawn(|| {
test05_start(fn_to_send.take());
});
}
|
let fn_to_send: ~fn(int) = |n| {
error2!("{}", *three + n); // will copy x into the closure
assert_eq!(*three, 3);
|
random_line_split
|
sendfn-spawn-with-fn-arg.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::Cell;
use std::task;
pub fn main() { test05(); }
fn test05_start(f: ~fn(int)) {
f(22);
}
fn test05()
|
{
let three = ~3;
let fn_to_send: ~fn(int) = |n| {
error2!("{}", *three + n); // will copy x into the closure
assert_eq!(*three, 3);
};
let fn_to_send = Cell::new(fn_to_send);
task::spawn(|| {
test05_start(fn_to_send.take());
});
}
|
identifier_body
|
|
sendfn-spawn-with-fn-arg.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::Cell;
use std::task;
pub fn main() { test05(); }
fn
|
(f: ~fn(int)) {
f(22);
}
fn test05() {
let three = ~3;
let fn_to_send: ~fn(int) = |n| {
error2!("{}", *three + n); // will copy x into the closure
assert_eq!(*three, 3);
};
let fn_to_send = Cell::new(fn_to_send);
task::spawn(|| {
test05_start(fn_to_send.take());
});
}
|
test05_start
|
identifier_name
|
log_utils.rs
|
use super::{
log_messages::{CommandLogMessage, ErrorLogMessage, SessionLogMessage},
LogSender,
};
pub async fn log_command(log_tx: &LogSender, player_name: String, message: String)
|
pub async fn log_error(log_tx: &LogSender, message: String) {
if let Err(error) = log_tx.send(Box::new(ErrorLogMessage::new(message))).await {
println!("Could not send log message: {:?}", error);
}
}
pub async fn log_session_event(log_tx: &LogSender, source: String, message: String) {
if let Err(error) = log_tx
.send(Box::new(SessionLogMessage::new(source, message)))
.await
{
println!("Could not send log message: {:?}", error);
}
}
|
{
if let Err(error) = log_tx
.send(Box::new(CommandLogMessage::new(player_name, message)))
.await
{
println!("Could not send log message: {:?}", error);
}
}
|
identifier_body
|
log_utils.rs
|
use super::{
log_messages::{CommandLogMessage, ErrorLogMessage, SessionLogMessage},
LogSender,
};
pub async fn log_command(log_tx: &LogSender, player_name: String, message: String) {
if let Err(error) = log_tx
.send(Box::new(CommandLogMessage::new(player_name, message)))
.await
{
println!("Could not send log message: {:?}", error);
}
}
pub async fn
|
(log_tx: &LogSender, message: String) {
if let Err(error) = log_tx.send(Box::new(ErrorLogMessage::new(message))).await {
println!("Could not send log message: {:?}", error);
}
}
pub async fn log_session_event(log_tx: &LogSender, source: String, message: String) {
if let Err(error) = log_tx
.send(Box::new(SessionLogMessage::new(source, message)))
.await
{
println!("Could not send log message: {:?}", error);
}
}
|
log_error
|
identifier_name
|
log_utils.rs
|
use super::{
log_messages::{CommandLogMessage, ErrorLogMessage, SessionLogMessage},
LogSender,
};
pub async fn log_command(log_tx: &LogSender, player_name: String, message: String) {
if let Err(error) = log_tx
.send(Box::new(CommandLogMessage::new(player_name, message)))
.await
{
println!("Could not send log message: {:?}", error);
}
}
pub async fn log_error(log_tx: &LogSender, message: String) {
if let Err(error) = log_tx.send(Box::new(ErrorLogMessage::new(message))).await
|
}
pub async fn log_session_event(log_tx: &LogSender, source: String, message: String) {
if let Err(error) = log_tx
.send(Box::new(SessionLogMessage::new(source, message)))
.await
{
println!("Could not send log message: {:?}", error);
}
}
|
{
println!("Could not send log message: {:?}", error);
}
|
conditional_block
|
log_utils.rs
|
use super::{
log_messages::{CommandLogMessage, ErrorLogMessage, SessionLogMessage},
LogSender,
|
.send(Box::new(CommandLogMessage::new(player_name, message)))
.await
{
println!("Could not send log message: {:?}", error);
}
}
pub async fn log_error(log_tx: &LogSender, message: String) {
if let Err(error) = log_tx.send(Box::new(ErrorLogMessage::new(message))).await {
println!("Could not send log message: {:?}", error);
}
}
pub async fn log_session_event(log_tx: &LogSender, source: String, message: String) {
if let Err(error) = log_tx
.send(Box::new(SessionLogMessage::new(source, message)))
.await
{
println!("Could not send log message: {:?}", error);
}
}
|
};
pub async fn log_command(log_tx: &LogSender, player_name: String, message: String) {
if let Err(error) = log_tx
|
random_line_split
|
entry.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use driver::config;
use driver::session::Session;
use syntax::ast::{Crate, Name, NodeId, Item, ItemFn};
use syntax::ast_map;
use syntax::attr;
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::visit;
use syntax::visit::Visitor;
struct EntryContext<'a> {
session: &'a Session,
ast_map: &'a ast_map::Map,
// The interned Name for "main".
main_name: Name,
// The top-level function called'main'
main_fn: Option<(NodeId, Span)>,
// The function that has attribute named'main'
attr_main_fn: Option<(NodeId, Span)>,
// The function that has the attribute'start' on it
start_fn: Option<(NodeId, Span)>,
// The functions that one might think are'main' but aren't, e.g.
// main functions not defined at the top level. For diagnostics.
non_main_fns: Vec<(NodeId, Span)>,
}
impl<'a> Visitor<()> for EntryContext<'a> {
fn visit_item(&mut self, item: &Item, _:()) {
find_item(item, self);
}
}
pub fn find_entry_point(session: &Session, krate: &Crate, ast_map: &ast_map::Map) {
let any_exe = session.crate_types.borrow().iter().any(|ty| {
*ty == config::CrateTypeExecutable
});
if!any_exe {
// No need to find a main function
return
}
// If the user wants no main function at all, then stop here.
if attr::contains_name(krate.attrs.as_slice(), "no_main") {
session.entry_type.set(Some(config::EntryNone));
return
}
let mut ctxt = EntryContext {
session: session,
main_name: token::intern("main"),
ast_map: ast_map,
main_fn: None,
attr_main_fn: None,
start_fn: None,
non_main_fns: Vec::new(),
};
visit::walk_crate(&mut ctxt, krate, ());
configure_main(&mut ctxt);
}
fn find_item(item: &Item, ctxt: &mut EntryContext)
|
if attr::contains_name(item.attrs.as_slice(), "main") {
if ctxt.attr_main_fn.is_none() {
ctxt.attr_main_fn = Some((item.id, item.span));
} else {
ctxt.session.span_err(
item.span,
"multiple'main' functions");
}
}
if attr::contains_name(item.attrs.as_slice(), "start") {
if ctxt.start_fn.is_none() {
ctxt.start_fn = Some((item.id, item.span));
} else {
ctxt.session.span_err(
item.span,
"multiple'start' functions");
}
}
}
_ => ()
}
visit::walk_item(ctxt, item, ());
}
fn configure_main(this: &mut EntryContext) {
if this.start_fn.is_some() {
*this.session.entry_fn.borrow_mut() = this.start_fn;
this.session.entry_type.set(Some(config::EntryStart));
} else if this.attr_main_fn.is_some() {
*this.session.entry_fn.borrow_mut() = this.attr_main_fn;
this.session.entry_type.set(Some(config::EntryMain));
} else if this.main_fn.is_some() {
*this.session.entry_fn.borrow_mut() = this.main_fn;
this.session.entry_type.set(Some(config::EntryMain));
} else {
// No main function
this.session.err("main function not found");
if!this.non_main_fns.is_empty() {
// There were some functions named'main' though. Try to give the user a hint.
this.session.note("the main function must be defined at the crate level \
but you have one or more functions named'main' that are not \
defined at the crate level. Either move the definition or \
attach the `#[main]` attribute to override this behavior.");
for &(_, span) in this.non_main_fns.iter() {
this.session.span_note(span, "here is a function named'main'");
}
this.session.abort_if_errors();
}
}
}
|
{
match item.node {
ItemFn(..) => {
if item.ident.name == ctxt.main_name {
ctxt.ast_map.with_path(item.id, |mut path| {
if path.count() == 1 {
// This is a top-level function so can be 'main'
if ctxt.main_fn.is_none() {
ctxt.main_fn = Some((item.id, item.span));
} else {
ctxt.session.span_err(
item.span,
"multiple 'main' functions");
}
} else {
// This isn't main
ctxt.non_main_fns.push((item.id, item.span));
}
});
}
|
identifier_body
|
entry.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use driver::config;
use driver::session::Session;
use syntax::ast::{Crate, Name, NodeId, Item, ItemFn};
use syntax::ast_map;
use syntax::attr;
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::visit;
use syntax::visit::Visitor;
struct EntryContext<'a> {
session: &'a Session,
ast_map: &'a ast_map::Map,
// The interned Name for "main".
main_name: Name,
// The top-level function called'main'
main_fn: Option<(NodeId, Span)>,
// The function that has attribute named'main'
attr_main_fn: Option<(NodeId, Span)>,
// The function that has the attribute'start' on it
start_fn: Option<(NodeId, Span)>,
|
}
impl<'a> Visitor<()> for EntryContext<'a> {
fn visit_item(&mut self, item: &Item, _:()) {
find_item(item, self);
}
}
pub fn find_entry_point(session: &Session, krate: &Crate, ast_map: &ast_map::Map) {
let any_exe = session.crate_types.borrow().iter().any(|ty| {
*ty == config::CrateTypeExecutable
});
if!any_exe {
// No need to find a main function
return
}
// If the user wants no main function at all, then stop here.
if attr::contains_name(krate.attrs.as_slice(), "no_main") {
session.entry_type.set(Some(config::EntryNone));
return
}
let mut ctxt = EntryContext {
session: session,
main_name: token::intern("main"),
ast_map: ast_map,
main_fn: None,
attr_main_fn: None,
start_fn: None,
non_main_fns: Vec::new(),
};
visit::walk_crate(&mut ctxt, krate, ());
configure_main(&mut ctxt);
}
fn find_item(item: &Item, ctxt: &mut EntryContext) {
match item.node {
ItemFn(..) => {
if item.ident.name == ctxt.main_name {
ctxt.ast_map.with_path(item.id, |mut path| {
if path.count() == 1 {
// This is a top-level function so can be'main'
if ctxt.main_fn.is_none() {
ctxt.main_fn = Some((item.id, item.span));
} else {
ctxt.session.span_err(
item.span,
"multiple'main' functions");
}
} else {
// This isn't main
ctxt.non_main_fns.push((item.id, item.span));
}
});
}
if attr::contains_name(item.attrs.as_slice(), "main") {
if ctxt.attr_main_fn.is_none() {
ctxt.attr_main_fn = Some((item.id, item.span));
} else {
ctxt.session.span_err(
item.span,
"multiple'main' functions");
}
}
if attr::contains_name(item.attrs.as_slice(), "start") {
if ctxt.start_fn.is_none() {
ctxt.start_fn = Some((item.id, item.span));
} else {
ctxt.session.span_err(
item.span,
"multiple'start' functions");
}
}
}
_ => ()
}
visit::walk_item(ctxt, item, ());
}
fn configure_main(this: &mut EntryContext) {
if this.start_fn.is_some() {
*this.session.entry_fn.borrow_mut() = this.start_fn;
this.session.entry_type.set(Some(config::EntryStart));
} else if this.attr_main_fn.is_some() {
*this.session.entry_fn.borrow_mut() = this.attr_main_fn;
this.session.entry_type.set(Some(config::EntryMain));
} else if this.main_fn.is_some() {
*this.session.entry_fn.borrow_mut() = this.main_fn;
this.session.entry_type.set(Some(config::EntryMain));
} else {
// No main function
this.session.err("main function not found");
if!this.non_main_fns.is_empty() {
// There were some functions named'main' though. Try to give the user a hint.
this.session.note("the main function must be defined at the crate level \
but you have one or more functions named'main' that are not \
defined at the crate level. Either move the definition or \
attach the `#[main]` attribute to override this behavior.");
for &(_, span) in this.non_main_fns.iter() {
this.session.span_note(span, "here is a function named'main'");
}
this.session.abort_if_errors();
}
}
}
|
// The functions that one might think are 'main' but aren't, e.g.
// main functions not defined at the top level. For diagnostics.
non_main_fns: Vec<(NodeId, Span)> ,
|
random_line_split
|
entry.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use driver::config;
use driver::session::Session;
use syntax::ast::{Crate, Name, NodeId, Item, ItemFn};
use syntax::ast_map;
use syntax::attr;
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::visit;
use syntax::visit::Visitor;
struct EntryContext<'a> {
session: &'a Session,
ast_map: &'a ast_map::Map,
// The interned Name for "main".
main_name: Name,
// The top-level function called'main'
main_fn: Option<(NodeId, Span)>,
// The function that has attribute named'main'
attr_main_fn: Option<(NodeId, Span)>,
// The function that has the attribute'start' on it
start_fn: Option<(NodeId, Span)>,
// The functions that one might think are'main' but aren't, e.g.
// main functions not defined at the top level. For diagnostics.
non_main_fns: Vec<(NodeId, Span)>,
}
impl<'a> Visitor<()> for EntryContext<'a> {
fn visit_item(&mut self, item: &Item, _:()) {
find_item(item, self);
}
}
/// Locate the crate's entry point and record it in the session.
///
/// Does nothing unless an executable crate type was requested. Honours the
/// crate-level `#![no_main]` attribute (recording `EntryNone`) before
/// walking the crate for candidates; the final choice between candidates
/// is made by `configure_main`.
pub fn find_entry_point(session: &Session, krate: &Crate, ast_map: &ast_map::Map) {
    // Only executable crate types need an entry point.
    let is_executable = session.crate_types.borrow().iter()
        .any(|ty| *ty == config::CrateTypeExecutable);
    if !is_executable {
        return
    }
    // If the user wants no main function at all, then stop here.
    if attr::contains_name(krate.attrs.as_slice(), "no_main") {
        session.entry_type.set(Some(config::EntryNone));
        return
    }
    let mut ctx = EntryContext {
        session: session,
        ast_map: ast_map,
        main_name: token::intern("main"),
        main_fn: None,
        attr_main_fn: None,
        start_fn: None,
        non_main_fns: Vec::new(),
    };
    visit::walk_crate(&mut ctx, krate, ());
    configure_main(&mut ctx);
}
fn
|
(item: &Item, ctxt: &mut EntryContext) {
match item.node {
ItemFn(..) => {
if item.ident.name == ctxt.main_name {
ctxt.ast_map.with_path(item.id, |mut path| {
if path.count() == 1 {
// This is a top-level function so can be'main'
if ctxt.main_fn.is_none() {
ctxt.main_fn = Some((item.id, item.span));
} else {
ctxt.session.span_err(
item.span,
"multiple'main' functions");
}
} else {
// This isn't main
ctxt.non_main_fns.push((item.id, item.span));
}
});
}
if attr::contains_name(item.attrs.as_slice(), "main") {
if ctxt.attr_main_fn.is_none() {
ctxt.attr_main_fn = Some((item.id, item.span));
} else {
ctxt.session.span_err(
item.span,
"multiple'main' functions");
}
}
if attr::contains_name(item.attrs.as_slice(), "start") {
if ctxt.start_fn.is_none() {
ctxt.start_fn = Some((item.id, item.span));
} else {
ctxt.session.span_err(
item.span,
"multiple'start' functions");
}
}
}
_ => ()
}
visit::walk_item(ctxt, item, ());
}
// Choose which candidate becomes the session entry point, in priority order:
// '#[start]' > '#[main]' > top-level 'main'. When no candidate exists, emits
// an error (with a hint listing misplaced 'main' functions) and aborts.
fn configure_main(this: &mut EntryContext) {
if this.start_fn.is_some() {
*this.session.entry_fn.borrow_mut() = this.start_fn;
this.session.entry_type.set(Some(config::EntryStart));
} else if this.attr_main_fn.is_some() {
*this.session.entry_fn.borrow_mut() = this.attr_main_fn;
this.session.entry_type.set(Some(config::EntryMain));
} else if this.main_fn.is_some() {
*this.session.entry_fn.borrow_mut() = this.main_fn;
this.session.entry_type.set(Some(config::EntryMain));
} else {
// No entry point at all: report, and hint at misplaced 'main's if any.
this.session.err("main function not found");
if!this.non_main_fns.is_empty() {
// There were some functions named'main' though. Try to give the user a hint.
this.session.note("the main function must be defined at the crate level \
but you have one or more functions named'main' that are not \
defined at the crate level. Either move the definition or \
attach the `#[main]` attribute to override this behavior.");
for &(_, span) in this.non_main_fns.iter() {
this.session.span_note(span, "here is a function named'main'");
}
this.session.abort_if_errors();
}
}
}
|
find_item
|
identifier_name
|
client.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Hash-addressed content resolver & fetcher.
use std::{io, fs};
use std::io::Write;
use std::sync::Arc;
use std::path::PathBuf;
use hash::keccak_buffer;
use fetch::{Fetch, Response, Error as FetchError, Client as FetchClient};
use futures::Future;
use parity_reactor::Remote;
use urlhint::{ContractClient, URLHintContract, URLHint, URLHintResult};
use bigint::hash::H256;
/// API for fetching content addressed by its content hash.
pub trait HashFetch: Send + Sync +'static {
/// Fetch hash-addressed content.
///
/// Parameters:
/// 1. `hash` - content hash
/// 2. `on_done` - callback function invoked when the content is ready (or there was error during fetch);
///    on success it receives the filesystem path of the downloaded, hash-validated file.
///
/// This function may fail immediately when fetch cannot be initialized or content cannot be resolved.
fn fetch(&self, hash: H256, on_done: Box<Fn(Result<PathBuf, Error>) + Send>);
}
/// Hash-fetching error.
#[derive(Debug)]
pub enum Error {
/// Hash could not be resolved to a valid content address (registry lookup failed or returned nothing).
NoResolution,
/// Downloaded content hash does not match the requested one.
HashMismatch {
/// Expected hash (the one that was requested)
expected: H256,
/// Computed hash (of the bytes actually downloaded)
got: H256,
},
/// Server didn't respond with OK status.
InvalidStatus,
/// IO Error while writing the file or validating its hash.
IO(io::Error),
/// Error during the underlying HTTP fetch.
Fetch(FetchError),
}
// Test-only structural equality. `IO` and `Fetch` payloads are not
// comparable, so any two errors of the same such variant count as equal;
// `HashMismatch` compares both hashes.
#[cfg(test)]
impl PartialEq for Error {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (&Error::NoResolution, &Error::NoResolution) |
            (&Error::InvalidStatus, &Error::InvalidStatus) |
            (&Error::IO(_), &Error::IO(_)) |
            (&Error::Fetch(_), &Error::Fetch(_)) => true,
            (&Error::HashMismatch { expected: ref a, got: ref b },
             &Error::HashMismatch { expected: ref c, got: ref d }) => {
                a == c && b == d
            },
            _ => false,
        }
    }
}
// Enables `?` on fetch operations inside functions returning `Result<_, Error>`.
impl From<FetchError> for Error {
fn from(error: FetchError) -> Self {
Error::Fetch(error)
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Self
|
}
fn validate_hash(path: PathBuf, hash: H256, result: Result<Response, FetchError>) -> Result<PathBuf, Error> {
let response = result?;
if!response.is_success() {
return Err(Error::InvalidStatus);
}
// Read the response
let mut reader = io::BufReader::new(response);
let mut writer = io::BufWriter::new(fs::File::create(&path)?);
io::copy(&mut reader, &mut writer)?;
writer.flush()?;
// And validate the hash
let mut file_reader = io::BufReader::new(fs::File::open(&path)?);
let content_hash = keccak_buffer(&mut file_reader)?;
if content_hash!= hash {
Err(Error::HashMismatch{ got: content_hash, expected: hash })
} else {
Ok(path)
}
}
/// Default Hash-fetching client using on-chain contract to resolve hashes to URLs.
pub struct Client<F: Fetch +'static = FetchClient> {
// URLHint registry contract used to resolve a content hash to a URL.
contract: URLHintContract,
// Underlying fetch service performing the HTTP download.
fetch: F,
// Event-loop handle used to spawn the fetch future.
remote: Remote,
// Generates a temporary file path for downloads; replaceable in tests.
random_path: Arc<Fn() -> PathBuf + Sync + Send>,
}
impl Client {
    /// Creates new instance of the `Client` given on-chain contract client and task runner.
    ///
    /// # Panics
    ///
    /// Panics when the default fetch backend cannot be initialized — without
    /// a working fetch backend the client would be unusable anyway, so the
    /// previous bare `unwrap()` is replaced with an `expect` stating why.
    pub fn new(contract: Arc<ContractClient>, remote: Remote) -> Self {
        let fetch = FetchClient::new().expect("cannot initialize fetch client");
        Client::with_fetch(contract, fetch, remote)
    }
}
impl<F: Fetch +'static> Client<F> {
/// Creates new instance of the `Client` given on-chain contract client, fetch service and task runner.
/// The temporary-file path generator defaults to `random_temp_path`.
pub fn with_fetch(contract: Arc<ContractClient>, fetch: F, remote: Remote) -> Self {
Client {
contract: URLHintContract::new(contract),
fetch: fetch,
remote: remote,
random_path: Arc::new(random_temp_path),
}
}
}
impl<F: Fetch +'static> HashFetch for Client<F> {
// Pipeline: resolve hash -> URL via the on-chain URLHint contract, download
// the URL to a temporary file, validate the file's hash, then invoke
// `on_done` with the result. The whole chain runs on `self.remote`.
fn fetch(&self, hash: H256, on_done: Box<Fn(Result<PathBuf, Error>) + Send>) {
debug!(target: "fetch", "Fetching: {:?}", hash);
let random_path = self.random_path.clone();
let remote_fetch = self.fetch.clone();
let future = self.contract.resolve(hash.to_vec())
// Resolution failure (or an empty answer) maps to `NoResolution`.
.map_err(|e| { warn!("Error resolving URL: {}", e); Error::NoResolution })
.and_then(|maybe_url| maybe_url.ok_or(Error::NoResolution))
// Both URLHint result kinds boil down to a URL to download.
.map(|content| match content {
URLHintResult::Dapp(dapp) => {
dapp.url()
},
URLHintResult::Content(content) => {
content.url
},
})
.and_then(move |url| {
debug!(target: "fetch", "Resolved {:?} to {:?}. Fetching...", hash, url);
let future = remote_fetch.fetch(&url).then(move |result| {
debug!(target: "fetch", "Content fetched, validating hash ({:?})", hash);
let path = random_path();
let res = validate_hash(path.clone(), hash, result);
if let Err(ref err) = res {
trace!(target: "fetch", "Error: {:?}", err);
// Remove temporary file in case of error
let _ = fs::remove_file(&path);
}
res
});
remote_fetch.process(future)
})
// Hand the final outcome to the caller's callback.
.then(move |res| { on_done(res); Ok(()) as Result<(), ()> });
self.remote.spawn(future);
}
}
/// Builds a fresh path inside the OS temp directory whose file name is a
/// random 12-character ASCII string from the OS random source.
fn random_temp_path() -> PathBuf {
    use ::rand::Rng;
    use ::std::env;
    let mut rng = ::rand::OsRng::new().expect("Reliable random source is required to work.");
    let name: String = rng.gen_ascii_chars().take(12).collect();
    let mut result = env::temp_dir();
    result.push(name);
    result
}
// Unit tests driving `Client` end-to-end against a fake registrar (URL
// resolution) and a fake fetch backend (HTTP), covering resolution failure,
// bad HTTP status, hash mismatch, and the success path.
#[cfg(test)]
mod tests {
use rustc_hex::FromHex;
use std::sync::{Arc, mpsc};
use parking_lot::Mutex;
use futures::future;
use fetch::{self, Fetch};
use parity_reactor::Remote;
use urlhint::tests::{FakeRegistrar, URLHINT};
use super::{Error, Client, HashFetch, random_temp_path};
// Fetch stub: returns either fixed content b"result" or a 404,
// depending on `return_success`.
#[derive(Clone)]
struct FakeFetch {
return_success: bool
}
impl Fetch for FakeFetch {
type Result = future::Ok<fetch::Response, fetch::Error>;
fn new() -> Result<Self, fetch::Error> where Self: Sized {
Ok(FakeFetch { return_success: true })
}
fn fetch_with_abort(&self, url: &str, _abort: fetch::Abort) -> Self::Result {
assert_eq!(url, "https://parity.io/assets/images/ethcore-black-horizontal.png");
future::ok(if self.return_success {
let cursor = ::std::io::Cursor::new(b"result");
fetch::Response::from_reader(cursor)
} else {
fetch::Response::not_found()
})
}
}
// Registrar stub pre-loaded with two canned responses: the URLHINT
// contract address, then an ABI-encoded URLHint entry pointing at the
// URL asserted in `FakeFetch::fetch_with_abort`.
fn registrar() -> FakeRegistrar {
let mut registrar = FakeRegistrar::new();
registrar.responses = Mutex::new(vec![
Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()),
Ok("00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000deadcafebeefbeefcafedeaddeedfeedffffffff000000000000000000000000000000000000000000000000000000000000003c68747470733a2f2f7061726974792e696f2f6173736574732f696d616765732f657468636f72652d626c61636b2d686f72697a6f6e74616c2e706e6700000000".from_hex().unwrap()),
]);
registrar
}
// Empty registrar => hash cannot be resolved => NoResolution.
#[test]
fn should_return_error_if_hash_not_found() {
// given
let contract = Arc::new(FakeRegistrar::new());
let fetch = FakeFetch { return_success: false };
let client = Client::with_fetch(contract.clone(), fetch, Remote::new_sync());
// when
let (tx, rx) = mpsc::channel();
client.fetch(2.into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
assert_eq!(result.unwrap_err(), Error::NoResolution);
}
// Resolution succeeds but the HTTP fetch 404s => InvalidStatus.
#[test]
fn should_return_error_if_response_is_not_successful() {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch { return_success: false };
let client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
// when
let (tx, rx) = mpsc::channel();
client.fetch(2.into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
assert_eq!(result.unwrap_err(), Error::InvalidStatus);
}
// Download succeeds but its hash is not the requested one => HashMismatch,
// and the temporary file must be cleaned up.
#[test]
fn should_return_hash_mismatch() {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch { return_success: true };
let mut client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
let path = random_temp_path();
let path2 = path.clone();
client.random_path = Arc::new(move || path2.clone());
// when
let (tx, rx) = mpsc::channel();
client.fetch(2.into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
let hash = "0x06b0a4f426f6713234b2d4b2468640bc4e0bb72657a920ad24c5087153c593c8".into();
assert_eq!(result.unwrap_err(), Error::HashMismatch { expected: 2.into(), got: hash });
assert!(!path.exists(), "Temporary file should be removed.");
}
// Requesting the actual hash of b"result" => Ok(path).
#[test]
fn should_return_path_if_hash_matches() {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch { return_success: true };
let client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
// when
let (tx, rx) = mpsc::channel();
client.fetch("0x06b0a4f426f6713234b2d4b2468640bc4e0bb72657a920ad24c5087153c593c8".into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
assert!(result.is_ok(), "Should return path, got: {:?}", result);
}
}
|
{
Error::IO(error)
}
|
identifier_body
|
client.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Hash-addressed content resolver & fetcher.
use std::{io, fs};
use std::io::Write;
use std::sync::Arc;
use std::path::PathBuf;
use hash::keccak_buffer;
use fetch::{Fetch, Response, Error as FetchError, Client as FetchClient};
use futures::Future;
use parity_reactor::Remote;
use urlhint::{ContractClient, URLHintContract, URLHint, URLHintResult};
use bigint::hash::H256;
/// API for fetching by hash.
pub trait HashFetch: Send + Sync +'static {
/// Fetch hash-addressed content.
/// Parameters:
/// 1. `hash` - content hash
/// 2. `on_done` - callback function invoked when the content is ready (or there was error during fetch)
///
/// This function may fail immediately when fetch cannot be initialized or content cannot be resolved.
fn fetch(&self, hash: H256, on_done: Box<Fn(Result<PathBuf, Error>) + Send>);
}
/// Hash-fetching error.
#[derive(Debug)]
pub enum Error {
/// Hash could not be resolved to a valid content address.
NoResolution,
/// Downloaded content hash does not match.
HashMismatch {
/// Expected hash
expected: H256,
/// Computed hash
got: H256,
},
/// Server didn't respond with OK status.
InvalidStatus,
/// IO Error while validating hash.
IO(io::Error),
/// Error during fetch.
Fetch(FetchError),
}
#[cfg(test)]
impl PartialEq for Error {
fn eq(&self, other: &Self) -> bool {
use Error::*;
match (self, other) {
(&HashMismatch { expected, got }, &HashMismatch { expected: e, got: g }) => {
expected == e && got == g
},
(&NoResolution, &NoResolution) => true,
(&InvalidStatus, &InvalidStatus) => true,
(&IO(_), &IO(_)) => true,
(&Fetch(_), &Fetch(_)) => true,
_ => false,
}
}
}
impl From<FetchError> for Error {
fn from(error: FetchError) -> Self {
Error::Fetch(error)
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Self {
Error::IO(error)
}
|
return Err(Error::InvalidStatus);
}
// Read the response
let mut reader = io::BufReader::new(response);
let mut writer = io::BufWriter::new(fs::File::create(&path)?);
io::copy(&mut reader, &mut writer)?;
writer.flush()?;
// And validate the hash
let mut file_reader = io::BufReader::new(fs::File::open(&path)?);
let content_hash = keccak_buffer(&mut file_reader)?;
if content_hash!= hash {
Err(Error::HashMismatch{ got: content_hash, expected: hash })
} else {
Ok(path)
}
}
/// Default Hash-fetching client using on-chain contract to resolve hashes to URLs.
pub struct Client<F: Fetch +'static = FetchClient> {
contract: URLHintContract,
fetch: F,
remote: Remote,
random_path: Arc<Fn() -> PathBuf + Sync + Send>,
}
impl Client {
/// Creates new instance of the `Client` given on-chain contract client and task runner.
pub fn new(contract: Arc<ContractClient>, remote: Remote) -> Self {
Client::with_fetch(contract, FetchClient::new().unwrap(), remote)
}
}
impl<F: Fetch +'static> Client<F> {
/// Creates new instance of the `Client` given on-chain contract client, fetch service and task runner.
pub fn with_fetch(contract: Arc<ContractClient>, fetch: F, remote: Remote) -> Self {
Client {
contract: URLHintContract::new(contract),
fetch: fetch,
remote: remote,
random_path: Arc::new(random_temp_path),
}
}
}
impl<F: Fetch +'static> HashFetch for Client<F> {
fn fetch(&self, hash: H256, on_done: Box<Fn(Result<PathBuf, Error>) + Send>) {
debug!(target: "fetch", "Fetching: {:?}", hash);
let random_path = self.random_path.clone();
let remote_fetch = self.fetch.clone();
let future = self.contract.resolve(hash.to_vec())
.map_err(|e| { warn!("Error resolving URL: {}", e); Error::NoResolution })
.and_then(|maybe_url| maybe_url.ok_or(Error::NoResolution))
.map(|content| match content {
URLHintResult::Dapp(dapp) => {
dapp.url()
},
URLHintResult::Content(content) => {
content.url
},
})
.and_then(move |url| {
debug!(target: "fetch", "Resolved {:?} to {:?}. Fetching...", hash, url);
let future = remote_fetch.fetch(&url).then(move |result| {
debug!(target: "fetch", "Content fetched, validating hash ({:?})", hash);
let path = random_path();
let res = validate_hash(path.clone(), hash, result);
if let Err(ref err) = res {
trace!(target: "fetch", "Error: {:?}", err);
// Remove temporary file in case of error
let _ = fs::remove_file(&path);
}
res
});
remote_fetch.process(future)
})
.then(move |res| { on_done(res); Ok(()) as Result<(), ()> });
self.remote.spawn(future);
}
}
fn random_temp_path() -> PathBuf {
use ::rand::Rng;
use ::std::env;
let mut rng = ::rand::OsRng::new().expect("Reliable random source is required to work.");
let file: String = rng.gen_ascii_chars().take(12).collect();
let mut path = env::temp_dir();
path.push(file);
path
}
#[cfg(test)]
mod tests {
use rustc_hex::FromHex;
use std::sync::{Arc, mpsc};
use parking_lot::Mutex;
use futures::future;
use fetch::{self, Fetch};
use parity_reactor::Remote;
use urlhint::tests::{FakeRegistrar, URLHINT};
use super::{Error, Client, HashFetch, random_temp_path};
#[derive(Clone)]
struct FakeFetch {
return_success: bool
}
impl Fetch for FakeFetch {
type Result = future::Ok<fetch::Response, fetch::Error>;
fn new() -> Result<Self, fetch::Error> where Self: Sized {
Ok(FakeFetch { return_success: true })
}
fn fetch_with_abort(&self, url: &str, _abort: fetch::Abort) -> Self::Result {
assert_eq!(url, "https://parity.io/assets/images/ethcore-black-horizontal.png");
future::ok(if self.return_success {
let cursor = ::std::io::Cursor::new(b"result");
fetch::Response::from_reader(cursor)
} else {
fetch::Response::not_found()
})
}
}
fn registrar() -> FakeRegistrar {
let mut registrar = FakeRegistrar::new();
registrar.responses = Mutex::new(vec![
Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()),
Ok("00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000deadcafebeefbeefcafedeaddeedfeedffffffff000000000000000000000000000000000000000000000000000000000000003c68747470733a2f2f7061726974792e696f2f6173736574732f696d616765732f657468636f72652d626c61636b2d686f72697a6f6e74616c2e706e6700000000".from_hex().unwrap()),
]);
registrar
}
#[test]
fn should_return_error_if_hash_not_found() {
// given
let contract = Arc::new(FakeRegistrar::new());
let fetch = FakeFetch { return_success: false };
let client = Client::with_fetch(contract.clone(), fetch, Remote::new_sync());
// when
let (tx, rx) = mpsc::channel();
client.fetch(2.into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
assert_eq!(result.unwrap_err(), Error::NoResolution);
}
#[test]
fn should_return_error_if_response_is_not_successful() {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch { return_success: false };
let client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
// when
let (tx, rx) = mpsc::channel();
client.fetch(2.into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
assert_eq!(result.unwrap_err(), Error::InvalidStatus);
}
#[test]
fn should_return_hash_mismatch() {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch { return_success: true };
let mut client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
let path = random_temp_path();
let path2 = path.clone();
client.random_path = Arc::new(move || path2.clone());
// when
let (tx, rx) = mpsc::channel();
client.fetch(2.into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
let hash = "0x06b0a4f426f6713234b2d4b2468640bc4e0bb72657a920ad24c5087153c593c8".into();
assert_eq!(result.unwrap_err(), Error::HashMismatch { expected: 2.into(), got: hash });
assert!(!path.exists(), "Temporary file should be removed.");
}
#[test]
fn should_return_path_if_hash_matches() {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch { return_success: true };
let client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
// when
let (tx, rx) = mpsc::channel();
client.fetch("0x06b0a4f426f6713234b2d4b2468640bc4e0bb72657a920ad24c5087153c593c8".into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
assert!(result.is_ok(), "Should return path, got: {:?}", result);
}
}
|
}
fn validate_hash(path: PathBuf, hash: H256, result: Result<Response, FetchError>) -> Result<PathBuf, Error> {
let response = result?;
if !response.is_success() {
|
random_line_split
|
client.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Hash-addressed content resolver & fetcher.
use std::{io, fs};
use std::io::Write;
use std::sync::Arc;
use std::path::PathBuf;
use hash::keccak_buffer;
use fetch::{Fetch, Response, Error as FetchError, Client as FetchClient};
use futures::Future;
use parity_reactor::Remote;
use urlhint::{ContractClient, URLHintContract, URLHint, URLHintResult};
use bigint::hash::H256;
/// API for fetching by hash.
pub trait HashFetch: Send + Sync +'static {
/// Fetch hash-addressed content.
/// Parameters:
/// 1. `hash` - content hash
/// 2. `on_done` - callback function invoked when the content is ready (or there was error during fetch)
///
/// This function may fail immediately when fetch cannot be initialized or content cannot be resolved.
fn fetch(&self, hash: H256, on_done: Box<Fn(Result<PathBuf, Error>) + Send>);
}
/// Hash-fetching error.
#[derive(Debug)]
pub enum Error {
/// Hash could not be resolved to a valid content address.
NoResolution,
/// Downloaded content hash does not match.
HashMismatch {
/// Expected hash
expected: H256,
/// Computed hash
got: H256,
},
/// Server didn't respond with OK status.
InvalidStatus,
/// IO Error while validating hash.
IO(io::Error),
/// Error during fetch.
Fetch(FetchError),
}
#[cfg(test)]
impl PartialEq for Error {
fn eq(&self, other: &Self) -> bool {
use Error::*;
match (self, other) {
(&HashMismatch { expected, got }, &HashMismatch { expected: e, got: g }) => {
expected == e && got == g
},
(&NoResolution, &NoResolution) => true,
(&InvalidStatus, &InvalidStatus) => true,
(&IO(_), &IO(_)) => true,
(&Fetch(_), &Fetch(_)) => true,
_ => false,
}
}
}
impl From<FetchError> for Error {
fn from(error: FetchError) -> Self {
Error::Fetch(error)
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Self {
Error::IO(error)
}
}
fn validate_hash(path: PathBuf, hash: H256, result: Result<Response, FetchError>) -> Result<PathBuf, Error> {
let response = result?;
if!response.is_success() {
return Err(Error::InvalidStatus);
}
// Read the response
let mut reader = io::BufReader::new(response);
let mut writer = io::BufWriter::new(fs::File::create(&path)?);
io::copy(&mut reader, &mut writer)?;
writer.flush()?;
// And validate the hash
let mut file_reader = io::BufReader::new(fs::File::open(&path)?);
let content_hash = keccak_buffer(&mut file_reader)?;
if content_hash!= hash {
Err(Error::HashMismatch{ got: content_hash, expected: hash })
} else
|
}
/// Default Hash-fetching client using on-chain contract to resolve hashes to URLs.
pub struct Client<F: Fetch +'static = FetchClient> {
contract: URLHintContract,
fetch: F,
remote: Remote,
random_path: Arc<Fn() -> PathBuf + Sync + Send>,
}
impl Client {
/// Creates new instance of the `Client` given on-chain contract client and task runner.
pub fn new(contract: Arc<ContractClient>, remote: Remote) -> Self {
Client::with_fetch(contract, FetchClient::new().unwrap(), remote)
}
}
impl<F: Fetch +'static> Client<F> {
/// Creates new instance of the `Client` given on-chain contract client, fetch service and task runner.
pub fn with_fetch(contract: Arc<ContractClient>, fetch: F, remote: Remote) -> Self {
Client {
contract: URLHintContract::new(contract),
fetch: fetch,
remote: remote,
random_path: Arc::new(random_temp_path),
}
}
}
impl<F: Fetch +'static> HashFetch for Client<F> {
fn fetch(&self, hash: H256, on_done: Box<Fn(Result<PathBuf, Error>) + Send>) {
debug!(target: "fetch", "Fetching: {:?}", hash);
let random_path = self.random_path.clone();
let remote_fetch = self.fetch.clone();
let future = self.contract.resolve(hash.to_vec())
.map_err(|e| { warn!("Error resolving URL: {}", e); Error::NoResolution })
.and_then(|maybe_url| maybe_url.ok_or(Error::NoResolution))
.map(|content| match content {
URLHintResult::Dapp(dapp) => {
dapp.url()
},
URLHintResult::Content(content) => {
content.url
},
})
.and_then(move |url| {
debug!(target: "fetch", "Resolved {:?} to {:?}. Fetching...", hash, url);
let future = remote_fetch.fetch(&url).then(move |result| {
debug!(target: "fetch", "Content fetched, validating hash ({:?})", hash);
let path = random_path();
let res = validate_hash(path.clone(), hash, result);
if let Err(ref err) = res {
trace!(target: "fetch", "Error: {:?}", err);
// Remove temporary file in case of error
let _ = fs::remove_file(&path);
}
res
});
remote_fetch.process(future)
})
.then(move |res| { on_done(res); Ok(()) as Result<(), ()> });
self.remote.spawn(future);
}
}
fn random_temp_path() -> PathBuf {
use ::rand::Rng;
use ::std::env;
let mut rng = ::rand::OsRng::new().expect("Reliable random source is required to work.");
let file: String = rng.gen_ascii_chars().take(12).collect();
let mut path = env::temp_dir();
path.push(file);
path
}
#[cfg(test)]
mod tests {
use rustc_hex::FromHex;
use std::sync::{Arc, mpsc};
use parking_lot::Mutex;
use futures::future;
use fetch::{self, Fetch};
use parity_reactor::Remote;
use urlhint::tests::{FakeRegistrar, URLHINT};
use super::{Error, Client, HashFetch, random_temp_path};
#[derive(Clone)]
struct FakeFetch {
return_success: bool
}
impl Fetch for FakeFetch {
type Result = future::Ok<fetch::Response, fetch::Error>;
fn new() -> Result<Self, fetch::Error> where Self: Sized {
Ok(FakeFetch { return_success: true })
}
fn fetch_with_abort(&self, url: &str, _abort: fetch::Abort) -> Self::Result {
assert_eq!(url, "https://parity.io/assets/images/ethcore-black-horizontal.png");
future::ok(if self.return_success {
let cursor = ::std::io::Cursor::new(b"result");
fetch::Response::from_reader(cursor)
} else {
fetch::Response::not_found()
})
}
}
fn registrar() -> FakeRegistrar {
let mut registrar = FakeRegistrar::new();
registrar.responses = Mutex::new(vec![
Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()),
Ok("00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000deadcafebeefbeefcafedeaddeedfeedffffffff000000000000000000000000000000000000000000000000000000000000003c68747470733a2f2f7061726974792e696f2f6173736574732f696d616765732f657468636f72652d626c61636b2d686f72697a6f6e74616c2e706e6700000000".from_hex().unwrap()),
]);
registrar
}
#[test]
fn should_return_error_if_hash_not_found() {
// given
let contract = Arc::new(FakeRegistrar::new());
let fetch = FakeFetch { return_success: false };
let client = Client::with_fetch(contract.clone(), fetch, Remote::new_sync());
// when
let (tx, rx) = mpsc::channel();
client.fetch(2.into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
assert_eq!(result.unwrap_err(), Error::NoResolution);
}
#[test]
fn should_return_error_if_response_is_not_successful() {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch { return_success: false };
let client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
// when
let (tx, rx) = mpsc::channel();
client.fetch(2.into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
assert_eq!(result.unwrap_err(), Error::InvalidStatus);
}
#[test]
fn should_return_hash_mismatch() {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch { return_success: true };
let mut client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
let path = random_temp_path();
let path2 = path.clone();
client.random_path = Arc::new(move || path2.clone());
// when
let (tx, rx) = mpsc::channel();
client.fetch(2.into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
let hash = "0x06b0a4f426f6713234b2d4b2468640bc4e0bb72657a920ad24c5087153c593c8".into();
assert_eq!(result.unwrap_err(), Error::HashMismatch { expected: 2.into(), got: hash });
assert!(!path.exists(), "Temporary file should be removed.");
}
#[test]
fn should_return_path_if_hash_matches() {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch { return_success: true };
let client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
// when
let (tx, rx) = mpsc::channel();
client.fetch("0x06b0a4f426f6713234b2d4b2468640bc4e0bb72657a920ad24c5087153c593c8".into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
assert!(result.is_ok(), "Should return path, got: {:?}", result);
}
}
|
{
Ok(path)
}
|
conditional_block
|
client.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Hash-addressed content resolver & fetcher.
use std::{io, fs};
use std::io::Write;
use std::sync::Arc;
use std::path::PathBuf;
use hash::keccak_buffer;
use fetch::{Fetch, Response, Error as FetchError, Client as FetchClient};
use futures::Future;
use parity_reactor::Remote;
use urlhint::{ContractClient, URLHintContract, URLHint, URLHintResult};
use bigint::hash::H256;
/// API for fetching by hash.
pub trait HashFetch: Send + Sync +'static {
/// Fetch hash-addressed content.
/// Parameters:
/// 1. `hash` - content hash
/// 2. `on_done` - callback function invoked when the content is ready (or there was error during fetch)
///
/// This function may fail immediately when fetch cannot be initialized or content cannot be resolved.
fn fetch(&self, hash: H256, on_done: Box<Fn(Result<PathBuf, Error>) + Send>);
}
/// Hash-fetching error.
#[derive(Debug)]
pub enum Error {
/// Hash could not be resolved to a valid content address.
NoResolution,
/// Downloaded content hash does not match.
HashMismatch {
/// Expected hash
expected: H256,
/// Computed hash
got: H256,
},
/// Server didn't respond with OK status.
InvalidStatus,
/// IO Error while validating hash.
IO(io::Error),
/// Error during fetch.
Fetch(FetchError),
}
#[cfg(test)]
impl PartialEq for Error {
fn eq(&self, other: &Self) -> bool {
use Error::*;
match (self, other) {
(&HashMismatch { expected, got }, &HashMismatch { expected: e, got: g }) => {
expected == e && got == g
},
(&NoResolution, &NoResolution) => true,
(&InvalidStatus, &InvalidStatus) => true,
(&IO(_), &IO(_)) => true,
(&Fetch(_), &Fetch(_)) => true,
_ => false,
}
}
}
impl From<FetchError> for Error {
fn from(error: FetchError) -> Self {
Error::Fetch(error)
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Self {
Error::IO(error)
}
}
fn validate_hash(path: PathBuf, hash: H256, result: Result<Response, FetchError>) -> Result<PathBuf, Error> {
let response = result?;
if!response.is_success() {
return Err(Error::InvalidStatus);
}
// Read the response
let mut reader = io::BufReader::new(response);
let mut writer = io::BufWriter::new(fs::File::create(&path)?);
io::copy(&mut reader, &mut writer)?;
writer.flush()?;
// And validate the hash
let mut file_reader = io::BufReader::new(fs::File::open(&path)?);
let content_hash = keccak_buffer(&mut file_reader)?;
if content_hash!= hash {
Err(Error::HashMismatch{ got: content_hash, expected: hash })
} else {
Ok(path)
}
}
/// Default Hash-fetching client using on-chain contract to resolve hashes to URLs.
pub struct Client<F: Fetch +'static = FetchClient> {
contract: URLHintContract,
fetch: F,
remote: Remote,
random_path: Arc<Fn() -> PathBuf + Sync + Send>,
}
impl Client {
/// Creates new instance of the `Client` given on-chain contract client and task runner.
pub fn
|
(contract: Arc<ContractClient>, remote: Remote) -> Self {
Client::with_fetch(contract, FetchClient::new().unwrap(), remote)
}
}
impl<F: Fetch +'static> Client<F> {
/// Creates new instance of the `Client` given on-chain contract client, fetch service and task runner.
pub fn with_fetch(contract: Arc<ContractClient>, fetch: F, remote: Remote) -> Self {
Client {
contract: URLHintContract::new(contract),
fetch: fetch,
remote: remote,
random_path: Arc::new(random_temp_path),
}
}
}
impl<F: Fetch +'static> HashFetch for Client<F> {
fn fetch(&self, hash: H256, on_done: Box<Fn(Result<PathBuf, Error>) + Send>) {
debug!(target: "fetch", "Fetching: {:?}", hash);
let random_path = self.random_path.clone();
let remote_fetch = self.fetch.clone();
let future = self.contract.resolve(hash.to_vec())
.map_err(|e| { warn!("Error resolving URL: {}", e); Error::NoResolution })
.and_then(|maybe_url| maybe_url.ok_or(Error::NoResolution))
.map(|content| match content {
URLHintResult::Dapp(dapp) => {
dapp.url()
},
URLHintResult::Content(content) => {
content.url
},
})
.and_then(move |url| {
debug!(target: "fetch", "Resolved {:?} to {:?}. Fetching...", hash, url);
let future = remote_fetch.fetch(&url).then(move |result| {
debug!(target: "fetch", "Content fetched, validating hash ({:?})", hash);
let path = random_path();
let res = validate_hash(path.clone(), hash, result);
if let Err(ref err) = res {
trace!(target: "fetch", "Error: {:?}", err);
// Remove temporary file in case of error
let _ = fs::remove_file(&path);
}
res
});
remote_fetch.process(future)
})
.then(move |res| { on_done(res); Ok(()) as Result<(), ()> });
self.remote.spawn(future);
}
}
fn random_temp_path() -> PathBuf {
use ::rand::Rng;
use ::std::env;
let mut rng = ::rand::OsRng::new().expect("Reliable random source is required to work.");
let file: String = rng.gen_ascii_chars().take(12).collect();
let mut path = env::temp_dir();
path.push(file);
path
}
#[cfg(test)]
mod tests {
use rustc_hex::FromHex;
use std::sync::{Arc, mpsc};
use parking_lot::Mutex;
use futures::future;
use fetch::{self, Fetch};
use parity_reactor::Remote;
use urlhint::tests::{FakeRegistrar, URLHINT};
use super::{Error, Client, HashFetch, random_temp_path};
#[derive(Clone)]
struct FakeFetch {
return_success: bool
}
impl Fetch for FakeFetch {
type Result = future::Ok<fetch::Response, fetch::Error>;
fn new() -> Result<Self, fetch::Error> where Self: Sized {
Ok(FakeFetch { return_success: true })
}
fn fetch_with_abort(&self, url: &str, _abort: fetch::Abort) -> Self::Result {
assert_eq!(url, "https://parity.io/assets/images/ethcore-black-horizontal.png");
future::ok(if self.return_success {
let cursor = ::std::io::Cursor::new(b"result");
fetch::Response::from_reader(cursor)
} else {
fetch::Response::not_found()
})
}
}
fn registrar() -> FakeRegistrar {
let mut registrar = FakeRegistrar::new();
registrar.responses = Mutex::new(vec![
Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()),
Ok("00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000deadcafebeefbeefcafedeaddeedfeedffffffff000000000000000000000000000000000000000000000000000000000000003c68747470733a2f2f7061726974792e696f2f6173736574732f696d616765732f657468636f72652d626c61636b2d686f72697a6f6e74616c2e706e6700000000".from_hex().unwrap()),
]);
registrar
}
#[test]
fn should_return_error_if_hash_not_found() {
// given
let contract = Arc::new(FakeRegistrar::new());
let fetch = FakeFetch { return_success: false };
let client = Client::with_fetch(contract.clone(), fetch, Remote::new_sync());
// when
let (tx, rx) = mpsc::channel();
client.fetch(2.into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
assert_eq!(result.unwrap_err(), Error::NoResolution);
}
#[test]
fn should_return_error_if_response_is_not_successful() {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch { return_success: false };
let client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
// when
let (tx, rx) = mpsc::channel();
client.fetch(2.into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
assert_eq!(result.unwrap_err(), Error::InvalidStatus);
}
#[test]
fn should_return_hash_mismatch() {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch { return_success: true };
let mut client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
let path = random_temp_path();
let path2 = path.clone();
client.random_path = Arc::new(move || path2.clone());
// when
let (tx, rx) = mpsc::channel();
client.fetch(2.into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
let hash = "0x06b0a4f426f6713234b2d4b2468640bc4e0bb72657a920ad24c5087153c593c8".into();
assert_eq!(result.unwrap_err(), Error::HashMismatch { expected: 2.into(), got: hash });
assert!(!path.exists(), "Temporary file should be removed.");
}
#[test]
fn should_return_path_if_hash_matches() {
// given
let registrar = Arc::new(registrar());
let fetch = FakeFetch { return_success: true };
let client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
// when
let (tx, rx) = mpsc::channel();
client.fetch("0x06b0a4f426f6713234b2d4b2468640bc4e0bb72657a920ad24c5087153c593c8".into(), Box::new(move |result| {
tx.send(result).unwrap();
}));
// then
let result = rx.recv().unwrap();
assert!(result.is_ok(), "Should return path, got: {:?}", result);
}
}
|
new
|
identifier_name
|
std-smallintmap.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Microbenchmark for the smallintmap library
extern crate collections;
extern crate time;
use collections::SmallIntMap;
use std::os;
use std::uint;
fn append_sequential(min: uint, max: uint, map: &mut SmallIntMap<uint>)
|
fn check_sequential(min: uint, max: uint, map: &SmallIntMap<uint>) {
for i in range(min, max) {
assert_eq!(*map.get(&i), i + 22u);
}
}
fn main() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
vec!("".to_owned(), "100000".to_owned(), "100".to_owned())
} else if args.len() <= 1u {
vec!("".to_owned(), "10000".to_owned(), "50".to_owned())
} else {
args.move_iter().collect()
};
let max = from_str::<uint>(*args.get(1)).unwrap();
let rep = from_str::<uint>(*args.get(2)).unwrap();
let mut checkf = 0.0;
let mut appendf = 0.0;
for _ in range(0u, rep) {
let mut map = SmallIntMap::new();
let start = time::precise_time_s();
append_sequential(0u, max, &mut map);
let mid = time::precise_time_s();
check_sequential(0u, max, &map);
let end = time::precise_time_s();
checkf += (end - mid) as f64;
appendf += (mid - start) as f64;
}
let maxf = max as f64;
println!("insert(): {:?} seconds\n", checkf);
println!(" : {} op/sec\n", maxf/checkf);
println!("get() : {:?} seconds\n", appendf);
println!(" : {} op/sec\n", maxf/appendf);
}
|
{
for i in range(min, max) {
map.insert(i, i + 22u);
}
}
|
identifier_body
|
std-smallintmap.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Microbenchmark for the smallintmap library
extern crate collections;
extern crate time;
use collections::SmallIntMap;
use std::os;
use std::uint;
fn append_sequential(min: uint, max: uint, map: &mut SmallIntMap<uint>) {
for i in range(min, max) {
map.insert(i, i + 22u);
}
}
fn check_sequential(min: uint, max: uint, map: &SmallIntMap<uint>) {
for i in range(min, max) {
assert_eq!(*map.get(&i), i + 22u);
}
}
fn main() {
|
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
vec!("".to_owned(), "100000".to_owned(), "100".to_owned())
} else if args.len() <= 1u {
vec!("".to_owned(), "10000".to_owned(), "50".to_owned())
} else {
args.move_iter().collect()
};
let max = from_str::<uint>(*args.get(1)).unwrap();
let rep = from_str::<uint>(*args.get(2)).unwrap();
let mut checkf = 0.0;
let mut appendf = 0.0;
for _ in range(0u, rep) {
let mut map = SmallIntMap::new();
let start = time::precise_time_s();
append_sequential(0u, max, &mut map);
let mid = time::precise_time_s();
check_sequential(0u, max, &map);
let end = time::precise_time_s();
checkf += (end - mid) as f64;
appendf += (mid - start) as f64;
}
let maxf = max as f64;
println!("insert(): {:?} seconds\n", checkf);
println!(" : {} op/sec\n", maxf/checkf);
println!("get() : {:?} seconds\n", appendf);
println!(" : {} op/sec\n", maxf/appendf);
}
|
random_line_split
|
|
std-smallintmap.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Microbenchmark for the smallintmap library
extern crate collections;
extern crate time;
use collections::SmallIntMap;
use std::os;
use std::uint;
fn append_sequential(min: uint, max: uint, map: &mut SmallIntMap<uint>) {
for i in range(min, max) {
map.insert(i, i + 22u);
}
}
fn check_sequential(min: uint, max: uint, map: &SmallIntMap<uint>) {
for i in range(min, max) {
assert_eq!(*map.get(&i), i + 22u);
}
}
fn
|
() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
vec!("".to_owned(), "100000".to_owned(), "100".to_owned())
} else if args.len() <= 1u {
vec!("".to_owned(), "10000".to_owned(), "50".to_owned())
} else {
args.move_iter().collect()
};
let max = from_str::<uint>(*args.get(1)).unwrap();
let rep = from_str::<uint>(*args.get(2)).unwrap();
let mut checkf = 0.0;
let mut appendf = 0.0;
for _ in range(0u, rep) {
let mut map = SmallIntMap::new();
let start = time::precise_time_s();
append_sequential(0u, max, &mut map);
let mid = time::precise_time_s();
check_sequential(0u, max, &map);
let end = time::precise_time_s();
checkf += (end - mid) as f64;
appendf += (mid - start) as f64;
}
let maxf = max as f64;
println!("insert(): {:?} seconds\n", checkf);
println!(" : {} op/sec\n", maxf/checkf);
println!("get() : {:?} seconds\n", appendf);
println!(" : {} op/sec\n", maxf/appendf);
}
|
main
|
identifier_name
|
std-smallintmap.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Microbenchmark for the smallintmap library
extern crate collections;
extern crate time;
use collections::SmallIntMap;
use std::os;
use std::uint;
fn append_sequential(min: uint, max: uint, map: &mut SmallIntMap<uint>) {
for i in range(min, max) {
map.insert(i, i + 22u);
}
}
fn check_sequential(min: uint, max: uint, map: &SmallIntMap<uint>) {
for i in range(min, max) {
assert_eq!(*map.get(&i), i + 22u);
}
}
fn main() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some()
|
else if args.len() <= 1u {
vec!("".to_owned(), "10000".to_owned(), "50".to_owned())
} else {
args.move_iter().collect()
};
let max = from_str::<uint>(*args.get(1)).unwrap();
let rep = from_str::<uint>(*args.get(2)).unwrap();
let mut checkf = 0.0;
let mut appendf = 0.0;
for _ in range(0u, rep) {
let mut map = SmallIntMap::new();
let start = time::precise_time_s();
append_sequential(0u, max, &mut map);
let mid = time::precise_time_s();
check_sequential(0u, max, &map);
let end = time::precise_time_s();
checkf += (end - mid) as f64;
appendf += (mid - start) as f64;
}
let maxf = max as f64;
println!("insert(): {:?} seconds\n", checkf);
println!(" : {} op/sec\n", maxf/checkf);
println!("get() : {:?} seconds\n", appendf);
println!(" : {} op/sec\n", maxf/appendf);
}
|
{
vec!("".to_owned(), "100000".to_owned(), "100".to_owned())
}
|
conditional_block
|
scope.rs
|
use std::collections::HashMap;
use ast::Value;
#[derive(Clone)]
pub struct Scope {
// Maps variable names to values.
map: HashMap<String, Value>,
// The scope containing this one.
pub parent: Option<Box<Scope>>,
}
impl Scope {
pub fn new(parent: Option<Self>) -> Self {
Scope { map: HashMap::new(), parent: parent.map(Box::new) }
}
// Assign a value to a given variable.
pub fn assign(&mut self, var: &str, val: Value) {
self.map.insert(String::from(var), val);
}
// Lookup the value associated with a given variable.
pub fn lookup(&self, var: &str) -> Option<&Value> {
self.map.get(var)
}
// Returns whether the variable is defined int the scope.
pub fn contains_var(&self, var: &str) -> bool {
self.map.contains_key(var)
}
// Returns number of variables in the scope.
pub fn len(&self) -> usize {
self.map.len()
}
// Returns a reference to the containing scope.
pub fn
|
(&self) -> Option<&Box<Scope>> {
self.parent.as_ref()
}
}
|
parent
|
identifier_name
|
scope.rs
|
use std::collections::HashMap;
use ast::Value;
#[derive(Clone)]
pub struct Scope {
// Maps variable names to values.
map: HashMap<String, Value>,
// The scope containing this one.
pub parent: Option<Box<Scope>>,
}
impl Scope {
pub fn new(parent: Option<Self>) -> Self {
|
}
// Assign a value to a given variable.
pub fn assign(&mut self, var: &str, val: Value) {
self.map.insert(String::from(var), val);
}
// Lookup the value associated with a given variable.
pub fn lookup(&self, var: &str) -> Option<&Value> {
self.map.get(var)
}
// Returns whether the variable is defined int the scope.
pub fn contains_var(&self, var: &str) -> bool {
self.map.contains_key(var)
}
// Returns number of variables in the scope.
pub fn len(&self) -> usize {
self.map.len()
}
// Returns a reference to the containing scope.
pub fn parent(&self) -> Option<&Box<Scope>> {
self.parent.as_ref()
}
}
|
Scope { map: HashMap::new(), parent: parent.map(Box::new) }
|
random_line_split
|
scope.rs
|
use std::collections::HashMap;
use ast::Value;
#[derive(Clone)]
pub struct Scope {
// Maps variable names to values.
map: HashMap<String, Value>,
// The scope containing this one.
pub parent: Option<Box<Scope>>,
}
impl Scope {
pub fn new(parent: Option<Self>) -> Self {
Scope { map: HashMap::new(), parent: parent.map(Box::new) }
}
// Assign a value to a given variable.
pub fn assign(&mut self, var: &str, val: Value) {
self.map.insert(String::from(var), val);
}
// Lookup the value associated with a given variable.
pub fn lookup(&self, var: &str) -> Option<&Value>
|
// Returns whether the variable is defined int the scope.
pub fn contains_var(&self, var: &str) -> bool {
self.map.contains_key(var)
}
// Returns number of variables in the scope.
pub fn len(&self) -> usize {
self.map.len()
}
// Returns a reference to the containing scope.
pub fn parent(&self) -> Option<&Box<Scope>> {
self.parent.as_ref()
}
}
|
{
self.map.get(var)
}
|
identifier_body
|
lib.rs
|
// Copyright 2018 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate chrono;
extern crate fern;
extern crate jsonrpc_lite;
extern crate languageserver_types as lsp_types;
extern crate serde;
extern crate url;
extern crate xi_core_lib as xi_core;
extern crate xi_plugin_lib;
extern crate xi_rope;
extern crate xi_rpc;
use xi_plugin_lib::mainloop;
use xi_plugin_lib::Plugin;
pub mod conversion_utils;
pub mod language_server_client;
pub mod lsp_plugin;
pub mod parse_helper;
mod result_queue;
pub mod types;
mod utils;
pub use crate::lsp_plugin::LspPlugin;
pub use crate::types::Config;
pub fn
|
<P: Plugin>(plugin: &mut P) {
mainloop(plugin).unwrap();
}
|
start_mainloop
|
identifier_name
|
lib.rs
|
// Copyright 2018 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate chrono;
extern crate fern;
extern crate jsonrpc_lite;
extern crate languageserver_types as lsp_types;
extern crate serde;
extern crate url;
extern crate xi_core_lib as xi_core;
extern crate xi_plugin_lib;
extern crate xi_rope;
extern crate xi_rpc;
use xi_plugin_lib::mainloop;
use xi_plugin_lib::Plugin;
pub mod conversion_utils;
pub mod language_server_client;
pub mod lsp_plugin;
pub mod parse_helper;
mod result_queue;
pub mod types;
mod utils;
pub use crate::lsp_plugin::LspPlugin;
|
pub use crate::types::Config;
pub fn start_mainloop<P: Plugin>(plugin: &mut P) {
mainloop(plugin).unwrap();
}
|
random_line_split
|
|
lib.rs
|
// Copyright 2018 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate chrono;
extern crate fern;
extern crate jsonrpc_lite;
extern crate languageserver_types as lsp_types;
extern crate serde;
extern crate url;
extern crate xi_core_lib as xi_core;
extern crate xi_plugin_lib;
extern crate xi_rope;
extern crate xi_rpc;
use xi_plugin_lib::mainloop;
use xi_plugin_lib::Plugin;
pub mod conversion_utils;
pub mod language_server_client;
pub mod lsp_plugin;
pub mod parse_helper;
mod result_queue;
pub mod types;
mod utils;
pub use crate::lsp_plugin::LspPlugin;
pub use crate::types::Config;
pub fn start_mainloop<P: Plugin>(plugin: &mut P)
|
{
mainloop(plugin).unwrap();
}
|
identifier_body
|
|
renderer.rs
|
// This file is part of Mooneye GB.
// Copyright (C) 2014-2020 Joonas Javanainen <[email protected]>
//
// Mooneye GB is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Mooneye GB is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Mooneye GB. If not, see <http://www.gnu.org/licenses/>.
use anyhow::Error;
use glium::backend::Facade;
use glium::index::PrimitiveType;
use glium::texture::pixel_buffer::PixelBuffer;
use glium::texture::texture2d::Texture2d;
use glium::texture::{MipmapsOption, UncompressedFloatFormat};
use glium::uniforms::{MagnifySamplerFilter, MinifySamplerFilter};
use glium::{implement_vertex, program, uniform};
use glium::{DrawParameters, IndexBuffer, Program, Surface, VertexBuffer};
use mooneye_gb;
use nalgebra::{Matrix4, Vector4};
type Texture = Texture2d;
#[derive(Copy, Clone)]
pub struct Vertex {
position: [f32; 2],
tex_coords: [f32; 2],
}
implement_vertex!(Vertex, position, tex_coords);
pub struct Renderer {
vertex_buffer: VertexBuffer<Vertex>,
index_buffer: IndexBuffer<u16>,
pixel_buffer: PixelBuffer<u8>,
program: Program,
texture_even: Texture,
texture_odd: Texture,
matrix: Matrix4<f32>,
palette: Matrix4<f32>,
frame_state: FrameState,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum FrameState {
Even,
Odd,
}
impl FrameState {
fn flip(&mut self) {
*self = match self {
FrameState::Even => FrameState::Odd,
FrameState::Odd => FrameState::Even,
}
}
}
const TEXTURE_WIDTH: u32 = 256;
const TEXTURE_HEIGHT: u32 = 256;
const TEX_OFFSET_X: f32 = mooneye_gb::SCREEN_WIDTH as f32 / TEXTURE_WIDTH as f32;
const TEX_OFFSET_Y: f32 = mooneye_gb::SCREEN_HEIGHT as f32 / TEXTURE_HEIGHT as f32;
fn upload_pixels(texture: &mut Texture, pixel_buffer: &PixelBuffer<u8>)
|
const ASPECT_RATIO: f32 = mooneye_gb::SCREEN_WIDTH as f32 / mooneye_gb::SCREEN_HEIGHT as f32;
fn aspect_ratio_correction(width: u32, height: u32) -> (f32, f32) {
let fb_aspect_ratio = width as f32 / height as f32;
let scale = ASPECT_RATIO / fb_aspect_ratio;
if fb_aspect_ratio >= ASPECT_RATIO {
(scale, 1.0)
} else {
(1.0, 1.0 / scale)
}
}
impl Renderer {
pub fn new<F: Facade>(display: &F) -> Result<Renderer, Error> {
let vertexes = [
Vertex {
position: [-1.0, -1.0],
tex_coords: [0.0, TEX_OFFSET_Y],
},
Vertex {
position: [-1.0, 1.0],
tex_coords: [0.0, 0.0],
},
Vertex {
position: [1.0, 1.0],
tex_coords: [TEX_OFFSET_X, 0.0],
},
Vertex {
position: [1.0, -1.0],
tex_coords: [TEX_OFFSET_X, TEX_OFFSET_Y],
},
];
let vertex_buffer = VertexBuffer::immutable(display, &vertexes)?;
let index_buffer =
IndexBuffer::immutable(display, PrimitiveType::TriangleStrip, &[1u16, 2, 0, 3])?;
let program = program!(
display,
140 => {
vertex: include_str!("shader/vert_140.glsl"),
fragment: include_str!("shader/frag_140.glsl"),
outputs_srgb: true
},
110 => {
vertex: include_str!("shader/vert_110.glsl"),
fragment: include_str!("shader/frag_110.glsl"),
outputs_srgb: true
}
)?;
let pixel_buffer = PixelBuffer::new_empty(
display,
mooneye_gb::SCREEN_WIDTH * mooneye_gb::SCREEN_HEIGHT,
);
pixel_buffer.write(&[0; mooneye_gb::SCREEN_PIXELS]);
let mut texture_even = Texture::empty_with_format(
display,
UncompressedFloatFormat::U8,
MipmapsOption::NoMipmap,
TEXTURE_WIDTH,
TEXTURE_HEIGHT,
)?;
let mut texture_odd = Texture::empty_with_format(
display,
UncompressedFloatFormat::U8,
MipmapsOption::NoMipmap,
TEXTURE_WIDTH,
TEXTURE_HEIGHT,
)?;
upload_pixels(&mut texture_even, &pixel_buffer);
upload_pixels(&mut texture_odd, &pixel_buffer);
let (width, height) = display.get_context().get_framebuffer_dimensions();
let (x_scale, y_scale) = aspect_ratio_correction(width, height);
let matrix = Matrix4::from_diagonal(&Vector4::new(x_scale, y_scale, 1.0, 1.0));
let palette = Matrix4::new(
255.0, 181.0, 107.0, 33.0, 247.0, 174.0, 105.0, 32.0, 123.0, 74.0, 49.0, 16.0, 1.0, 1.0, 1.0,
1.0,
) / 255.0;
Ok(Renderer {
vertex_buffer,
index_buffer,
pixel_buffer,
program,
texture_even,
texture_odd,
matrix,
palette,
frame_state: FrameState::Even,
})
}
pub fn draw<S: Surface>(&self, frame: &mut S) -> Result<(), Error> {
let matrix: &[[f32; 4]; 4] = self.matrix.as_ref();
let palette: &[[f32; 4]; 4] = self.palette.as_ref();
let (tex_front, tex_back) = match self.frame_state {
FrameState::Even => (&self.texture_even, &self.texture_odd),
FrameState::Odd => (&self.texture_odd, &self.texture_even),
};
let uniforms = uniform! {
matrix: *matrix,
palette: *palette,
tex_front: tex_front.sampled()
.minify_filter(MinifySamplerFilter::Nearest)
.magnify_filter(MagnifySamplerFilter::Nearest),
tex_back: tex_back.sampled()
.minify_filter(MinifySamplerFilter::Nearest)
.magnify_filter(MagnifySamplerFilter::Nearest),
};
let params = DrawParameters {
..Default::default()
};
frame.draw(
&self.vertex_buffer,
&self.index_buffer,
&self.program,
&uniforms,
¶ms,
)?;
Ok(())
}
pub fn update_dimensions<F: Facade>(&mut self, display: &F) {
let (width, height) = display.get_context().get_framebuffer_dimensions();
let (x_scale, y_scale) = aspect_ratio_correction(width, height);
self.matrix.m11 = x_scale;
self.matrix.m22 = y_scale;
}
pub fn update_pixels(&mut self, pixels: &mooneye_gb::ScreenBuffer) {
let mut buffer = [0u8; mooneye_gb::SCREEN_PIXELS];
for idx in 0..mooneye_gb::SCREEN_PIXELS {
buffer[idx] = pixels[idx] as u8;
}
self.pixel_buffer.write(&buffer);
self.frame_state.flip();
let texture = match self.frame_state {
FrameState::Odd => &mut self.texture_odd,
FrameState::Even => &mut self.texture_even,
};
upload_pixels(texture, &self.pixel_buffer);
}
}
|
{
texture.main_level().raw_upload_from_pixel_buffer(
pixel_buffer.as_slice(),
0..mooneye_gb::SCREEN_WIDTH as u32,
0..mooneye_gb::SCREEN_HEIGHT as u32,
0..1,
);
}
|
identifier_body
|
renderer.rs
|
// This file is part of Mooneye GB.
// Copyright (C) 2014-2020 Joonas Javanainen <[email protected]>
//
// Mooneye GB is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Mooneye GB is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Mooneye GB. If not, see <http://www.gnu.org/licenses/>.
use anyhow::Error;
use glium::backend::Facade;
use glium::index::PrimitiveType;
use glium::texture::pixel_buffer::PixelBuffer;
use glium::texture::texture2d::Texture2d;
use glium::texture::{MipmapsOption, UncompressedFloatFormat};
use glium::uniforms::{MagnifySamplerFilter, MinifySamplerFilter};
use glium::{implement_vertex, program, uniform};
use glium::{DrawParameters, IndexBuffer, Program, Surface, VertexBuffer};
use mooneye_gb;
use nalgebra::{Matrix4, Vector4};
type Texture = Texture2d;
#[derive(Copy, Clone)]
pub struct Vertex {
position: [f32; 2],
tex_coords: [f32; 2],
}
implement_vertex!(Vertex, position, tex_coords);
pub struct Renderer {
vertex_buffer: VertexBuffer<Vertex>,
index_buffer: IndexBuffer<u16>,
pixel_buffer: PixelBuffer<u8>,
program: Program,
texture_even: Texture,
texture_odd: Texture,
matrix: Matrix4<f32>,
palette: Matrix4<f32>,
frame_state: FrameState,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum FrameState {
Even,
Odd,
}
impl FrameState {
fn flip(&mut self) {
*self = match self {
FrameState::Even => FrameState::Odd,
FrameState::Odd => FrameState::Even,
}
}
}
const TEXTURE_WIDTH: u32 = 256;
const TEXTURE_HEIGHT: u32 = 256;
const TEX_OFFSET_X: f32 = mooneye_gb::SCREEN_WIDTH as f32 / TEXTURE_WIDTH as f32;
const TEX_OFFSET_Y: f32 = mooneye_gb::SCREEN_HEIGHT as f32 / TEXTURE_HEIGHT as f32;
fn upload_pixels(texture: &mut Texture, pixel_buffer: &PixelBuffer<u8>) {
texture.main_level().raw_upload_from_pixel_buffer(
pixel_buffer.as_slice(),
0..mooneye_gb::SCREEN_WIDTH as u32,
0..mooneye_gb::SCREEN_HEIGHT as u32,
0..1,
);
}
const ASPECT_RATIO: f32 = mooneye_gb::SCREEN_WIDTH as f32 / mooneye_gb::SCREEN_HEIGHT as f32;
fn aspect_ratio_correction(width: u32, height: u32) -> (f32, f32) {
let fb_aspect_ratio = width as f32 / height as f32;
let scale = ASPECT_RATIO / fb_aspect_ratio;
if fb_aspect_ratio >= ASPECT_RATIO {
(scale, 1.0)
} else {
(1.0, 1.0 / scale)
}
}
impl Renderer {
pub fn new<F: Facade>(display: &F) -> Result<Renderer, Error> {
let vertexes = [
Vertex {
position: [-1.0, -1.0],
tex_coords: [0.0, TEX_OFFSET_Y],
},
Vertex {
position: [-1.0, 1.0],
tex_coords: [0.0, 0.0],
},
Vertex {
position: [1.0, 1.0],
tex_coords: [TEX_OFFSET_X, 0.0],
},
Vertex {
position: [1.0, -1.0],
tex_coords: [TEX_OFFSET_X, TEX_OFFSET_Y],
},
];
let vertex_buffer = VertexBuffer::immutable(display, &vertexes)?;
let index_buffer =
IndexBuffer::immutable(display, PrimitiveType::TriangleStrip, &[1u16, 2, 0, 3])?;
let program = program!(
display,
140 => {
vertex: include_str!("shader/vert_140.glsl"),
fragment: include_str!("shader/frag_140.glsl"),
outputs_srgb: true
},
110 => {
vertex: include_str!("shader/vert_110.glsl"),
fragment: include_str!("shader/frag_110.glsl"),
outputs_srgb: true
}
)?;
let pixel_buffer = PixelBuffer::new_empty(
display,
mooneye_gb::SCREEN_WIDTH * mooneye_gb::SCREEN_HEIGHT,
);
pixel_buffer.write(&[0; mooneye_gb::SCREEN_PIXELS]);
let mut texture_even = Texture::empty_with_format(
display,
UncompressedFloatFormat::U8,
MipmapsOption::NoMipmap,
TEXTURE_WIDTH,
TEXTURE_HEIGHT,
)?;
let mut texture_odd = Texture::empty_with_format(
display,
UncompressedFloatFormat::U8,
MipmapsOption::NoMipmap,
TEXTURE_WIDTH,
TEXTURE_HEIGHT,
)?;
upload_pixels(&mut texture_even, &pixel_buffer);
upload_pixels(&mut texture_odd, &pixel_buffer);
let (width, height) = display.get_context().get_framebuffer_dimensions();
let (x_scale, y_scale) = aspect_ratio_correction(width, height);
let matrix = Matrix4::from_diagonal(&Vector4::new(x_scale, y_scale, 1.0, 1.0));
let palette = Matrix4::new(
255.0, 181.0, 107.0, 33.0, 247.0, 174.0, 105.0, 32.0, 123.0, 74.0, 49.0, 16.0, 1.0, 1.0, 1.0,
1.0,
) / 255.0;
Ok(Renderer {
vertex_buffer,
index_buffer,
pixel_buffer,
program,
texture_even,
texture_odd,
matrix,
palette,
frame_state: FrameState::Even,
})
}
pub fn draw<S: Surface>(&self, frame: &mut S) -> Result<(), Error> {
let matrix: &[[f32; 4]; 4] = self.matrix.as_ref();
let palette: &[[f32; 4]; 4] = self.palette.as_ref();
let (tex_front, tex_back) = match self.frame_state {
FrameState::Even => (&self.texture_even, &self.texture_odd),
FrameState::Odd => (&self.texture_odd, &self.texture_even),
};
let uniforms = uniform! {
matrix: *matrix,
palette: *palette,
tex_front: tex_front.sampled()
.minify_filter(MinifySamplerFilter::Nearest)
.magnify_filter(MagnifySamplerFilter::Nearest),
tex_back: tex_back.sampled()
.minify_filter(MinifySamplerFilter::Nearest)
.magnify_filter(MagnifySamplerFilter::Nearest),
};
let params = DrawParameters {
..Default::default()
|
frame.draw(
&self.vertex_buffer,
&self.index_buffer,
&self.program,
&uniforms,
¶ms,
)?;
Ok(())
}
pub fn update_dimensions<F: Facade>(&mut self, display: &F) {
let (width, height) = display.get_context().get_framebuffer_dimensions();
let (x_scale, y_scale) = aspect_ratio_correction(width, height);
self.matrix.m11 = x_scale;
self.matrix.m22 = y_scale;
}
pub fn update_pixels(&mut self, pixels: &mooneye_gb::ScreenBuffer) {
let mut buffer = [0u8; mooneye_gb::SCREEN_PIXELS];
for idx in 0..mooneye_gb::SCREEN_PIXELS {
buffer[idx] = pixels[idx] as u8;
}
self.pixel_buffer.write(&buffer);
self.frame_state.flip();
let texture = match self.frame_state {
FrameState::Odd => &mut self.texture_odd,
FrameState::Even => &mut self.texture_even,
};
upload_pixels(texture, &self.pixel_buffer);
}
}
|
};
|
random_line_split
|
renderer.rs
|
// This file is part of Mooneye GB.
// Copyright (C) 2014-2020 Joonas Javanainen <[email protected]>
//
// Mooneye GB is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Mooneye GB is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Mooneye GB. If not, see <http://www.gnu.org/licenses/>.
use anyhow::Error;
use glium::backend::Facade;
use glium::index::PrimitiveType;
use glium::texture::pixel_buffer::PixelBuffer;
use glium::texture::texture2d::Texture2d;
use glium::texture::{MipmapsOption, UncompressedFloatFormat};
use glium::uniforms::{MagnifySamplerFilter, MinifySamplerFilter};
use glium::{implement_vertex, program, uniform};
use glium::{DrawParameters, IndexBuffer, Program, Surface, VertexBuffer};
use mooneye_gb;
use nalgebra::{Matrix4, Vector4};
type Texture = Texture2d;
#[derive(Copy, Clone)]
pub struct Vertex {
position: [f32; 2],
tex_coords: [f32; 2],
}
implement_vertex!(Vertex, position, tex_coords);
pub struct Renderer {
vertex_buffer: VertexBuffer<Vertex>,
index_buffer: IndexBuffer<u16>,
pixel_buffer: PixelBuffer<u8>,
program: Program,
texture_even: Texture,
texture_odd: Texture,
matrix: Matrix4<f32>,
palette: Matrix4<f32>,
frame_state: FrameState,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum FrameState {
Even,
Odd,
}
impl FrameState {
fn flip(&mut self) {
*self = match self {
FrameState::Even => FrameState::Odd,
FrameState::Odd => FrameState::Even,
}
}
}
const TEXTURE_WIDTH: u32 = 256;
const TEXTURE_HEIGHT: u32 = 256;
const TEX_OFFSET_X: f32 = mooneye_gb::SCREEN_WIDTH as f32 / TEXTURE_WIDTH as f32;
const TEX_OFFSET_Y: f32 = mooneye_gb::SCREEN_HEIGHT as f32 / TEXTURE_HEIGHT as f32;
fn upload_pixels(texture: &mut Texture, pixel_buffer: &PixelBuffer<u8>) {
texture.main_level().raw_upload_from_pixel_buffer(
pixel_buffer.as_slice(),
0..mooneye_gb::SCREEN_WIDTH as u32,
0..mooneye_gb::SCREEN_HEIGHT as u32,
0..1,
);
}
const ASPECT_RATIO: f32 = mooneye_gb::SCREEN_WIDTH as f32 / mooneye_gb::SCREEN_HEIGHT as f32;
fn
|
(width: u32, height: u32) -> (f32, f32) {
let fb_aspect_ratio = width as f32 / height as f32;
let scale = ASPECT_RATIO / fb_aspect_ratio;
if fb_aspect_ratio >= ASPECT_RATIO {
(scale, 1.0)
} else {
(1.0, 1.0 / scale)
}
}
impl Renderer {
pub fn new<F: Facade>(display: &F) -> Result<Renderer, Error> {
let vertexes = [
Vertex {
position: [-1.0, -1.0],
tex_coords: [0.0, TEX_OFFSET_Y],
},
Vertex {
position: [-1.0, 1.0],
tex_coords: [0.0, 0.0],
},
Vertex {
position: [1.0, 1.0],
tex_coords: [TEX_OFFSET_X, 0.0],
},
Vertex {
position: [1.0, -1.0],
tex_coords: [TEX_OFFSET_X, TEX_OFFSET_Y],
},
];
let vertex_buffer = VertexBuffer::immutable(display, &vertexes)?;
let index_buffer =
IndexBuffer::immutable(display, PrimitiveType::TriangleStrip, &[1u16, 2, 0, 3])?;
let program = program!(
display,
140 => {
vertex: include_str!("shader/vert_140.glsl"),
fragment: include_str!("shader/frag_140.glsl"),
outputs_srgb: true
},
110 => {
vertex: include_str!("shader/vert_110.glsl"),
fragment: include_str!("shader/frag_110.glsl"),
outputs_srgb: true
}
)?;
let pixel_buffer = PixelBuffer::new_empty(
display,
mooneye_gb::SCREEN_WIDTH * mooneye_gb::SCREEN_HEIGHT,
);
pixel_buffer.write(&[0; mooneye_gb::SCREEN_PIXELS]);
let mut texture_even = Texture::empty_with_format(
display,
UncompressedFloatFormat::U8,
MipmapsOption::NoMipmap,
TEXTURE_WIDTH,
TEXTURE_HEIGHT,
)?;
let mut texture_odd = Texture::empty_with_format(
display,
UncompressedFloatFormat::U8,
MipmapsOption::NoMipmap,
TEXTURE_WIDTH,
TEXTURE_HEIGHT,
)?;
upload_pixels(&mut texture_even, &pixel_buffer);
upload_pixels(&mut texture_odd, &pixel_buffer);
let (width, height) = display.get_context().get_framebuffer_dimensions();
let (x_scale, y_scale) = aspect_ratio_correction(width, height);
let matrix = Matrix4::from_diagonal(&Vector4::new(x_scale, y_scale, 1.0, 1.0));
let palette = Matrix4::new(
255.0, 181.0, 107.0, 33.0, 247.0, 174.0, 105.0, 32.0, 123.0, 74.0, 49.0, 16.0, 1.0, 1.0, 1.0,
1.0,
) / 255.0;
Ok(Renderer {
vertex_buffer,
index_buffer,
pixel_buffer,
program,
texture_even,
texture_odd,
matrix,
palette,
frame_state: FrameState::Even,
})
}
pub fn draw<S: Surface>(&self, frame: &mut S) -> Result<(), Error> {
let matrix: &[[f32; 4]; 4] = self.matrix.as_ref();
let palette: &[[f32; 4]; 4] = self.palette.as_ref();
let (tex_front, tex_back) = match self.frame_state {
FrameState::Even => (&self.texture_even, &self.texture_odd),
FrameState::Odd => (&self.texture_odd, &self.texture_even),
};
let uniforms = uniform! {
matrix: *matrix,
palette: *palette,
tex_front: tex_front.sampled()
.minify_filter(MinifySamplerFilter::Nearest)
.magnify_filter(MagnifySamplerFilter::Nearest),
tex_back: tex_back.sampled()
.minify_filter(MinifySamplerFilter::Nearest)
.magnify_filter(MagnifySamplerFilter::Nearest),
};
let params = DrawParameters {
..Default::default()
};
frame.draw(
&self.vertex_buffer,
&self.index_buffer,
&self.program,
&uniforms,
¶ms,
)?;
Ok(())
}
pub fn update_dimensions<F: Facade>(&mut self, display: &F) {
let (width, height) = display.get_context().get_framebuffer_dimensions();
let (x_scale, y_scale) = aspect_ratio_correction(width, height);
self.matrix.m11 = x_scale;
self.matrix.m22 = y_scale;
}
pub fn update_pixels(&mut self, pixels: &mooneye_gb::ScreenBuffer) {
let mut buffer = [0u8; mooneye_gb::SCREEN_PIXELS];
for idx in 0..mooneye_gb::SCREEN_PIXELS {
buffer[idx] = pixels[idx] as u8;
}
self.pixel_buffer.write(&buffer);
self.frame_state.flip();
let texture = match self.frame_state {
FrameState::Odd => &mut self.texture_odd,
FrameState::Even => &mut self.texture_even,
};
upload_pixels(texture, &self.pixel_buffer);
}
}
|
aspect_ratio_correction
|
identifier_name
|
issue-3979.rs
|
// xfail-test
// Reason: ICE with explicit self
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Positioned {
fn SetX(&mut self, int);
fn X(&self) -> int;
}
trait Movable: Positioned {
fn translate(&mut self, dx: int) {
let x = self.X();
self.SetX(x + dx);
}
}
struct
|
{ x: int, y: int }
impl Positioned for Point {
fn SetX(&mut self, x: int) {
self.x = x;
}
fn X(&self) -> int {
self.x
}
}
impl Movable for Point;
pub fn main() {
let mut p = Point{ x: 1, y: 2};
p.translate(3);
assert_eq!(p.X(), 4);
}
|
Point
|
identifier_name
|
issue-3979.rs
|
// xfail-test
// Reason: ICE with explicit self
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Positioned {
fn SetX(&mut self, int);
fn X(&self) -> int;
}
trait Movable: Positioned {
fn translate(&mut self, dx: int) {
let x = self.X();
self.SetX(x + dx);
}
}
struct Point { x: int, y: int }
impl Positioned for Point {
fn SetX(&mut self, x: int) {
self.x = x;
}
fn X(&self) -> int {
self.x
}
}
impl Movable for Point;
pub fn main()
|
{
let mut p = Point{ x: 1, y: 2};
p.translate(3);
assert_eq!(p.X(), 4);
}
|
identifier_body
|
|
issue-3979.rs
|
// xfail-test
// Reason: ICE with explicit self
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Positioned {
fn SetX(&mut self, int);
fn X(&self) -> int;
}
trait Movable: Positioned {
fn translate(&mut self, dx: int) {
let x = self.X();
self.SetX(x + dx);
}
}
struct Point { x: int, y: int }
impl Positioned for Point {
fn SetX(&mut self, x: int) {
self.x = x;
}
fn X(&self) -> int {
self.x
}
}
impl Movable for Point;
|
pub fn main() {
let mut p = Point{ x: 1, y: 2};
p.translate(3);
assert_eq!(p.X(), 4);
}
|
random_line_split
|
|
import-glob-circular.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: unresolved
mod circ1 {
pub use circ2::f2;
pub fn f1()
|
pub fn common() -> uint { return 0u; }
}
mod circ2 {
pub use circ1::f1;
pub fn f2() { info!("f2"); }
pub fn common() -> uint { return 1u; }
}
mod test {
use circ1::*;
fn test() { f1066(); }
}
|
{ info!("f1"); }
|
identifier_body
|
import-glob-circular.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
mod circ1 {
pub use circ2::f2;
pub fn f1() { info!("f1"); }
pub fn common() -> uint { return 0u; }
}
mod circ2 {
pub use circ1::f1;
pub fn f2() { info!("f2"); }
pub fn common() -> uint { return 1u; }
}
mod test {
use circ1::*;
fn test() { f1066(); }
}
|
// error-pattern: unresolved
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.