file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
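Each row below is one fill-in-the-middle (FIM) sample: `prefix` and `suffix` are the code surrounding a removed span, `middle` is the removed span itself, and `fim_type` records how the span was chosen. The four `fim_type` classes present in this sample are `random_line_split`, `identifier_body`, `conditional_block`, and `identifier_name`. Within a row, ` | ` marks column boundaries, so a stray pipe inside a file body shows where `prefix` ends and `suffix` begins. A minimal sketch of how a row recombines into its original file (the struct and function are illustrative, not part of the dataset):

```rust
// Illustrative only: field names mirror the column header above.
struct FimRow {
    prefix: String,
    middle: String,
    suffix: String,
}

// The original source file is the three spans concatenated in order.
fn reconstruct(row: &FimRow) -> String {
    format!("{}{}{}", row.prefix, row.middle, row.suffix)
}
```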
lib.rs | //! A Rust library for allocation-limited computation of the Discrete Cosine Transform.
//!
//! 1D DCTs are allocation-free but 2D requires allocation.
//!
//! Features:
//!
//! * `simd`: use SIMD types to speed computation (2D DCT only)
//! * `cos-approx`: use a Taylor series approximation of cosine instead of the stdlib
//! implementation (which is usually much slower but also higher precision)
use std::f64::consts::{PI, SQRT_2};
use std::ops::Range;
/// An allocation-free one-dimensional Discrete Cosine Transform.
///
/// Each iteration produces the next DCT value in the sequence.
#[derive(Clone, Debug)]
pub struct DCT1D<'a> {
data: &'a [f64],
curr: Range<usize>,
}
impl<'a> DCT1D<'a> {
/// Create a new DCT 1D adaptor from a 1D vector of data.
pub fn new(data: &[f64]) -> DCT1D {
let curr = 0 .. data.len();
DCT1D {
data: data,
curr: curr,
}
}
// Converted from the C implementation here:
// http://unix4lyfe.org/dct/listing2.c
// Source page:
// http://unix4lyfe.org/dct/ (Accessed 8/10/2014)
fn next_dct_val(&mut self) -> Option<f64> {
self.curr.next().map(|u| {
let mut z = 0.0;
let data_len = self.data.len();
for (x_idx, &x) in self.data.iter().enumerate() {
z += x * cos(
PI * u as f64 * (2 * x_idx + 1) as f64
/ (2 * data_len) as f64
);
}
if u == 0 {
z *= 1.0 / SQRT_2;
}
z / 2.0
})
}
}
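// For reference, the value produced for index `u` above is the scaled DCT-II
// coefficient X(u) = s(u)/2 * sum_{n=0..N-1} x(n) * cos(pi*u*(2n+1)/(2N)),
// where s(0) = 1/sqrt(2) and s(u) = 1 otherwise; this matches the sum and the
// `u == 0` special case in `next_dct_val`.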
impl<'a> Iterator for DCT1D<'a> {
type Item = f64;
fn next(&mut self) -> Option<f64> {
self.next_dct_val()
}
}
/// An implementation of cosine that switches to a Taylor-series approximation when throughput is
/// preferred over precision.
#[inline(always)]
pub fn cos(x: f64) -> f64 {
// This branch should be optimized out.
if cfg!(feature = "cos-approx") {
// Fold the argument into [-pi, pi]; outside that range the truncated Taylor series diverges badly.
let x = (x.abs() + PI) % (2.0 * PI) - PI;
// Approximate cos(x) with the Taylor series through the x^8 term.
// Extend the series with more terms for higher precision.
let x2 = x.powi(2);
let x4 = x.powi(4);
let x6 = x.powi(6);
let x8 = x.powi(8);
1.0 - (x2 / 2.0) + (x4 / 24.0) - (x6 / 720.0) + (x8 / 40320.0)
} else {
x.cos()
}
}
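// Worked check of the range reduction in `cos` above (illustrative): for
// x = 3*PI, (|x| + PI) % (2.0*PI) - PI = (4*PI % 2*PI) - PI = -PI, and
// cos(-PI) = -1 = cos(3*PI), so the folded argument is cosine-equivalent.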
/// Perform a 2D DCT on a 1D-packed vector with a given rowstride.
///
/// E.g. a vector of length 9 with a rowstride of 3 will be processed as a 3x3 matrix.
///
/// Returns a vector of the same size packed in the same way.
pub fn dct_2d(packed_2d: &[f64], rowstride: usize) -> Vec<f64> {
assert_eq!(packed_2d.len() % rowstride, 0);
let mut row_dct: Vec<f64> = packed_2d
.chunks(rowstride)
.flat_map(DCT1D::new)
.collect();
swap_rows_columns(&mut row_dct, rowstride);
let mut column_dct: Vec<f64> = row_dct
.chunks(rowstride)
.flat_map(DCT1D::new)
.collect();
swap_rows_columns(&mut column_dct, rowstride);
column_dct
}
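// Shape note for `dct_2d` above: with a 9-element slice and rowstride 3
// (a 3x3 matrix), the first pass DCTs each row, the transpose re-packs
// columns as rows, and the second pass DCTs those: the separable 2D DCT.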
fn swap_rows_columns(data: &mut [f64], rowstride: usize) {
let height = data.len() / rowstride;
// In-place transpose; only square layouts (height == rowstride) round-trip correctly.
debug_assert_eq!(height, rowstride);
for y in 0 .. height {
// Start x at `y + 1` so each off-diagonal pair is swapped exactly once;
// sweeping x from 0 would swap every pair twice and undo the transpose.
for x in y + 1 .. rowstride {
data.swap(y * rowstride + x, x * rowstride + y);
}
}
}
#[cfg_attr(all(test, feature = "cos-approx"), test)]
#[cfg_attr(not(all(test, feature = "cos-approx")), allow(dead_code))]
fn test_cos_approx() {
const ERROR: f64 = 0.05;
fn test_cos_approx(x: f64) {
let approx = cos(x);
let cos = x.cos();
assert!(
(approx - cos).abs() <= ERROR,
"Approximation cos({x}) = {approx} was outside a tolerance of {error}; control value: {cos}",
x = x, approx = approx, error = ERROR, cos = cos,
);
}
let test_values = [PI, PI / 2.0, PI / 4.0, 1.0, -1.0, 2.0 * PI, 3.0 * PI, 4.0 / 3.0 * PI];
for &x in &test_values {
test_cos_approx(x);
test_cos_approx(-x);
}
}
/*
#[cfg(feature = "simd")]
mod dct_simd {
use simdty::f64x2;
use std::f64::consts::{PI, SQRT_2};
macro_rules! valx2 ( ($val:expr) => ( ::simdty::f64x2($val, $val) ) );
const PI: f64x2 = valx2!(PI);
const ONE_DIV_SQRT_2: f64x2 = valx2!(1 / SQRT_2);
const SQRT_2: f64x2 = valx2!(SQRT_2);
pub dct_rows(vals: &[Vec<f64>]) -> Vec<Vec<f64>> {
let mut out = Vec::with_capacity(vals.len());
for pair in vals.iter().chunks(2) {
if pair.len() == 2 {
let vals = pair[0].iter().cloned().zip(pair[1].iter().cloned()) |
}
}
fn dct_1dx2(vec: Vec<f64x2>) -> Vec<f64x2> {
let mut out = Vec::with_capacity(vec.len());
for u in 0 .. vec.len() {
let mut z = valx2!(0.0);
for x in 0 .. vec.len() {
z += vec[x] * cos_approx(
PI * valx2!(
u as f64 * (2 * x + 1) as f64
/ (2 * vec.len()) as f64
)
);
}
if u == 0 {
z *= ONE_DIV_SQRT_2;
}
out.insert(u, z / valx2!(2.0));
}
out
}
fn cos_approx(x2: f64x2) -> f64x2 {
#[inline(always)]
fn powi(val: f64x2, pow: i32) -> f64x2 {
unsafe { llvmint::powi_v2f64(val, pow) }
}
let x2 = powi(val, 2);
let x4 = powi(val, 4);
let x6 = powi(val, 6);
let x8 = powi(val, 8);
valx2!(1.0) - (x2 / valx2!(2.0)) + (x4 / valx2!(24.0))
- (x6 / valx2!(720.0)) + (x8 / valx2!(40320.0))
}
}
*/ | .map(f64x2)
.collect();
dct_1dx2(vals); | random_line_split |
lib.rs | //! A Rust library for allocation-limited computation of the Discrete Cosine Transform.
//!
//! 1D DCTs are allocation-free but 2D requires allocation.
//!
//! Features:
//!
//! * `simd`: use SIMD types to speed computation (2D DCT only)
//! * `cos-approx`: use a Taylor series approximation of cosine instead of the stdlib
//! implementation (which is usually much slower but also higher precision)
use std::f64::consts::{PI, SQRT_2};
use std::ops::Range;
/// An allocation-free one-dimensional Discrete Cosine Transform.
///
/// Each iteration produces the next DCT value in the sequence.
#[derive(Clone, Debug)]
pub struct DCT1D<'a> {
data: &'a [f64],
curr: Range<usize>,
}
impl<'a> DCT1D<'a> {
/// Create a new DCT 1D adaptor from a 1D vector of data.
pub fn new(data: &[f64]) -> DCT1D {
let curr = 0 .. data.len();
DCT1D {
data: data,
curr: curr,
}
}
// Converted from the C implementation here:
// http://unix4lyfe.org/dct/listing2.c
// Source page:
// http://unix4lyfe.org/dct/ (Accessed 8/10/2014)
fn next_dct_val(&mut self) -> Option<f64> {
self.curr.next().map(|u| {
let mut z = 0.0;
let data_len = self.data.len();
for (x_idx, &x) in self.data.iter().enumerate() {
z += x * cos(
PI * u as f64 * (2 * x_idx + 1) as f64
/ (2 * data_len) as f64
);
}
if u == 0 {
z *= 1.0 / SQRT_2;
}
z / 2.0
})
}
}
impl<'a> Iterator for DCT1D<'a> {
type Item = f64;
fn next(&mut self) -> Option<f64> {
self.next_dct_val()
}
}
/// An implementation of cosine that switches to a Taylor-series approximation when throughput is
/// preferred over precision.
#[inline(always)]
pub fn cos(x: f64) -> f64 {
// This branch should be optimized out.
if cfg!(feature = "cos-approx") {
// Fold the argument into [-pi, pi]; outside that range the truncated Taylor series diverges badly.
let x = (x.abs() + PI) % (2.0 * PI) - PI;
// Approximate cos(x) with the Taylor series through the x^8 term.
// Extend the series with more terms for higher precision.
let x2 = x.powi(2);
let x4 = x.powi(4);
let x6 = x.powi(6);
let x8 = x.powi(8);
1.0 - (x2 / 2.0) + (x4 / 24.0) - (x6 / 720.0) + (x8 / 40320.0)
} else {
x.cos()
}
}
/// Perform a 2D DCT on a 1D-packed vector with a given rowstride.
///
/// E.g. a vector of length 9 with a rowstride of 3 will be processed as a 3x3 matrix.
///
/// Returns a vector of the same size packed in the same way.
pub fn dct_2d(packed_2d: &[f64], rowstride: usize) -> Vec<f64> {
assert_eq!(packed_2d.len() % rowstride, 0);
let mut row_dct: Vec<f64> = packed_2d
.chunks(rowstride)
.flat_map(DCT1D::new)
.collect();
swap_rows_columns(&mut row_dct, rowstride);
let mut column_dct: Vec<f64> = row_dct
.chunks(rowstride)
.flat_map(DCT1D::new)
.collect();
swap_rows_columns(&mut column_dct, rowstride);
column_dct
}
fn swap_rows_columns(data: &mut [f64], rowstride: usize) {
let height = data.len() / rowstride;
// In-place transpose; only square layouts (height == rowstride) round-trip correctly.
debug_assert_eq!(height, rowstride);
for y in 0 .. height {
// Start x at `y + 1` so each off-diagonal pair is swapped exactly once;
// sweeping x from 0 would swap every pair twice and undo the transpose.
for x in y + 1 .. rowstride {
data.swap(y * rowstride + x, x * rowstride + y);
}
}
}
#[cfg_attr(all(test, feature = "cos-approx"), test)]
#[cfg_attr(not(all(test, feature = "cos-approx")), allow(dead_code))]
fn test_cos_approx() {
const ERROR: f64 = 0.05;
fn test_cos_approx(x: f64) |
let test_values = [PI, PI / 2.0, PI / 4.0, 1.0, -1.0, 2.0 * PI, 3.0 * PI, 4.0 / 3.0 * PI];
for &x in &test_values {
test_cos_approx(x);
test_cos_approx(-x);
}
}
/*
#[cfg(feature = "simd")]
mod dct_simd {
use simdty::f64x2;
use std::f64::consts::{PI, SQRT_2};
macro_rules! valx2 ( ($val:expr) => ( ::simdty::f64x2($val, $val) ) );
const PI: f64x2 = valx2!(PI);
const ONE_DIV_SQRT_2: f64x2 = valx2!(1 / SQRT_2);
const SQRT_2: f64x2 = valx2!(SQRT_2);
pub dct_rows(vals: &[Vec<f64>]) -> Vec<Vec<f64>> {
let mut out = Vec::with_capacity(vals.len());
for pair in vals.iter().chunks(2) {
if pair.len() == 2 {
let vals = pair[0].iter().cloned().zip(pair[1].iter().cloned())
.map(f64x2)
.collect();
dct_1dx2(vals);
}
}
fn dct_1dx2(vec: Vec<f64x2>) -> Vec<f64x2> {
let mut out = Vec::with_capacity(vec.len());
for u in 0 .. vec.len() {
let mut z = valx2!(0.0);
for x in 0 .. vec.len() {
z += vec[x] * cos_approx(
PI * valx2!(
u as f64 * (2 * x + 1) as f64
/ (2 * vec.len()) as f64
)
);
}
if u == 0 {
z *= ONE_DIV_SQRT_2;
}
out.insert(u, z / valx2!(2.0));
}
out
}
fn cos_approx(x2: f64x2) -> f64x2 {
#[inline(always)]
fn powi(val: f64x2, pow: i32) -> f64x2 {
unsafe { llvmint::powi_v2f64(val, pow) }
}
let x2 = powi(val, 2);
let x4 = powi(val, 4);
let x6 = powi(val, 6);
let x8 = powi(val, 8);
valx2!(1.0) - (x2 / valx2!(2.0)) + (x4 / valx2!(24.0))
- (x6 / valx2!(720.0)) + (x8 / valx2!(40320.0))
}
}
*/
| {
let approx = cos(x);
let cos = x.cos();
assert!(
(approx - cos).abs() <= ERROR,
"Approximation cos({x}) = {approx} was outside a tolerance of {error}; control value: {cos}",
x = x, approx = approx, error = ERROR, cos = cos,
);
} | identifier_body |
lib.rs | //! A Rust library for allocation-limited computation of the Discrete Cosine Transform.
//!
//! 1D DCTs are allocation-free but 2D requires allocation.
//!
//! Features:
//!
//! * `simd`: use SIMD types to speed computation (2D DCT only)
//! * `cos-approx`: use a Taylor series approximation of cosine instead of the stdlib
//! implementation (which is usually much slower but also higher precision)
use std::f64::consts::{PI, SQRT_2};
use std::ops::Range;
/// An allocation-free one-dimensional Discrete Cosine Transform.
///
/// Each iteration produces the next DCT value in the sequence.
#[derive(Clone, Debug)]
pub struct DCT1D<'a> {
data: &'a [f64],
curr: Range<usize>,
}
impl<'a> DCT1D<'a> {
/// Create a new DCT 1D adaptor from a 1D vector of data.
pub fn new(data: &[f64]) -> DCT1D {
let curr = 0 .. data.len();
DCT1D {
data: data,
curr: curr,
}
}
// Converted from the C implementation here:
// http://unix4lyfe.org/dct/listing2.c
// Source page:
// http://unix4lyfe.org/dct/ (Accessed 8/10/2014)
fn next_dct_val(&mut self) -> Option<f64> {
self.curr.next().map(|u| {
let mut z = 0.0;
let data_len = self.data.len();
for (x_idx, &x) in self.data.iter().enumerate() {
z += x * cos(
PI * u as f64 * (2 * x_idx + 1) as f64
/ (2 * data_len) as f64
);
}
if u == 0 {
z *= 1.0 / SQRT_2;
}
z / 2.0
})
}
}
impl<'a> Iterator for DCT1D<'a> {
type Item = f64;
fn next(&mut self) -> Option<f64> {
self.next_dct_val()
}
}
/// An implementation of cosine that switches to a Taylor-series approximation when throughput is
/// preferred over precision.
#[inline(always)]
pub fn cos(x: f64) -> f64 {
// This branch should be optimized out.
if cfg!(feature = "cos-approx") | else {
x.cos()
}
}
/// Perform a 2D DCT on a 1D-packed vector with a given rowstride.
///
/// E.g. a vector of length 9 with a rowstride of 3 will be processed as a 3x3 matrix.
///
/// Returns a vector of the same size packed in the same way.
pub fn dct_2d(packed_2d: &[f64], rowstride: usize) -> Vec<f64> {
assert_eq!(packed_2d.len() % rowstride, 0);
let mut row_dct: Vec<f64> = packed_2d
.chunks(rowstride)
.flat_map(DCT1D::new)
.collect();
swap_rows_columns(&mut row_dct, rowstride);
let mut column_dct: Vec<f64> = row_dct
.chunks(rowstride)
.flat_map(DCT1D::new)
.collect();
swap_rows_columns(&mut column_dct, rowstride);
column_dct
}
fn swap_rows_columns(data: &mut [f64], rowstride: usize) {
let height = data.len() / rowstride;
// In-place transpose; only square layouts (height == rowstride) round-trip correctly.
debug_assert_eq!(height, rowstride);
for y in 0 .. height {
// Start x at `y + 1` so each off-diagonal pair is swapped exactly once;
// sweeping x from 0 would swap every pair twice and undo the transpose.
for x in y + 1 .. rowstride {
data.swap(y * rowstride + x, x * rowstride + y);
}
}
}
#[cfg_attr(all(test, feature = "cos-approx"), test)]
#[cfg_attr(not(all(test, feature = "cos-approx")), allow(dead_code))]
fn test_cos_approx() {
const ERROR: f64 = 0.05;
fn test_cos_approx(x: f64) {
let approx = cos(x);
let cos = x.cos();
assert!(
(approx - cos).abs() <= ERROR,
"Approximation cos({x}) = {approx} was outside a tolerance of {error}; control value: {cos}",
x = x, approx = approx, error = ERROR, cos = cos,
);
}
let test_values = [PI, PI / 2.0, PI / 4.0, 1.0, -1.0, 2.0 * PI, 3.0 * PI, 4.0 / 3.0 * PI];
for &x in &test_values {
test_cos_approx(x);
test_cos_approx(-x);
}
}
/*
#[cfg(feature = "simd")]
mod dct_simd {
use simdty::f64x2;
use std::f64::consts::{PI, SQRT_2};
macro_rules! valx2 ( ($val:expr) => ( ::simdty::f64x2($val, $val) ) );
const PI: f64x2 = valx2!(PI);
const ONE_DIV_SQRT_2: f64x2 = valx2!(1 / SQRT_2);
const SQRT_2: f64x2 = valx2!(SQRT_2);
pub dct_rows(vals: &[Vec<f64>]) -> Vec<Vec<f64>> {
let mut out = Vec::with_capacity(vals.len());
for pair in vals.iter().chunks(2) {
if pair.len() == 2 {
let vals = pair[0].iter().cloned().zip(pair[1].iter().cloned())
.map(f64x2)
.collect();
dct_1dx2(vals);
}
}
fn dct_1dx2(vec: Vec<f64x2>) -> Vec<f64x2> {
let mut out = Vec::with_capacity(vec.len());
for u in 0 .. vec.len() {
let mut z = valx2!(0.0);
for x in 0 .. vec.len() {
z += vec[x] * cos_approx(
PI * valx2!(
u as f64 * (2 * x + 1) as f64
/ (2 * vec.len()) as f64
)
);
}
if u == 0 {
z *= ONE_DIV_SQRT_2;
}
out.insert(u, z / valx2!(2.0));
}
out
}
fn cos_approx(x2: f64x2) -> f64x2 {
#[inline(always)]
fn powi(val: f64x2, pow: i32) -> f64x2 {
unsafe { llvmint::powi_v2f64(val, pow) }
}
let x2 = powi(val, 2);
let x4 = powi(val, 4);
let x6 = powi(val, 6);
let x8 = powi(val, 8);
valx2!(1.0) - (x2 / valx2!(2.0)) + (x4 / valx2!(24.0))
- (x6 / valx2!(720.0)) + (x8 / valx2!(40320.0))
}
}
*/
| {
// Fold the argument into [-pi, pi]; outside that range the truncated Taylor series diverges badly.
let x = (x.abs() + PI) % (2.0 * PI) - PI;
// Approximate cos(x) with the Taylor series through the x^8 term.
// Extend the series with more terms for higher precision.
let x2 = x.powi(2);
let x4 = x.powi(4);
let x6 = x.powi(6);
let x8 = x.powi(8);
1.0 - (x2 / 2.0) + (x4 / 24.0) - (x6 / 720.0) + (x8 / 40320.0)
} | conditional_block |
lib.rs | //! A Rust library for allocation-limited computation of the Discrete Cosine Transform.
//!
//! 1D DCTs are allocation-free but 2D requires allocation.
//!
//! Features:
//!
//! * `simd`: use SIMD types to speed computation (2D DCT only)
//! * `cos-approx`: use a Taylor series approximation of cosine instead of the stdlib
//! implementation (which is usually much slower but also higher precision)
use std::f64::consts::{PI, SQRT_2};
use std::ops::Range;
/// An allocation-free one-dimensional Discrete Cosine Transform.
///
/// Each iteration produces the next DCT value in the sequence.
#[derive(Clone, Debug)]
pub struct DCT1D<'a> {
data: &'a [f64],
curr: Range<usize>,
}
impl<'a> DCT1D<'a> {
/// Create a new DCT 1D adaptor from a 1D vector of data.
pub fn new(data: &[f64]) -> DCT1D {
let curr = 0 .. data.len();
DCT1D {
data: data,
curr: curr,
}
}
// Converted from the C implementation here:
// http://unix4lyfe.org/dct/listing2.c
// Source page:
// http://unix4lyfe.org/dct/ (Accessed 8/10/2014)
fn next_dct_val(&mut self) -> Option<f64> {
self.curr.next().map(|u| {
let mut z = 0.0;
let data_len = self.data.len();
for (x_idx, &x) in self.data.iter().enumerate() {
z += x * cos(
PI * u as f64 * (2 * x_idx + 1) as f64
/ (2 * data_len) as f64
);
}
if u == 0 {
z *= 1.0 / SQRT_2;
}
z / 2.0
})
}
}
impl<'a> Iterator for DCT1D<'a> {
type Item = f64;
fn next(&mut self) -> Option<f64> {
self.next_dct_val()
}
}
/// An implementation of cosine that switches to a Taylor-series approximation when throughput is
/// preferred over precision.
#[inline(always)]
pub fn cos(x: f64) -> f64 {
// This branch should be optimized out.
if cfg!(feature = "cos-approx") {
// Fold the argument into [-pi, pi]; outside that range the truncated Taylor series diverges badly.
let x = (x.abs() + PI) % (2.0 * PI) - PI;
// Approximate cos(x) with the Taylor series through the x^8 term.
// Extend the series with more terms for higher precision.
let x2 = x.powi(2);
let x4 = x.powi(4);
let x6 = x.powi(6);
let x8 = x.powi(8);
1.0 - (x2 / 2.0) + (x4 / 24.0) - (x6 / 720.0) + (x8 / 40320.0)
} else {
x.cos()
}
}
/// Perform a 2D DCT on a 1D-packed vector with a given rowstride.
///
/// E.g. a vector of length 9 with a rowstride of 3 will be processed as a 3x3 matrix.
///
/// Returns a vector of the same size packed in the same way.
pub fn | (packed_2d: &[f64], rowstride: usize) -> Vec<f64> {
assert_eq!(packed_2d.len() % rowstride, 0);
let mut row_dct: Vec<f64> = packed_2d
.chunks(rowstride)
.flat_map(DCT1D::new)
.collect();
swap_rows_columns(&mut row_dct, rowstride);
let mut column_dct: Vec<f64> = row_dct
.chunks(rowstride)
.flat_map(DCT1D::new)
.collect();
swap_rows_columns(&mut column_dct, rowstride);
column_dct
}
fn swap_rows_columns(data: &mut [f64], rowstride: usize) {
let height = data.len() / rowstride;
// In-place transpose; only square layouts (height == rowstride) round-trip correctly.
debug_assert_eq!(height, rowstride);
for y in 0 .. height {
// Start x at `y + 1` so each off-diagonal pair is swapped exactly once;
// sweeping x from 0 would swap every pair twice and undo the transpose.
for x in y + 1 .. rowstride {
data.swap(y * rowstride + x, x * rowstride + y);
}
}
}
#[cfg_attr(all(test, feature = "cos-approx"), test)]
#[cfg_attr(not(all(test, feature = "cos-approx")), allow(dead_code))]
fn test_cos_approx() {
const ERROR: f64 = 0.05;
fn test_cos_approx(x: f64) {
let approx = cos(x);
let cos = x.cos();
assert!(
(approx - cos).abs() <= ERROR,
"Approximation cos({x}) = {approx} was outside a tolerance of {error}; control value: {cos}",
x = x, approx = approx, error = ERROR, cos = cos,
);
}
let test_values = [PI, PI / 2.0, PI / 4.0, 1.0, -1.0, 2.0 * PI, 3.0 * PI, 4.0 / 3.0 * PI];
for &x in &test_values {
test_cos_approx(x);
test_cos_approx(-x);
}
}
/*
#[cfg(feature = "simd")]
mod dct_simd {
use simdty::f64x2;
use std::f64::consts::{PI, SQRT_2};
macro_rules! valx2 ( ($val:expr) => ( ::simdty::f64x2($val, $val) ) );
const PI: f64x2 = valx2!(PI);
const ONE_DIV_SQRT_2: f64x2 = valx2!(1 / SQRT_2);
const SQRT_2: f64x2 = valx2!(SQRT_2);
pub dct_rows(vals: &[Vec<f64>]) -> Vec<Vec<f64>> {
let mut out = Vec::with_capacity(vals.len());
for pair in vals.iter().chunks(2) {
if pair.len() == 2 {
let vals = pair[0].iter().cloned().zip(pair[1].iter().cloned())
.map(f64x2)
.collect();
dct_1dx2(vals);
}
}
fn dct_1dx2(vec: Vec<f64x2>) -> Vec<f64x2> {
let mut out = Vec::with_capacity(vec.len());
for u in 0 .. vec.len() {
let mut z = valx2!(0.0);
for x in 0 .. vec.len() {
z += vec[x] * cos_approx(
PI * valx2!(
u as f64 * (2 * x + 1) as f64
/ (2 * vec.len()) as f64
)
);
}
if u == 0 {
z *= ONE_DIV_SQRT_2;
}
out.insert(u, z / valx2!(2.0));
}
out
}
fn cos_approx(x2: f64x2) -> f64x2 {
#[inline(always)]
fn powi(val: f64x2, pow: i32) -> f64x2 {
unsafe { llvmint::powi_v2f64(val, pow) }
}
let x2 = powi(val, 2);
let x4 = powi(val, 4);
let x6 = powi(val, 6);
let x8 = powi(val, 8);
valx2!(1.0) - (x2 / valx2!(2.0)) + (x4 / valx2!(24.0))
- (x6 / valx2!(720.0)) + (x8 / valx2!(40320.0))
}
}
*/
| dct_2d | identifier_name |
constants.rs | // The MIT License (MIT)
// Copyright © 2014-2018 Miguel Peláez <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
// files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy,
// modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/// Fancy logo
pub const ASCII_ART: &str = r"
_ _ _ __ _
| | (_) |_ ___ __ _ _ __ _ / _| |_
| |__| | _/ -_) _| '_/ _` | _| _|
|____|_|\__\___\__|_| \__,_|_| \__|
";
/// Config file to lookup
pub const CONFIG_FILE: &str = "litecraft.yml";
/// Client version
pub const LITECRAFT_VERSION: &str = "A1";
/// Compatible Minecraft server version
pub const MINECRAFT_VERSION: &str = "1.13.1";
| pub const VERSION_TEXT: &str = "Litecraft A1\nMinecraft 1.13.1"; | /// Debug version string | random_line_split |
stash.py | # coding: utf-8
import time
import sublime
from sublime_plugin import WindowCommand
from .util import noop
from .cmd import GitCmd
from .helpers import GitStashHelper, GitStatusHelper, GitErrorHelper
class GitStashWindowCmd(GitCmd, GitStashHelper, GitErrorHelper):
def pop_or_apply_from_panel(self, action):
repo = self.get_repo()
if not repo:
return
stashes = self.get_stashes(repo)
| if not stashes:
return sublime.error_message('No stashes. Use the Git: Stash command to stash changes')
callback = self.pop_or_apply_callback(repo, action, stashes)
panel = []
for name, title in stashes:
panel.append([title, "stash@{%s}" % name])
self.window.show_quick_panel(panel, callback)
def pop_or_apply_callback(self, repo, action, stashes):
def inner(choice):
if choice != -1:
name, _ = stashes[choice]
exit_code, stdout, stderr = self.git(['stash', action, '-q', 'stash@{%s}' % name], cwd=repo)
if exit_code != 0:
sublime.error_message(self.format_error_message(stderr))
window = sublime.active_window()
if window:
window.run_command('git_status', {'refresh_only': True})
return inner
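# Example of the shapes flowing through pop_or_apply_from_panel above
# (illustrative values): stashes like [("0", "WIP on master")] become
# panel rows [["WIP on master", "stash@{0}"]], and inner(0) then runs
# `git stash <action> -q stash@{0}` in the repo.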
class GitStashCommand(WindowCommand, GitCmd, GitStatusHelper):
"""
Documentation coming soon.
"""
def run(self, untracked=False):
repo = self.get_repo()
if not repo:
return
def on_done(title):
title = title.strip()
self.git(['stash', 'save', '--include-untracked' if untracked else None, '--', title], cwd=repo)
self.window.run_command('git_status', {'refresh_only': True})
# update the index
self.git_exit_code(['update-index', '--refresh'], cwd=repo)
# get files status
untracked_files, unstaged_files, _ = self.get_files_status(repo)
# check for if there's something to stash
if not unstaged_files:
if (untracked and not untracked_files) or (not untracked):
return sublime.error_message("No local changes to save")
self.window.show_input_panel('Stash title:', '', on_done, noop, noop)
class GitSnapshotCommand(WindowCommand, GitStashWindowCmd):
"""
Documentation coming soon.
"""
def run(self):
repo = self.get_repo()
if not repo:
return
snapshot = time.strftime("Snapshot at %Y-%m-%d %H:%M:%S")
self.git(['stash', 'save', '--', snapshot], cwd=repo)
self.git(['stash', 'apply', '-q', 'stash@{0}'], cwd=repo)
self.window.run_command('git_status', {'refresh_only': True})
class GitStashPopCommand(WindowCommand, GitStashWindowCmd):
"""
Documentation coming soon.
"""
def run(self):
self.pop_or_apply_from_panel('pop')
class GitStashApplyCommand(WindowCommand, GitStashWindowCmd):
"""
Documentation coming soon.
"""
def run(self):
self.pop_or_apply_from_panel('apply') | random_line_split |
|
stash.py | # coding: utf-8
import time
import sublime
from sublime_plugin import WindowCommand
from .util import noop
from .cmd import GitCmd
from .helpers import GitStashHelper, GitStatusHelper, GitErrorHelper
class GitStashWindowCmd(GitCmd, GitStashHelper, GitErrorHelper):
def pop_or_apply_from_panel(self, action):
repo = self.get_repo()
if not repo:
return
stashes = self.get_stashes(repo)
if not stashes:
return sublime.error_message('No stashes. Use the Git: Stash command to stash changes')
callback = self.pop_or_apply_callback(repo, action, stashes)
panel = []
for name, title in stashes:
panel.append([title, "stash@{%s}" % name])
self.window.show_quick_panel(panel, callback)
def pop_or_apply_callback(self, repo, action, stashes):
def inner(choice):
if choice != -1:
name, _ = stashes[choice]
exit_code, stdout, stderr = self.git(['stash', action, '-q', 'stash@{%s}' % name], cwd=repo)
if exit_code != 0:
sublime.error_message(self.format_error_message(stderr))
window = sublime.active_window()
if window:
window.run_command('git_status', {'refresh_only': True})
return inner
class GitStashCommand(WindowCommand, GitCmd, GitStatusHelper):
"""
Documentation coming soon.
"""
def run(self, untracked=False):
repo = self.get_repo()
if not repo:
return
def on_done(title):
title = title.strip()
self.git(['stash', 'save', '--include-untracked' if untracked else None, '--', title], cwd=repo)
self.window.run_command('git_status', {'refresh_only': True})
# update the index
self.git_exit_code(['update-index', '--refresh'], cwd=repo)
# get files status
untracked_files, unstaged_files, _ = self.get_files_status(repo)
# check for if there's something to stash
if not unstaged_files:
if (untracked and not untracked_files) or (not untracked):
return sublime.error_message("No local changes to save")
self.window.show_input_panel('Stash title:', '', on_done, noop, noop)
class GitSnapshotCommand(WindowCommand, GitStashWindowCmd):
"""
Documentation coming soon.
"""
def run(self):
repo = self.get_repo()
if not repo:
|
snapshot = time.strftime("Snapshot at %Y-%m-%d %H:%M:%S")
self.git(['stash', 'save', '--', snapshot], cwd=repo)
self.git(['stash', 'apply', '-q', 'stash@{0}'], cwd=repo)
self.window.run_command('git_status', {'refresh_only': True})
class GitStashPopCommand(WindowCommand, GitStashWindowCmd):
"""
Documentation coming soon.
"""
def run(self):
self.pop_or_apply_from_panel('pop')
class GitStashApplyCommand(WindowCommand, GitStashWindowCmd):
"""
Documentation coming soon.
"""
def run(self):
self.pop_or_apply_from_panel('apply')
| return | conditional_block |
stash.py | # coding: utf-8
import time
import sublime
from sublime_plugin import WindowCommand
from .util import noop
from .cmd import GitCmd
from .helpers import GitStashHelper, GitStatusHelper, GitErrorHelper
class GitStashWindowCmd(GitCmd, GitStashHelper, GitErrorHelper):
def pop_or_apply_from_panel(self, action):
repo = self.get_repo()
if not repo:
return
stashes = self.get_stashes(repo)
if not stashes:
return sublime.error_message('No stashes. Use the Git: Stash command to stash changes')
callback = self.pop_or_apply_callback(repo, action, stashes)
panel = []
for name, title in stashes:
panel.append([title, "stash@{%s}" % name])
self.window.show_quick_panel(panel, callback)
def pop_or_apply_callback(self, repo, action, stashes):
|
class GitStashCommand(WindowCommand, GitCmd, GitStatusHelper):
"""
Documentation coming soon.
"""
def run(self, untracked=False):
repo = self.get_repo()
if not repo:
return
def on_done(title):
title = title.strip()
self.git(['stash', 'save', '--include-untracked' if untracked else None, '--', title], cwd=repo)
self.window.run_command('git_status', {'refresh_only': True})
# update the index
self.git_exit_code(['update-index', '--refresh'], cwd=repo)
# get files status
untracked_files, unstaged_files, _ = self.get_files_status(repo)
# check for if there's something to stash
if not unstaged_files:
if (untracked and not untracked_files) or (not untracked):
return sublime.error_message("No local changes to save")
self.window.show_input_panel('Stash title:', '', on_done, noop, noop)
class GitSnapshotCommand(WindowCommand, GitStashWindowCmd):
"""
Documentation coming soon.
"""
def run(self):
repo = self.get_repo()
if not repo:
return
snapshot = time.strftime("Snapshot at %Y-%m-%d %H:%M:%S")
self.git(['stash', 'save', '--', snapshot], cwd=repo)
self.git(['stash', 'apply', '-q', 'stash@{0}'], cwd=repo)
self.window.run_command('git_status', {'refresh_only': True})
class GitStashPopCommand(WindowCommand, GitStashWindowCmd):
"""
Documentation coming soon.
"""
def run(self):
self.pop_or_apply_from_panel('pop')
class GitStashApplyCommand(WindowCommand, GitStashWindowCmd):
"""
Documentation coming soon.
"""
def run(self):
self.pop_or_apply_from_panel('apply')
| def inner(choice):
if choice != -1:
name, _ = stashes[choice]
exit_code, stdout, stderr = self.git(['stash', action, '-q', 'stash@{%s}' % name], cwd=repo)
if exit_code != 0:
sublime.error_message(self.format_error_message(stderr))
window = sublime.active_window()
if window:
window.run_command('git_status', {'refresh_only': True})
return inner | identifier_body |
stash.py | # coding: utf-8
import time
import sublime
from sublime_plugin import WindowCommand
from .util import noop
from .cmd import GitCmd
from .helpers import GitStashHelper, GitStatusHelper, GitErrorHelper
class GitStashWindowCmd(GitCmd, GitStashHelper, GitErrorHelper):
def pop_or_apply_from_panel(self, action):
repo = self.get_repo()
if not repo:
return
stashes = self.get_stashes(repo)
if not stashes:
return sublime.error_message('No stashes. Use the Git: Stash command to stash changes')
callback = self.pop_or_apply_callback(repo, action, stashes)
panel = []
for name, title in stashes:
panel.append([title, "stash@{%s}" % name])
self.window.show_quick_panel(panel, callback)
def pop_or_apply_callback(self, repo, action, stashes):
def inner(choice):
if choice != -1:
name, _ = stashes[choice]
exit_code, stdout, stderr = self.git(['stash', action, '-q', 'stash@{%s}' % name], cwd=repo)
if exit_code != 0:
sublime.error_message(self.format_error_message(stderr))
window = sublime.active_window()
if window:
window.run_command('git_status', {'refresh_only': True})
return inner
class GitStashCommand(WindowCommand, GitCmd, GitStatusHelper):
"""
Documentation coming soon.
"""
def run(self, untracked=False):
repo = self.get_repo()
if not repo:
return
def on_done(title):
title = title.strip()
self.git(['stash', 'save', '--include-untracked' if untracked else None, '--', title], cwd=repo)
self.window.run_command('git_status', {'refresh_only': True})
# update the index
self.git_exit_code(['update-index', '--refresh'], cwd=repo)
# get files status
untracked_files, unstaged_files, _ = self.get_files_status(repo)
# check for if there's something to stash
if not unstaged_files:
if (untracked and not untracked_files) or (not untracked):
return sublime.error_message("No local changes to save")
self.window.show_input_panel('Stash title:', '', on_done, noop, noop)
class GitSnapshotCommand(WindowCommand, GitStashWindowCmd):
"""
Documentation coming soon.
"""
def run(self):
repo = self.get_repo()
if not repo:
return
snapshot = time.strftime("Snapshot at %Y-%m-%d %H:%M:%S")
self.git(['stash', 'save', '--', snapshot], cwd=repo)
self.git(['stash', 'apply', '-q', 'stash@{0}'], cwd=repo)
self.window.run_command('git_status', {'refresh_only': True})
class GitStashPopCommand(WindowCommand, GitStashWindowCmd):
"""
Documentation coming soon.
"""
def run(self):
self.pop_or_apply_from_panel('pop')
class GitStashApplyCommand(WindowCommand, GitStashWindowCmd):
"""
Documentation coming soon.
"""
def | (self):
self.pop_or_apply_from_panel('apply')
| run | identifier_name |
MarchingCubes.d.ts | import {
BufferGeometry,
Material,
ImmediateRenderObject
} from '../../../src/Three';
export class | extends ImmediateRenderObject {
constructor( resolution: number, material: Material, enableUvs?: boolean, enableColors?: boolean );
enableUvs: boolean;
enableColors: boolean;
resolution: number;
// parameters
isolation: number;
// size of field, 32 is pushing it in Javascript :)
size: number;
size2: number;
size3: number;
halfsize: number;
// deltas
delta: number;
yd: number;
zd: number;
field: Float32Array;
normal_cache: Float32Array;
palette: Float32Array;
maxCount: number;
count: number;
hasPositions: boolean;
hasNormals: boolean;
hasColors: boolean;
hasUvs: boolean;
positionArray: Float32Array;
normalArray: Float32Array;
uvArray: Float32Array;
colorArray: Float32Array;
begin(): void;
end(): void;
addBall( ballx: number, bally: number, ballz: number, strength: number, subtract: number, colors: any ): void;
addPlaneX( strength: number, subtract: number ): void;
addPlaneY( strength: number, subtract: number ): void;
addPlaneZ( strength: number, subtract: number ): void;
setCell ( x: number, y: number, z: number, value: number ): void;
getCell ( x: number, y: number, z: number ): number;
blur( intensity: number ): void;
reset(): void;
render( renderCallback: any ): void;
generateGeometry(): BufferGeometry;
generateBufferGeometry(): BufferGeometry;
}
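// Hypothetical usage sketch (not part of this declaration file):
//   const effect = new MarchingCubes(28, material, true, true);
//   effect.isolation = 80;
//   effect.addBall(0.5, 0.5, 0.5, 1.0, 12, undefined);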
export const edgeTable: Int32Array[];
export const triTable: Int32Array[];
| MarchingCubes | identifier_name |
MarchingCubes.d.ts | import {
BufferGeometry,
Material,
ImmediateRenderObject
} from '../../../src/Three';
export class MarchingCubes extends ImmediateRenderObject {
constructor( resolution: number, material: Material, enableUvs?: boolean, enableColors?: boolean );
enableUvs: boolean;
enableColors: boolean;
resolution: number;
// parameters
isolation: number;
// size of field, 32 is pushing it in Javascript :)
size: number;
size2: number;
size3: number;
halfsize: number;
// deltas
delta: number;
yd: number;
zd: number;
field: Float32Array;
normal_cache: Float32Array;
palette: Float32Array;
maxCount: number;
count: number;
hasPositions: boolean;
hasNormals: boolean;
hasColors: boolean;
hasUvs: boolean;
positionArray: Float32Array;
normalArray: Float32Array;
uvArray: Float32Array;
colorArray: Float32Array;
begin(): void;
end(): void;
addBall( ballx: number, bally: number, ballz: number, strength: number, subtract: number, colors: any ): void;
addPlaneX( strength: number, subtract: number ): void;
addPlaneY( strength: number, subtract: number ): void;
addPlaneZ( strength: number, subtract: number ): void;
setCell ( x: number, y: number, z: number, value: number ): void;
getCell ( x: number, y: number, z: number ): number;
blur( intensity: number ): void;
reset(): void;
render( renderCallback: any ): void;
generateGeometry(): BufferGeometry; | export const triTable: Int32Array[]; | generateBufferGeometry(): BufferGeometry;
}
export const edgeTable: Int32Array[]; | random_line_split |
real_ints.rs | //! Defines basic operations defined under Real_Ints theory in SMTLIB2.
use std::fmt;
#[macro_use]
use crate::backends::backend::SMTNode;
#[derive(Clone, Debug)]
pub enum OpCodes {
Neg,
Sub,
Add,
Mul,
Div,
Lte,
Lt,
Gte,
Gt,
ToReal,
ToInt,
IsInt,
ConstInt(u64),
ConstReal(f64),
FreeVar(String),
}
impl fmt::Display for OpCodes {
fn | (&self, f: &mut fmt::Formatter) -> fmt::Result {
let s = match *self {
OpCodes::Neg => "-".to_owned(),
OpCodes::Sub => "-".to_owned(),
OpCodes::Add => "+".to_owned(),
OpCodes::Mul => "*".to_owned(),
OpCodes::Div => "/".to_owned(),
OpCodes::Lte => "<=".to_owned(),
OpCodes::Lt => "<".to_owned(),
OpCodes::Gte => ">=".to_owned(),
OpCodes::Gt => ">".to_owned(),
OpCodes::ToReal => "to_real".to_owned(),
OpCodes::ToInt => "to_int".to_owned(),
OpCodes::IsInt => "is_int".to_owned(),
OpCodes::ConstInt(ref val) => format!("{}", val),
OpCodes::ConstReal(ref val) => format!("{}", val),
OpCodes::FreeVar(ref name) => format!("{}", name),
};
write!(f, "{}", s)
}
}
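// Illustrative rendering: with this impl, OpCodes::Add prints as "+",
// OpCodes::ConstReal(1.5) as "1.5", and OpCodes::FreeVar("x".into()) as "x",
// so a printer walking an expression tree can emit SMT-LIB terms like (+ x 1.5).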
impl_smt_node!(OpCodes, define vars [OpCodes::FreeVar(_)], define consts [OpCodes::ConstInt(_), OpCodes::ConstReal(_)]);
#[derive(Clone,Debug)]
pub enum Sorts {
Real,
Int
}
impl fmt::Display for Sorts {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let s = match *self {
Sorts::Real => "Real",
Sorts::Int => "Int"
};
write!(f, "{}", s)
}
}
| fmt | identifier_name |
real_ints.rs | //! Defines basic operations defined under Real_Ints theory in SMTLIB2.
use std::fmt;
#[macro_use]
use crate::backends::backend::SMTNode;
#[derive(Clone, Debug)]
pub enum OpCodes { | Neg,
Sub,
Add,
Mul,
Div,
Lte,
Lt,
Gte,
Gt,
ToReal,
ToInt,
IsInt,
ConstInt(u64),
ConstReal(f64),
FreeVar(String),
}
impl fmt::Display for OpCodes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let s = match *self {
OpCodes::Neg => "-".to_owned(),
OpCodes::Sub => "-".to_owned(),
OpCodes::Add => "+".to_owned(),
OpCodes::Mul => "*".to_owned(),
OpCodes::Div => "/".to_owned(),
OpCodes::Lte => "<=".to_owned(),
OpCodes::Lt => "<".to_owned(),
OpCodes::Gte => ">=".to_owned(),
OpCodes::Gt => ">".to_owned(),
OpCodes::ToReal => "to_real".to_owned(),
OpCodes::ToInt => "to_int".to_owned(),
OpCodes::IsInt => "is_int".to_owned(),
OpCodes::ConstInt(ref val) => format!("{}", val),
OpCodes::ConstReal(ref val) => format!("{}", val),
OpCodes::FreeVar(ref name) => format!("{}", name),
};
write!(f, "{}", s)
}
}
impl_smt_node!(OpCodes, define vars [OpCodes::FreeVar(_)], define consts [OpCodes::ConstInt(_), OpCodes::ConstReal(_)]);
#[derive(Clone,Debug)]
pub enum Sorts {
Real,
Int
}
impl fmt::Display for Sorts {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let _s = match *self {
Sorts::Real => "Real",
Sorts::Int => "Int"
};
write!(f, "{}", "s")
}
} | random_line_split |
|
columns.py | #!/usr/bin/env python
"""
This module contains the :class:`Column` class, which defines a "vertical"
array of tabular data. Whereas :class:`.Row` instances are independent of their
parent :class:`.Table`, columns depend on knowledge of both their position in
the parent (column name, data type) as well as the rows that contain their data.
"""
import six
from agate.mapped_sequence import MappedSequence
from agate.utils import NullOrder, memoize
if six.PY3: # pragma: no cover
# pylint: disable=W0622
xrange = range
def null_handler(k):
"""
Key method for sorting nulls correctly.
"""
if k is None:
return NullOrder()
return k
class Column(MappedSequence):
"""
Proxy access to column data. Instances of :class:`Column` should
not be constructed directly. They are created by :class:`.Table`
instances and are unique to them.
Columns are implemented as subclass of :class:`.MappedSequence`. They
deviate from the underlying implementation in that loading of their data
is deferred until it is needed.
:param name:
The name of this column.
:param data_type:
An instance of :class:`.DataType`.
:param rows:
A :class:`.MappedSequence` that contains the :class:`.Row` instances
containing the data for this column.
:param row_names:
An optional list of row names (keys) for this column.
"""
__slots__ = ['_index', '_name', '_data_type', '_rows', '_row_names']
def __init__(self, index, name, data_type, rows, row_names=None):
self._index = index
self._name = name
self._data_type = data_type
self._rows = rows
self._keys = row_names
def __getstate__(self):
"""
Return state values to be pickled.
This is necessary on Python2.7 when using :code:`__slots__`.
"""
return {
'_index': self._index,
'_name': self._name,
'_data_type': self._data_type,
'_rows': self._rows,
'_keys': self._keys
}
def __setstate__(self, data):
""" | self._name = data['_name']
self._data_type = data['_data_type']
self._rows = data['_rows']
self._keys = data['_keys']
@property
def index(self):
"""
This column's index.
"""
return self._index
@property
def name(self):
"""
This column's name.
"""
return self._name
@property
def data_type(self):
"""
This column's data type.
"""
return self._data_type
@memoize
def values(self):
"""
Get the values in this column, as a tuple.
"""
return tuple(row[self._index] for row in self._rows)
@memoize
def values_distinct(self):
"""
Get the distinct values in this column, as a tuple.
"""
return tuple(set(self.values()))
@memoize
def values_without_nulls(self):
"""
Get the values in this column with any null values removed.
"""
return tuple(d for d in self.values() if d is not None)
@memoize
def values_sorted(self):
"""
Get the values in this column sorted.
"""
return sorted(self.values(), key=null_handler)
@memoize
def values_without_nulls_sorted(self):
"""
Get the values in this column with any null values removed and sorted.
"""
return sorted(self.values_without_nulls(), key=null_handler) | Restore pickled state.
This is necessary on Python2.7 when using :code:`__slots__`.
"""
self._index = data['_index'] | random_line_split |
columns.py | #!/usr/bin/env python
"""
This module contains the :class:`Column` class, which defines a "vertical"
array of tabular data. Whereas :class:`.Row` instances are independent of their
parent :class:`.Table`, columns depend on knowledge of both their position in
the parent (column name, data type) as well as the rows that contain their data.
"""
import six
from agate.mapped_sequence import MappedSequence
from agate.utils import NullOrder, memoize
if six.PY3: # pragma: no cover
# pylint: disable=W0622
xrange = range
def null_handler(k):
"""
Key method for sorting nulls correctly.
"""
if k is None:
return NullOrder()
return k
class Column(MappedSequence):
"""
Proxy access to column data. Instances of :class:`Column` should
not be constructed directly. They are created by :class:`.Table`
instances and are unique to them.
Columns are implemented as subclass of :class:`.MappedSequence`. They
deviate from the underlying implementation in that loading of their data
is deferred until it is needed.
:param name:
The name of this column.
:param data_type:
An instance of :class:`.DataType`.
:param rows:
A :class:`.MappedSequence` that contains the :class:`.Row` instances
containing the data for this column.
:param row_names:
An optional list of row names (keys) for this column.
"""
__slots__ = ['_index', '_name', '_data_type', '_rows', '_row_names']
def __init__(self, index, name, data_type, rows, row_names=None):
self._index = index
self._name = name
self._data_type = data_type
self._rows = rows
self._keys = row_names
def __getstate__(self):
"""
Return state values to be pickled.
This is necessary on Python2.7 when using :code:`__slots__`.
"""
return {
'_index': self._index,
'_name': self._name,
'_data_type': self._data_type,
'_rows': self._rows,
'_keys': self._keys
}
def __setstate__(self, data):
"""
Restore pickled state.
This is necessary on Python2.7 when using :code:`__slots__`.
"""
self._index = data['_index']
self._name = data['_name']
self._data_type = data['_data_type']
self._rows = data['_rows']
self._keys = data['_keys']
@property
def index(self):
"""
This column's index.
"""
return self._index
@property
def name(self):
"""
This column's name.
"""
return self._name
@property
def data_type(self):
"""
This column's data type.
"""
return self._data_type
@memoize
def values(self):
"""
Get the values in this column, as a tuple.
"""
return tuple(row[self._index] for row in self._rows)
@memoize
def values_distinct(self):
"""
Get the distinct values in this column, as a tuple.
"""
return tuple(set(self.values()))
@memoize
def values_without_nulls(self):
"""
Get the values in this column with any null values removed.
"""
return tuple(d for d in self.values() if d is not None)
@memoize
def values_sorted(self):
"""
Get the values in this column sorted.
"""
return sorted(self.values(), key=null_handler)
@memoize
def | (self):
"""
Get the values in this column with any null values removed and sorted.
"""
return sorted(self.values_without_nulls(), key=null_handler)
| values_without_nulls_sorted | identifier_name |
columns.py | #!/usr/bin/env python
"""
This module contains the :class:`Column` class, which defines a "vertical"
array of tabular data. Whereas :class:`.Row` instances are independent of their
parent :class:`.Table`, columns depend on knowledge of both their position in
the parent (column name, data type) as well as the rows that contain their data.
"""
import six
from agate.mapped_sequence import MappedSequence
from agate.utils import NullOrder, memoize
if six.PY3: # pragma: no cover
# pylint: disable=W0622
|
def null_handler(k):
"""
Key method for sorting nulls correctly.
"""
if k is None:
return NullOrder()
return k
class Column(MappedSequence):
"""
Proxy access to column data. Instances of :class:`Column` should
not be constructed directly. They are created by :class:`.Table`
instances and are unique to them.
Columns are implemented as subclass of :class:`.MappedSequence`. They
deviate from the underlying implementation in that loading of their data
is deferred until it is needed.
:param name:
The name of this column.
:param data_type:
An instance of :class:`.DataType`.
:param rows:
A :class:`.MappedSequence` that contains the :class:`.Row` instances
containing the data for this column.
:param row_names:
An optional list of row names (keys) for this column.
"""
__slots__ = ['_index', '_name', '_data_type', '_rows', '_row_names']
def __init__(self, index, name, data_type, rows, row_names=None):
self._index = index
self._name = name
self._data_type = data_type
self._rows = rows
self._keys = row_names
def __getstate__(self):
"""
Return state values to be pickled.
This is necessary on Python2.7 when using :code:`__slots__`.
"""
return {
'_index': self._index,
'_name': self._name,
'_data_type': self._data_type,
'_rows': self._rows,
'_keys': self._keys
}
def __setstate__(self, data):
"""
Restore pickled state.
This is necessary on Python2.7 when using :code:`__slots__`.
"""
self._index = data['_index']
self._name = data['_name']
self._data_type = data['_data_type']
self._rows = data['_rows']
self._keys = data['_keys']
@property
def index(self):
"""
This column's index.
"""
return self._index
@property
def name(self):
"""
This column's name.
"""
return self._name
@property
def data_type(self):
"""
This column's data type.
"""
return self._data_type
@memoize
def values(self):
"""
Get the values in this column, as a tuple.
"""
return tuple(row[self._index] for row in self._rows)
@memoize
def values_distinct(self):
"""
Get the distinct values in this column, as a tuple.
"""
return tuple(set(self.values()))
@memoize
def values_without_nulls(self):
"""
Get the values in this column with any null values removed.
"""
return tuple(d for d in self.values() if d is not None)
@memoize
def values_sorted(self):
"""
Get the values in this column sorted.
"""
return sorted(self.values(), key=null_handler)
@memoize
def values_without_nulls_sorted(self):
"""
Get the values in this column with any null values removed and sorted.
"""
return sorted(self.values_without_nulls(), key=null_handler)
| xrange = range | conditional_block |
columns.py | #!/usr/bin/env python
"""
This module contains the :class:`Column` class, which defines a "vertical"
array of tabular data. Whereas :class:`.Row` instances are independent of their
parent :class:`.Table`, columns depend on knowledge of both their position in
the parent (column name, data type) as well as the rows that contain their data.
"""
import six
from agate.mapped_sequence import MappedSequence
from agate.utils import NullOrder, memoize
if six.PY3: # pragma: no cover
# pylint: disable=W0622
xrange = range
def null_handler(k):
"""
Key method for sorting nulls correctly.
"""
if k is None:
return NullOrder()
return k
class Column(MappedSequence):
"""
Proxy access to column data. Instances of :class:`Column` should
not be constructed directly. They are created by :class:`.Table`
instances and are unique to them.
Columns are implemented as subclass of :class:`.MappedSequence`. They
deviate from the underlying implementation in that loading of their data
is deferred until it is needed.
:param name:
The name of this column.
:param data_type:
An instance of :class:`.DataType`.
:param rows:
A :class:`.MappedSequence` that contains the :class:`.Row` instances
containing the data for this column.
:param row_names:
An optional list of row names (keys) for this column.
"""
__slots__ = ['_index', '_name', '_data_type', '_rows', '_row_names']
def __init__(self, index, name, data_type, rows, row_names=None):
self._index = index
self._name = name
self._data_type = data_type
self._rows = rows
self._keys = row_names
def __getstate__(self):
"""
Return state values to be pickled.
This is necessary on Python2.7 when using :code:`__slots__`.
"""
return {
'_index': self._index,
'_name': self._name,
'_data_type': self._data_type,
'_rows': self._rows,
'_keys': self._keys
}
def __setstate__(self, data):
"""
Restore pickled state.
This is necessary on Python2.7 when using :code:`__slots__`.
"""
self._index = data['_index']
self._name = data['_name']
self._data_type = data['_data_type']
self._rows = data['_rows']
self._keys = data['_keys']
@property
def index(self):
"""
This column's index.
"""
return self._index
@property
def name(self):
"""
This column's name.
"""
return self._name
@property
def data_type(self):
|
@memoize
def values(self):
"""
Get the values in this column, as a tuple.
"""
return tuple(row[self._index] for row in self._rows)
@memoize
def values_distinct(self):
"""
Get the distinct values in this column, as a tuple.
"""
return tuple(set(self.values()))
@memoize
def values_without_nulls(self):
"""
Get the values in this column with any null values removed.
"""
return tuple(d for d in self.values() if d is not None)
@memoize
def values_sorted(self):
"""
Get the values in this column sorted.
"""
return sorted(self.values(), key=null_handler)
@memoize
def values_without_nulls_sorted(self):
"""
Get the values in this column with any null values removed and sorted.
"""
return sorted(self.values_without_nulls(), key=null_handler)
| """
This column's data type.
"""
return self._data_type | identifier_body |
http.rs | use crate::real_std::{
fmt, fs,
path::PathBuf,
pin::Pin,
sync::{Arc, Mutex},
};
use {
collect_mac::collect,
futures::{
future::{self, BoxFuture},
prelude::*,
ready,
task::{self, Poll},
},
http::{
header::{HeaderMap, HeaderName, HeaderValue},
StatusCode,
},
hyper::{body::Bytes, Server},
pin_project_lite::pin_project,
};
use crate::base::types::{ArcType, Type};
use crate::{
vm::{
self,
api::{
generic, Collect, Eff, Function, Getable, OpaqueValue, PushAsRef, Pushable, VmType,
WithVM, IO,
},
thread::{ActiveThread, RootedThread, Thread},
ExternModule, Variants,
},
Error,
};
macro_rules! try_future {
($e:expr) => {
try_future!($e, Box::pin)
};
($e:expr, $f:expr) => {
match $e {
Ok(x) => x,
Err(err) => return $f(::futures::future::err(err.into())),
}
};
}
pub struct HttpEffect;
impl VmType for HttpEffect {
type Type = Self;
fn | (vm: &Thread) -> ArcType {
let r = generic::R::make_type(vm);
Type::app(
vm.find_type_info("std.http.types.HttpEffect")
.map(|alias| alias.into_type())
.unwrap_or_else(|_| Type::hole()),
collect![r],
)
}
}
pub type EffectHandler<T> = Eff<HttpEffect, T>;
pub struct Headers(HeaderMap);
impl VmType for Headers {
type Type = Vec<(String, Vec<u8>)>;
fn make_type(vm: &Thread) -> ArcType {
Vec::<(String, Vec<u8>)>::make_type(vm)
}
}
impl<'vm> Pushable<'vm> for Headers {
fn vm_push(self, context: &mut ActiveThread<'vm>) -> vm::Result<()> {
Collect::new(
self.0
.iter()
.map(|(name, value)| (name.as_str(), value.as_bytes())),
)
.vm_push(context)
}
}
impl<'vm, 'value> Getable<'vm, 'value> for Headers {
impl_getable_simple!();
fn from_value(vm: &'vm Thread, value: Variants<'value>) -> Self {
Headers(
Collect::from_value(vm, value)
// TODO Error somehow on invalid headers
.filter_map(|(name, value): (&str, &[u8])| {
match (
HeaderName::from_bytes(name.as_bytes()),
HeaderValue::from_bytes(value),
) {
(Ok(name), Ok(value)) => Some((name, value)),
_ => None,
}
})
.collect(),
)
}
}
// By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon
// threads
#[derive(Userdata, Trace, VmType, Clone)]
#[gluon(vm_type = "std.http.types.Body")]
#[gluon(crate_name = "::vm")]
#[gluon_userdata(clone)]
#[gluon_trace(skip)]
// Representation of a http body that is in the prograss of being read
pub struct Body(
Arc<Mutex<Pin<Box<dyn Stream<Item = Result<PushAsRef<Bytes, [u8]>, vm::Error>> + Send>>>>,
);
// Types implementing `Userdata` requires a `std::fmt::Debug` implementation so it can be displayed
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "hyper::Body")
}
}
// Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation
// into `&Body` argument
fn read_chunk(body: &Body) -> impl Future<Output = IO<Option<PushAsRef<Bytes, [u8]>>>> {
use futures::future::poll_fn;
let body = body.0.clone();
poll_fn(move |cx| {
let mut stream = body.lock().unwrap();
Poll::Ready(IO::Value(
if let Some(result) = ready!(stream.as_mut().poll_next(cx)) {
match result {
Ok(chunk) => Some(chunk),
Err(err) => return IO::Exception(err.to_string()).into(),
}
} else {
None
},
))
})
}
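// Call shape (illustrative): each await of the returned future yields
// `IO::Value(Some(chunk))` while data remains, `IO::Value(None)` at end of
// stream, and maps a stream error to `IO::Exception` with its message.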
// A http body that is being written
#[derive(Userdata, Trace, VmType, Clone)]
#[gluon(vm_type = "std.http.types.ResponseBody")]
#[gluon(crate_name = "::vm")]
#[gluon_userdata(clone)]
#[gluon_trace(skip)]
pub struct ResponseBody(Arc<Mutex<Option<hyper::body::Sender>>>);
impl fmt::Debug for ResponseBody {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ResponseBody")
}
}
fn write_response(response: &ResponseBody, bytes: &[u8]) -> impl Future<Output = IO<()>> {
use futures::future::poll_fn;
// Turn `bytes´ into a `Bytes` which can be sent to the http body
let mut unsent_chunk = Some(bytes.to_owned().into());
let response = response.0.clone();
poll_fn(move |cx| {
info!("Starting response send");
let mut sender = response.lock().unwrap();
let sender = sender
.as_mut()
.expect("Sender has been dropped while still in use");
let chunk = unsent_chunk
.take()
.expect("Attempt to poll after chunk is sent");
match sender.poll_ready(cx) {
Poll::Pending => {
unsent_chunk = Some(chunk);
return Poll::Pending;
}
Poll::Ready(Ok(_)) => (),
Poll::Ready(Err(err)) => {
info!("Could not send http response {}", err);
return IO::Exception(err.to_string()).into();
}
}
match sender.try_send_data(chunk) {
Ok(()) => Poll::Ready(IO::Value(())),
Err(chunk) => {
unsent_chunk = Some(chunk);
IO::Exception("Could not send http response".into()).into()
}
}
})
}
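// Call shape (illustrative): the future waits for hyper's channel capacity
// before queueing the owned chunk; a closed receiver or a failed send
// surfaces as `IO::Exception` rather than a panic.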
#[derive(Debug, Userdata, Trace, VmType, Clone)]
#[gluon(vm_type = "std.http.types.Uri")]
#[gluon(crate_name = "::vm")]
#[gluon_trace(skip)]
#[gluon_userdata(clone)]
struct Uri(http::Uri);
// Next we define some record types which are marshalled to and from gluon. These have equivalent
// definitions in http_types.glu
field_decl! { http, method, uri, status, body, request, response, headers }
type Request = record_type! {
method => String,
uri => Uri,
body => Body
};
pub type Response = record_type! {
status => u16,
headers => Headers
};
type HttpState = record_type! {
request => Request,
response => ResponseBody
};
#[derive(Getable, VmType)]
#[gluon(crate_name = "::vm")]
struct Settings {
port: u16,
tls_cert: Option<PathBuf>,
}
fn listen(
settings: Settings,
WithVM { vm, value }: WithVM<OpaqueValue<RootedThread, EffectHandler<Response>>>,
) -> impl Future<Output = IO<()>> + Send + 'static {
let vm = vm.root_thread();
listen_(settings, vm, value).map(IO::from)
}
async fn listen_(
settings: Settings,
thread: RootedThread,
handler: OpaqueValue<RootedThread, EffectHandler<Response>>,
) -> vm::Result<()> {
let thread = match thread.new_thread() {
Ok(thread) => thread,
Err(err) => return Err(err),
};
impl tower_service::Service<hyper::Request<hyper::Body>> for Handler {
type Response = hyper::Response<hyper::Body>;
type Error = Error;
type Future = BoxFuture<'static, Result<http::Response<hyper::Body>, Error>>;
fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, request: hyper::Request<hyper::Body>) -> Self::Future {
let (parts, body) = request.into_parts();
self.handle(parts.method, parts.uri, body)
}
}
let addr = format!("0.0.0.0:{}", settings.port).parse().unwrap();
let listener = Handler::new(&thread, handler);
if let Some(cert_path) = &settings.tls_cert {
let identity = fs::read(cert_path).map_err(|err| {
vm::Error::Message(format!(
"Unable to open certificate `{}`: {}",
cert_path.display(),
err
))
})?;
let identity = native_tls::Identity::from_pkcs12(&identity, "")
.map_err(|err| vm::Error::Message(err.to_string()))?;
let acceptor = tokio_native_tls::TlsAcceptor::from(
native_tls::TlsAcceptor::new(identity)
.map_err(|err| vm::Error::Message(err.to_string()))?,
);
let http = hyper::server::conn::Http::new();
let tcp_listener = tokio::net::TcpListener::bind(&addr)
.map_err(|err| vm::Error::Message(err.to_string()))
.await?;
let incoming = tokio_stream::wrappers::TcpListenerStream::new(tcp_listener)
.err_into()
.and_then(|stream| {
acceptor.accept(stream).map_err(|err| {
info!("Unable to accept TLS connection: {}", err);
Box::new(err) as Box<dyn ::std::error::Error + Send + Sync>
})
});
pin_project! {
struct Acceptor<S> {
#[pin]
incoming: S,
}
}
impl<S, T, E> hyper::server::accept::Accept for Acceptor<S>
where
S: Stream<Item = Result<T, E>>,
{
type Conn = T;
type Error = E;
fn poll_accept(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
self.project().incoming.poll_next(cx)
}
}
return hyper::server::Builder::new(Acceptor { incoming }, http)
.serve(hyper::service::make_service_fn(move |_| {
future::ready(Ok::<_, hyper::Error>(listener.clone()))
}))
.map_err(|err| vm::Error::from(format!("Server error: {}", err)))
.await;
}
Server::bind(&addr)
.serve(hyper::service::make_service_fn(move |_| {
future::ready(Ok::<_, hyper::Error>(listener.clone()))
}))
.map_err(|err| vm::Error::from(format!("Server error: {}", err)))
.map_ok(|_| ())
.await
}
type ListenFn = fn(OpaqueValue<RootedThread, EffectHandler<Response>>, HttpState) -> IO<Response>;
#[derive(Clone)]
pub struct Handler {
handle: Function<RootedThread, ListenFn>,
handler: OpaqueValue<RootedThread, EffectHandler<Response>>,
}
impl Handler {
pub fn new(
thread: &Thread,
handler: OpaqueValue<RootedThread, EffectHandler<Response>>,
) -> Self {
// Retrieve the `handle` function from the http module which we use to evaluate values of type
// `EffectHandler Response`
let handle: Function<RootedThread, ListenFn> = thread
.get_global("std.http.handle")
.unwrap_or_else(|err| panic!("{}", err));
Self { handle, handler }
}
pub fn handle<E>(
&mut self,
method: http::Method,
uri: http::Uri,
body: impl Stream<Item = Result<Bytes, E>> + Send + 'static,
) -> BoxFuture<'static, crate::Result<hyper::Response<hyper::Body>>>
where
E: fmt::Display + Send + 'static,
{
let child_thread = try_future!(self.handle.vm().new_thread());
let mut handle = try_future!(self.handle.re_root(child_thread));
let gluon_request = record_no_decl! {
method => method.as_str().to_owned(),
uri => Uri(uri),
// Since `Body` implements `Userdata` it can be directly pushed to gluon
body => Body(Arc::new(Mutex::new(Box::pin(
body
.map_err(|err| vm::Error::Message(format!("{}", err)))
// `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is
// marshalled to `Array Byte` in gluon
.map_ok(PushAsRef::<_, [u8]>::new)
))))
};
let (response_sender, response_body) = hyper::Body::channel();
let response_sender = Arc::new(Mutex::new(Some(response_sender)));
let http_state = record_no_decl! {
request => gluon_request,
response => ResponseBody(response_sender.clone())
};
let handler = self.handler.clone();
Box::pin(async move {
handle
.call_async(handler, http_state)
.map(move |result| match result {
Ok(value) => {
match value {
IO::Value(record_p! { status, headers }) => {
// Drop the sender so that the receiver stops waiting for
// more chunks
*response_sender.lock().unwrap() = None;
let status = StatusCode::from_u16(status)
.unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
let mut response = http::Response::builder()
.status(status)
.body(response_body)
.unwrap();
*response.headers_mut() = headers.0;
Ok(response)
}
IO::Exception(err) => {
info!("{}", err);
Ok(http::Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body("".into())
.unwrap())
}
}
}
Err(err) => {
info!("{}", err);
Ok(http::Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body("".into())
.unwrap())
}
})
.await
})
}
}
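// Illustrative driver for `Handler` (a sketch; `thread` and `effect_handler`
// are assumed to be in scope, and hyper's `stream` feature is assumed so that
// `hyper::Body` implements `Stream`):
//
// let mut handler = Handler::new(&thread, effect_handler);
// let response = handler
//     .handle(http::Method::GET, "/".parse().unwrap(), hyper::Body::empty())
//     .await?;
// assert!(response.status().is_success());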
// To let the `http_types` module refer to `Body` and `ResponseBody` we register these types in a
// separate function which is called before loading `http_types`
pub fn load_types(vm: &Thread) -> vm::Result<ExternModule> {
vm.register_type::<Body>("std.http.types.Body", &[])?;
vm.register_type::<ResponseBody>("std.http.types.ResponseBody", &[])?;
vm.register_type::<Uri>("std.http.types.Uri", &[])?;
ExternModule::new(
vm,
record! {
// Define the types so that they can be used from gluon
type std::http::types::Body => Body,
type std::http::types::ResponseBody => ResponseBody,
type std::http::types::Uri => Uri,
type std::http::Method => String,
type std::http::StatusCode => u16,
type std::http::Request => Request,
type std::http::Response => Response,
type std::http::Headers => Headers,
type std::http::HttpState => HttpState
},
)
}
macro_rules! uri_binds {
($($id: ident)*) => {
record!{
$(
$id => primitive!(1, concat!("std.http.prim.uri.", stringify!($id)), |u: &Uri| (u.0).$id())
),*
}
}
}
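// For illustration, `uri_binds!(path host)` expands to roughly the following
// (a sketch of the generated record; the real expansion goes through
// `record!` and `primitive!`):
//
// record! {
//     path => primitive!(1, "std.http.prim.uri.path", |u: &Uri| (u.0).path()),
//     host => primitive!(1, "std.http.prim.uri.host", |u: &Uri| (u.0).host())
// }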
mod std {
pub(crate) mod http {
pub(crate) use crate::std_lib::http as prim;
}
}
pub fn load(vm: &Thread) -> vm::Result<ExternModule> {
ExternModule::new(
vm,
record! {
listen => primitive!(2, async fn std::http::prim::listen),
read_chunk => primitive!(1, async fn std::http::prim::read_chunk),
write_response => primitive!(2, async fn std::http::prim::write_response),
port => primitive!(1, "std.http.prim.uri.port", |u: &Uri| (u.0).port().map(|p| p.as_u16())),
uri => uri_binds!(path host query to_string)
},
)
}
| make_type | identifier_name |
http.rs | use crate::real_std::{
fmt, fs,
path::PathBuf,
pin::Pin,
sync::{Arc, Mutex},
};
use {
collect_mac::collect,
futures::{
future::{self, BoxFuture},
prelude::*,
ready,
task::{self, Poll},
},
http::{
header::{HeaderMap, HeaderName, HeaderValue},
StatusCode,
},
hyper::{body::Bytes, Server},
pin_project_lite::pin_project,
};
use crate::base::types::{ArcType, Type};
use crate::{
vm::{
self,
api::{
generic, Collect, Eff, Function, Getable, OpaqueValue, PushAsRef, Pushable, VmType,
WithVM, IO,
},
thread::{ActiveThread, RootedThread, Thread},
ExternModule, Variants,
},
Error,
};
macro_rules! try_future {
($e:expr) => {
try_future!($e, Box::pin)
};
($e:expr, $f:expr) => {
match $e {
Ok(x) => x,
Err(err) => return $f(::futures::future::err(err.into())),
}
};
}
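// For illustration, `try_future!(expr)` plays the role of the `?` operator in
// functions that return a boxed future instead of a `Result`; a use site
// expands to roughly this sketch:
//
// let value = match expr {
//     Ok(x) => x,
//     Err(err) => return Box::pin(::futures::future::err(err.into())),
// };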
pub struct HttpEffect;
impl VmType for HttpEffect {
type Type = Self;
fn make_type(vm: &Thread) -> ArcType {
let r = generic::R::make_type(vm);
Type::app(
vm.find_type_info("std.http.types.HttpEffect")
.map(|alias| alias.into_type())
.unwrap_or_else(|_| Type::hole()),
collect![r],
)
}
}
pub type EffectHandler<T> = Eff<HttpEffect, T>;
pub struct Headers(HeaderMap);
impl VmType for Headers {
type Type = Vec<(String, Vec<u8>)>;
fn make_type(vm: &Thread) -> ArcType {
Vec::<(String, Vec<u8>)>::make_type(vm)
}
}
impl<'vm> Pushable<'vm> for Headers {
fn vm_push(self, context: &mut ActiveThread<'vm>) -> vm::Result<()> {
Collect::new(
self.0
.iter()
.map(|(name, value)| (name.as_str(), value.as_bytes())),
)
.vm_push(context)
}
}
impl<'vm, 'value> Getable<'vm, 'value> for Headers {
impl_getable_simple!();
fn from_value(vm: &'vm Thread, value: Variants<'value>) -> Self {
Headers(
Collect::from_value(vm, value)
// TODO Error somehow on invalid headers
.filter_map(|(name, value): (&str, &[u8])| {
match (
HeaderName::from_bytes(name.as_bytes()),
HeaderValue::from_bytes(value),
) {
(Ok(name), Ok(value)) => Some((name, value)),
_ => None,
}
})
.collect(),
)
}
}
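// Illustrative round-trip (hypothetical values): pushed to gluon, a header map
// is seen as an array of (name, bytes) pairs, and entries that fail header
// validation are silently dropped on the way back in (see `filter_map` above):
//
// let mut map = HeaderMap::new();
// map.insert("content-type", HeaderValue::from_static("text/plain"));
// // On the gluon side: [("content-type", b"text/plain")]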
// By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon
// threads
#[derive(Userdata, Trace, VmType, Clone)]
#[gluon(vm_type = "std.http.types.Body")]
#[gluon(crate_name = "::vm")]
#[gluon_userdata(clone)]
#[gluon_trace(skip)]
// Representation of an http body that is in the process of being read
pub struct Body(
Arc<Mutex<Pin<Box<dyn Stream<Item = Result<PushAsRef<Bytes, [u8]>, vm::Error>> + Send>>>>,
);
// Types implementing `Userdata` requires a `std::fmt::Debug` implementation so it can be displayed | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "hyper::Body")
}
}
// Since `Body` implements `Userdata`, gluon will automatically marshal the gluon representation
// into a `&Body` argument
fn read_chunk(body: &Body) -> impl Future<Output = IO<Option<PushAsRef<Bytes, [u8]>>>> {
use futures::future::poll_fn;
let body = body.0.clone();
poll_fn(move |cx| {
let mut stream = body.lock().unwrap();
Poll::Ready(IO::Value(
if let Some(result) = ready!(stream.as_mut().poll_next(cx)) {
match result {
Ok(chunk) => Some(chunk),
Err(err) => return IO::Exception(err.to_string()).into(),
}
} else {
None
},
))
})
}
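// Illustrative use from the Rust side (hypothetical; gluon code reaches this
// through the `read_chunk` primitive registered in `load` below):
//
// match read_chunk(&body).await {
//     IO::Value(Some(chunk)) => { /* got a chunk of the body */ }
//     IO::Value(None) => { /* body exhausted */ }
//     IO::Exception(err) => { /* stream error, surfaced to gluon */ }
// }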
// An http body that is being written
#[derive(Userdata, Trace, VmType, Clone)]
#[gluon(vm_type = "std.http.types.ResponseBody")]
#[gluon(crate_name = "::vm")]
#[gluon_userdata(clone)]
#[gluon_trace(skip)]
pub struct ResponseBody(Arc<Mutex<Option<hyper::body::Sender>>>);
impl fmt::Debug for ResponseBody {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ResponseBody")
}
}
fn write_response(response: &ResponseBody, bytes: &[u8]) -> impl Future<Output = IO<()>> {
use futures::future::poll_fn;
// Turn `bytes` into a `Bytes` which can be sent to the http body
let mut unsent_chunk = Some(bytes.to_owned().into());
let response = response.0.clone();
poll_fn(move |cx| {
info!("Starting response send");
let mut sender = response.lock().unwrap();
let sender = sender
.as_mut()
.expect("Sender has been dropped while still in use");
let chunk = unsent_chunk
.take()
.expect("Attempt to poll after chunk is sent");
match sender.poll_ready(cx) {
Poll::Pending => {
unsent_chunk = Some(chunk);
return Poll::Pending;
}
Poll::Ready(Ok(_)) => (),
Poll::Ready(Err(err)) => {
info!("Could not send http response {}", err);
return IO::Exception(err.to_string()).into();
}
}
match sender.try_send_data(chunk) {
Ok(()) => Poll::Ready(IO::Value(())),
Err(chunk) => {
unsent_chunk = Some(chunk);
IO::Exception("Could not send http response".into()).into()
}
}
})
}
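// Note on the `unsent_chunk` round-trip above: `poll_fn` may poll the closure
// many times, so the chunk is taken out before each attempt and put back on
// `Poll::Pending` (or on a failed `try_send_data`). No data is lost between
// polls, and the `expect` calls only fire on genuine contract violations.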
#[derive(Debug, Userdata, Trace, VmType, Clone)]
#[gluon(vm_type = "std.http.types.Uri")]
#[gluon(crate_name = "::vm")]
#[gluon_trace(skip)]
#[gluon_userdata(clone)]
struct Uri(http::Uri);
// Next we define some record types which are marshalled to and from gluon. These have equivalent
// definitions in http_types.glu
field_decl! { http, method, uri, status, body, request, response, headers }
type Request = record_type! {
method => String,
uri => Uri,
body => Body
};
pub type Response = record_type! {
status => u16,
headers => Headers
};
type HttpState = record_type! {
request => Request,
response => ResponseBody
};
#[derive(Getable, VmType)]
#[gluon(crate_name = "::vm")]
struct Settings {
port: u16,
tls_cert: Option<PathBuf>,
}
fn listen(
settings: Settings,
WithVM { vm, value }: WithVM<OpaqueValue<RootedThread, EffectHandler<Response>>>,
) -> impl Future<Output = IO<()>> + Send + 'static {
let vm = vm.root_thread();
listen_(settings, vm, value).map(IO::from)
}
async fn listen_(
settings: Settings,
thread: RootedThread,
handler: OpaqueValue<RootedThread, EffectHandler<Response>>,
) -> vm::Result<()> {
let thread = match thread.new_thread() {
Ok(thread) => thread,
Err(err) => return Err(err),
};
impl tower_service::Service<hyper::Request<hyper::Body>> for Handler {
type Response = hyper::Response<hyper::Body>;
type Error = Error;
type Future = BoxFuture<'static, Result<http::Response<hyper::Body>, Error>>;
fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, request: hyper::Request<hyper::Body>) -> Self::Future {
let (parts, body) = request.into_parts();
self.handle(parts.method, parts.uri, body)
}
}
let addr = format!("0.0.0.0:{}", settings.port).parse().unwrap();
let listener = Handler::new(&thread, handler);
if let Some(cert_path) = &settings.tls_cert {
let identity = fs::read(cert_path).map_err(|err| {
vm::Error::Message(format!(
"Unable to open certificate `{}`: {}",
cert_path.display(),
err
))
})?;
let identity = native_tls::Identity::from_pkcs12(&identity, "")
.map_err(|err| vm::Error::Message(err.to_string()))?;
let acceptor = tokio_native_tls::TlsAcceptor::from(
native_tls::TlsAcceptor::new(identity)
.map_err(|err| vm::Error::Message(err.to_string()))?,
);
let http = hyper::server::conn::Http::new();
let tcp_listener = tokio::net::TcpListener::bind(&addr)
.map_err(|err| vm::Error::Message(err.to_string()))
.await?;
let incoming = tokio_stream::wrappers::TcpListenerStream::new(tcp_listener)
.err_into()
.and_then(|stream| {
acceptor.accept(stream).map_err(|err| {
info!("Unable to accept TLS connection: {}", err);
Box::new(err) as Box<dyn ::std::error::Error + Send + Sync>
})
});
pin_project! {
struct Acceptor<S> {
#[pin]
incoming: S,
}
}
impl<S, T, E> hyper::server::accept::Accept for Acceptor<S>
where
S: Stream<Item = Result<T, E>>,
{
type Conn = T;
type Error = E;
fn poll_accept(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
self.project().incoming.poll_next(cx)
}
}
return hyper::server::Builder::new(Acceptor { incoming }, http)
.serve(hyper::service::make_service_fn(move |_| {
future::ready(Ok::<_, hyper::Error>(listener.clone()))
}))
.map_err(|err| vm::Error::from(format!("Server error: {}", err)))
.await;
}
Server::bind(&addr)
.serve(hyper::service::make_service_fn(move |_| {
future::ready(Ok::<_, hyper::Error>(listener.clone()))
}))
.map_err(|err| vm::Error::from(format!("Server error: {}", err)))
.map_ok(|_| ())
.await
}
type ListenFn = fn(OpaqueValue<RootedThread, EffectHandler<Response>>, HttpState) -> IO<Response>;
#[derive(Clone)]
pub struct Handler {
handle: Function<RootedThread, ListenFn>,
handler: OpaqueValue<RootedThread, EffectHandler<Response>>,
}
impl Handler {
pub fn new(
thread: &Thread,
handler: OpaqueValue<RootedThread, EffectHandler<Response>>,
) -> Self {
// Retrieve the `handle` function from the http module which we use to evaluate values of type
// `EffectHandler Response`
let handle: Function<RootedThread, ListenFn> = thread
.get_global("std.http.handle")
.unwrap_or_else(|err| panic!("{}", err));
Self { handle, handler }
}
pub fn handle<E>(
&mut self,
method: http::Method,
uri: http::Uri,
body: impl Stream<Item = Result<Bytes, E>> + Send + 'static,
) -> BoxFuture<'static, crate::Result<hyper::Response<hyper::Body>>>
where
E: fmt::Display + Send + 'static,
{
let child_thread = try_future!(self.handle.vm().new_thread());
let mut handle = try_future!(self.handle.re_root(child_thread));
let gluon_request = record_no_decl! {
method => method.as_str().to_owned(),
uri => Uri(uri),
// Since `Body` implements `Userdata` it can be directly pushed to gluon
body => Body(Arc::new(Mutex::new(Box::pin(
body
.map_err(|err| vm::Error::Message(format!("{}", err)))
// `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is
// marshalled to `Array Byte` in gluon
.map_ok(PushAsRef::<_, [u8]>::new)
))))
};
let (response_sender, response_body) = hyper::Body::channel();
let response_sender = Arc::new(Mutex::new(Some(response_sender)));
let http_state = record_no_decl! {
request => gluon_request,
response => ResponseBody(response_sender.clone())
};
let handler = self.handler.clone();
Box::pin(async move {
handle
.call_async(handler, http_state)
.map(move |result| match result {
Ok(value) => {
match value {
IO::Value(record_p! { status, headers }) => {
// Drop the sender so that the receiver stops waiting for
// more chunks
*response_sender.lock().unwrap() = None;
let status = StatusCode::from_u16(status)
.unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
let mut response = http::Response::builder()
.status(status)
.body(response_body)
.unwrap();
*response.headers_mut() = headers.0;
Ok(response)
}
IO::Exception(err) => {
info!("{}", err);
Ok(http::Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body("".into())
.unwrap())
}
}
}
Err(err) => {
info!("{}", err);
Ok(http::Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body("".into())
.unwrap())
}
})
.await
})
}
}
// To let the `http_types` module refer to `Body` and `ResponseBody` we register these types in a
// separate function which is called before loading `http_types`
pub fn load_types(vm: &Thread) -> vm::Result<ExternModule> {
vm.register_type::<Body>("std.http.types.Body", &[])?;
vm.register_type::<ResponseBody>("std.http.types.ResponseBody", &[])?;
vm.register_type::<Uri>("std.http.types.Uri", &[])?;
ExternModule::new(
vm,
record! {
// Define the types so that they can be used from gluon
type std::http::types::Body => Body,
type std::http::types::ResponseBody => ResponseBody,
type std::http::types::Uri => Uri,
type std::http::Method => String,
type std::http::StatusCode => u16,
type std::http::Request => Request,
type std::http::Response => Response,
type std::http::Headers => Headers,
type std::http::HttpState => HttpState
},
)
}
macro_rules! uri_binds {
($($id: ident)*) => {
record!{
$(
$id => primitive!(1, concat!("std.http.prim.uri.", stringify!($id)), |u: &Uri| (u.0).$id())
),*
}
}
}
mod std {
pub(crate) mod http {
pub(crate) use crate::std_lib::http as prim;
}
}
pub fn load(vm: &Thread) -> vm::Result<ExternModule> {
ExternModule::new(
vm,
record! {
listen => primitive!(2, async fn std::http::prim::listen),
read_chunk => primitive!(1, async fn std::http::prim::read_chunk),
write_response => primitive!(2, async fn std::http::prim::write_response),
port => primitive!(1, "std.http.prim.uri.port", |u: &Uri| (u.0).port().map(|p| p.as_u16())),
uri => uri_binds!(path host query to_string)
},
)
} | impl fmt::Debug for Body { | random_line_split |
http.rs | use crate::real_std::{
fmt, fs,
path::PathBuf,
pin::Pin,
sync::{Arc, Mutex},
};
use {
collect_mac::collect,
futures::{
future::{self, BoxFuture},
prelude::*,
ready,
task::{self, Poll},
},
http::{
header::{HeaderMap, HeaderName, HeaderValue},
StatusCode,
},
hyper::{body::Bytes, Server},
pin_project_lite::pin_project,
};
use crate::base::types::{ArcType, Type};
use crate::{
vm::{
self,
api::{
generic, Collect, Eff, Function, Getable, OpaqueValue, PushAsRef, Pushable, VmType,
WithVM, IO,
},
thread::{ActiveThread, RootedThread, Thread},
ExternModule, Variants,
},
Error,
};
macro_rules! try_future {
($e:expr) => {
try_future!($e, Box::pin)
};
($e:expr, $f:expr) => {
match $e {
Ok(x) => x,
Err(err) => return $f(::futures::future::err(err.into())),
}
};
}
pub struct HttpEffect;
impl VmType for HttpEffect {
type Type = Self;
fn make_type(vm: &Thread) -> ArcType {
let r = generic::R::make_type(vm);
Type::app(
vm.find_type_info("std.http.types.HttpEffect")
.map(|alias| alias.into_type())
.unwrap_or_else(|_| Type::hole()),
collect![r],
)
}
}
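// Illustrative: `make_type` yields the applied type `std.http.types.HttpEffect r`
// once the alias is loaded; if `std.http.types` is not yet loaded, the lookup
// fails and a type hole is used as a placeholder instead.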
pub type EffectHandler<T> = Eff<HttpEffect, T>;
pub struct Headers(HeaderMap);
impl VmType for Headers {
type Type = Vec<(String, Vec<u8>)>;
fn make_type(vm: &Thread) -> ArcType {
Vec::<(String, Vec<u8>)>::make_type(vm)
}
}
impl<'vm> Pushable<'vm> for Headers {
fn vm_push(self, context: &mut ActiveThread<'vm>) -> vm::Result<()> {
Collect::new(
self.0
.iter()
.map(|(name, value)| (name.as_str(), value.as_bytes())),
)
.vm_push(context)
}
}
impl<'vm, 'value> Getable<'vm, 'value> for Headers {
impl_getable_simple!();
fn from_value(vm: &'vm Thread, value: Variants<'value>) -> Self {
Headers(
Collect::from_value(vm, value)
// TODO Error somehow on invalid headers
.filter_map(|(name, value): (&str, &[u8])| {
match (
HeaderName::from_bytes(name.as_bytes()),
HeaderValue::from_bytes(value),
) {
(Ok(name), Ok(value)) => Some((name, value)),
_ => None,
}
})
.collect(),
)
}
}
// By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon
// threads
#[derive(Userdata, Trace, VmType, Clone)]
#[gluon(vm_type = "std.http.types.Body")]
#[gluon(crate_name = "::vm")]
#[gluon_userdata(clone)]
#[gluon_trace(skip)]
// Representation of an http body that is in the process of being read
pub struct Body(
Arc<Mutex<Pin<Box<dyn Stream<Item = Result<PushAsRef<Bytes, [u8]>, vm::Error>> + Send>>>>,
);
// Types implementing `Userdata` require a `std::fmt::Debug` implementation so they can be displayed
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "hyper::Body")
}
}
// Since `Body` implements `Userdata`, gluon will automatically marshal the gluon representation
// into a `&Body` argument
fn read_chunk(body: &Body) -> impl Future<Output = IO<Option<PushAsRef<Bytes, [u8]>>>> {
use futures::future::poll_fn;
let body = body.0.clone();
poll_fn(move |cx| {
let mut stream = body.lock().unwrap();
Poll::Ready(IO::Value(
if let Some(result) = ready!(stream.as_mut().poll_next(cx)) {
match result {
Ok(chunk) => Some(chunk),
Err(err) => return IO::Exception(err.to_string()).into(),
}
} else {
None
},
))
})
}
// An http body that is being written
#[derive(Userdata, Trace, VmType, Clone)]
#[gluon(vm_type = "std.http.types.ResponseBody")]
#[gluon(crate_name = "::vm")]
#[gluon_userdata(clone)]
#[gluon_trace(skip)]
pub struct ResponseBody(Arc<Mutex<Option<hyper::body::Sender>>>);
impl fmt::Debug for ResponseBody {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ResponseBody")
}
}
fn write_response(response: &ResponseBody, bytes: &[u8]) -> impl Future<Output = IO<()>> {
use futures::future::poll_fn;
// Turn `bytes` into a `Bytes` which can be sent to the http body
let mut unsent_chunk = Some(bytes.to_owned().into());
let response = response.0.clone();
poll_fn(move |cx| {
info!("Starting response send");
let mut sender = response.lock().unwrap();
let sender = sender
.as_mut()
.expect("Sender has been dropped while still in use");
let chunk = unsent_chunk
.take()
.expect("Attempt to poll after chunk is sent");
match sender.poll_ready(cx) {
Poll::Pending => {
unsent_chunk = Some(chunk);
return Poll::Pending;
}
Poll::Ready(Ok(_)) => (),
Poll::Ready(Err(err)) => {
info!("Could not send http response {}", err);
return IO::Exception(err.to_string()).into();
}
}
match sender.try_send_data(chunk) {
Ok(()) => Poll::Ready(IO::Value(())),
Err(chunk) => {
unsent_chunk = Some(chunk);
IO::Exception("Could not send http response".into()).into()
}
}
})
}
#[derive(Debug, Userdata, Trace, VmType, Clone)]
#[gluon(vm_type = "std.http.types.Uri")]
#[gluon(crate_name = "::vm")]
#[gluon_trace(skip)]
#[gluon_userdata(clone)]
struct Uri(http::Uri);
// Next we define some record types which are marshalled to and from gluon. These have equivalent
// definitions in http_types.glu
field_decl! { http, method, uri, status, body, request, response, headers }
type Request = record_type! {
method => String,
uri => Uri,
body => Body
};
pub type Response = record_type! {
status => u16,
headers => Headers
};
type HttpState = record_type! {
request => Request,
response => ResponseBody
};
#[derive(Getable, VmType)]
#[gluon(crate_name = "::vm")]
struct Settings {
port: u16,
tls_cert: Option<PathBuf>,
}
fn listen(
settings: Settings,
WithVM { vm, value }: WithVM<OpaqueValue<RootedThread, EffectHandler<Response>>>,
) -> impl Future<Output = IO<()>> + Send + 'static {
let vm = vm.root_thread();
listen_(settings, vm, value).map(IO::from)
}
async fn listen_(
settings: Settings,
thread: RootedThread,
handler: OpaqueValue<RootedThread, EffectHandler<Response>>,
) -> vm::Result<()> {
let thread = match thread.new_thread() {
Ok(thread) => thread,
Err(err) => return Err(err),
};
impl tower_service::Service<hyper::Request<hyper::Body>> for Handler {
type Response = hyper::Response<hyper::Body>;
type Error = Error;
type Future = BoxFuture<'static, Result<http::Response<hyper::Body>, Error>>;
fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, request: hyper::Request<hyper::Body>) -> Self::Future {
let (parts, body) = request.into_parts();
self.handle(parts.method, parts.uri, body)
}
}
let addr = format!("0.0.0.0:{}", settings.port).parse().unwrap();
let listener = Handler::new(&thread, handler);
if let Some(cert_path) = &settings.tls_cert {
let identity = fs::read(cert_path).map_err(|err| {
vm::Error::Message(format!(
"Unable to open certificate `{}`: {}",
cert_path.display(),
err
))
})?;
let identity = native_tls::Identity::from_pkcs12(&identity, "")
.map_err(|err| vm::Error::Message(err.to_string()))?;
let acceptor = tokio_native_tls::TlsAcceptor::from(
native_tls::TlsAcceptor::new(identity)
.map_err(|err| vm::Error::Message(err.to_string()))?,
);
let http = hyper::server::conn::Http::new();
let tcp_listener = tokio::net::TcpListener::bind(&addr)
.map_err(|err| vm::Error::Message(err.to_string()))
.await?;
let incoming = tokio_stream::wrappers::TcpListenerStream::new(tcp_listener)
.err_into()
.and_then(|stream| {
acceptor.accept(stream).map_err(|err| {
info!("Unable to accept TLS connection: {}", err);
Box::new(err) as Box<dyn ::std::error::Error + Send + Sync>
})
});
pin_project! {
struct Acceptor<S> {
#[pin]
incoming: S,
}
}
impl<S, T, E> hyper::server::accept::Accept for Acceptor<S>
where
S: Stream<Item = Result<T, E>>,
{
type Conn = T;
type Error = E;
fn poll_accept(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
self.project().incoming.poll_next(cx)
}
}
return hyper::server::Builder::new(Acceptor { incoming }, http)
.serve(hyper::service::make_service_fn(move |_| {
future::ready(Ok::<_, hyper::Error>(listener.clone()))
}))
.map_err(|err| vm::Error::from(format!("Server error: {}", err)))
.await;
}
Server::bind(&addr)
.serve(hyper::service::make_service_fn(move |_| {
future::ready(Ok::<_, hyper::Error>(listener.clone()))
}))
.map_err(|err| vm::Error::from(format!("Server error: {}", err)))
.map_ok(|_| ())
.await
}
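// Illustrative behaviour of the two branches above: with `tls_cert: None` the
// server speaks plain HTTP via `Server::bind`; with a certificate path set,
// each accepted TCP stream is wrapped by the TLS acceptor before being handed
// to hyper. A hypothetical gluon-side settings record:
//
// { port = 443, tls_cert = Some "identity.p12" }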
type ListenFn = fn(OpaqueValue<RootedThread, EffectHandler<Response>>, HttpState) -> IO<Response>;
#[derive(Clone)]
pub struct Handler {
handle: Function<RootedThread, ListenFn>,
handler: OpaqueValue<RootedThread, EffectHandler<Response>>,
}
impl Handler {
pub fn new(
thread: &Thread,
handler: OpaqueValue<RootedThread, EffectHandler<Response>>,
) -> Self { |
pub fn handle<E>(
&mut self,
method: http::Method,
uri: http::Uri,
body: impl Stream<Item = Result<Bytes, E>> + Send + 'static,
) -> BoxFuture<'static, crate::Result<hyper::Response<hyper::Body>>>
where
E: fmt::Display + Send + 'static,
{
let child_thread = try_future!(self.handle.vm().new_thread());
let mut handle = try_future!(self.handle.re_root(child_thread));
let gluon_request = record_no_decl! {
method => method.as_str().to_owned(),
uri => Uri(uri),
// Since `Body` implements `Userdata` it can be directly pushed to gluon
body => Body(Arc::new(Mutex::new(Box::pin(
body
.map_err(|err| vm::Error::Message(format!("{}", err)))
// `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is
// marshalled to `Array Byte` in gluon
.map_ok(PushAsRef::<_, [u8]>::new)
))))
};
let (response_sender, response_body) = hyper::Body::channel();
let response_sender = Arc::new(Mutex::new(Some(response_sender)));
let http_state = record_no_decl! {
request => gluon_request,
response => ResponseBody(response_sender.clone())
};
let handler = self.handler.clone();
Box::pin(async move {
handle
.call_async(handler, http_state)
.map(move |result| match result {
Ok(value) => {
match value {
IO::Value(record_p! { status, headers }) => {
// Drop the sender so that the receiver stops waiting for
// more chunks
*response_sender.lock().unwrap() = None;
let status = StatusCode::from_u16(status)
.unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
let mut response = http::Response::builder()
.status(status)
.body(response_body)
.unwrap();
*response.headers_mut() = headers.0;
Ok(response)
}
IO::Exception(err) => {
info!("{}", err);
Ok(http::Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body("".into())
.unwrap())
}
}
}
Err(err) => {
info!("{}", err);
Ok(http::Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body("".into())
.unwrap())
}
})
.await
})
}
}
// To let the `http_types` module refer to `Body` and `ResponseBody` we register these types in a
// separate function which is called before loading `http_types`
pub fn load_types(vm: &Thread) -> vm::Result<ExternModule> {
vm.register_type::<Body>("std.http.types.Body", &[])?;
vm.register_type::<ResponseBody>("std.http.types.ResponseBody", &[])?;
vm.register_type::<Uri>("std.http.types.Uri", &[])?;
ExternModule::new(
vm,
record! {
// Define the types so that they can be used from gluon
type std::http::types::Body => Body,
type std::http::types::ResponseBody => ResponseBody,
type std::http::types::Uri => Uri,
type std::http::Method => String,
type std::http::StatusCode => u16,
type std::http::Request => Request,
type std::http::Response => Response,
type std::http::Headers => Headers,
type std::http::HttpState => HttpState
},
)
}
macro_rules! uri_binds {
($($id: ident)*) => {
record!{
$(
$id => primitive!(1, concat!("std.http.prim.uri.", stringify!($id)), |u: &Uri| (u.0).$id())
),*
}
}
}
mod std {
pub(crate) mod http {
pub(crate) use crate::std_lib::http as prim;
}
}
pub fn load(vm: &Thread) -> vm::Result<ExternModule> {
ExternModule::new(
vm,
record! {
listen => primitive!(2, async fn std::http::prim::listen),
read_chunk => primitive!(1, async fn std::http::prim::read_chunk),
write_response => primitive!(2, async fn std::http::prim::write_response),
port => primitive!(1, "std.http.prim.uri.port", |u: &Uri| (u.0).port().map(|p| p.as_u16())),
uri => uri_binds!(path host query to_string)
},
)
}
|
// Retrieve the `handle` function from the http module which we use to evaluate values of type
// `EffectHandler Response`
let handle: Function<RootedThread, ListenFn> = thread
.get_global("std.http.handle")
.unwrap_or_else(|err| panic!("{}", err));
Self { handle, handler }
}
| identifier_body |
arc-rw-read-mode-shouldnt-escape.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod extra;
use extra::arc;
fn main() | {
let x = ~arc::RWArc::new(1);
let mut y = None;
do x.write_downgrade |write_mode| {
y = Some(x.downgrade(write_mode));
//~^ ERROR cannot infer an appropriate lifetime
}
y.unwrap();
// Adding this line causes a method unification failure instead
// do (&option::unwrap(y)).read |state| { assert!(*state == 1); }
} | identifier_body |
|
arc-rw-read-mode-shouldnt-escape.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod extra;
use extra::arc;
fn main() {
let x = ~arc::RWArc::new(1);
let mut y = None;
do x.write_downgrade |write_mode| {
y = Some(x.downgrade(write_mode));
//~^ ERROR cannot infer an appropriate lifetime
}
y.unwrap();
// Adding this line causes a method unification failure instead
// do (&option::unwrap(y)).read |state| { assert!(*state == 1); }
} | // file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | random_line_split |
arc-rw-read-mode-shouldnt-escape.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod extra;
use extra::arc;
fn | () {
let x = ~arc::RWArc::new(1);
let mut y = None;
do x.write_downgrade |write_mode| {
y = Some(x.downgrade(write_mode));
//~^ ERROR cannot infer an appropriate lifetime
}
y.unwrap();
// Adding this line causes a method unification failure instead
// do (&option::unwrap(y)).read |state| { assert!(*state == 1); }
}
| main | identifier_name |
uz-UZ.js | export default {
el: {
colorpicker: {
confirm: 'Qabul qilish',
clear: 'Tozalash'
},
datepicker: {
now: 'Hozir',
today: 'Bugun',
cancel: 'Bekor qilish',
clear: 'Tozalash',
confirm: 'Qabul qilish',
selectDate: 'Kunni tanlash',
selectTime: 'Soatni tanlash',
startDate: 'Boshlanish sanasi',
startTime: 'Boshlanish vaqti',
endDate: 'Tugash sanasi',
endTime: 'Tugash vaqti',
prevYear: 'Oʻtgan yil',
nextYear: 'Kelgusi yil',
prevMonth: 'Oʻtgan oy',
nextMonth: 'Kelgusi oy',
year: 'Yil',
month1: 'Yanvar',
month2: 'Fevral',
month3: 'Mart',
month4: 'Aprel',
month5: 'May',
month6: 'Iyun',
month7: 'Iyul',
month8: 'Avgust',
month9: 'Sentabr',
month10: 'Oktabr',
month11: 'Noyabr',
month12: 'Dekabr',
week: 'Hafta',
weeks: {
sun: 'Yak',
mon: 'Dush',
tue: 'Sesh',
wed: 'Chor',
thu: 'Pay', | sat: 'Shan'
},
months: {
jan: 'Yan',
feb: 'Fev',
mar: 'Mar',
apr: 'Apr',
may: 'May',
jun: 'Iyun',
jul: 'Iyul',
aug: 'Avg',
sep: 'Sen',
oct: 'Okt',
nov: 'Noy',
dec: 'Dek'
}
},
select: {
loading: 'Yuklanmoqda',
noMatch: 'Mos maʼlumot yoʻq',
noData: 'Maʼlumot yoʻq',
placeholder: 'Tanlang'
},
cascader: {
noMatch: 'Mos maʼlumot topilmadi',
loading: 'Yuklanmoqda',
placeholder: 'Tanlash',
noData: 'Maʼlumot yoʻq'
},
pagination: {
goto: 'Oʻtish',
pagesize: '/sahifa',
total: 'Barchasi {total} ta',
pageClassifier: ''
},
messagebox: {
title: 'Xabar',
confirm: 'Qabul qilish',
cancel: 'Bekor qilish',
error: 'Xatolik'
},
upload: {
deleteTip: 'Oʻchirish tugmasini bosib oʻchiring',
delete: 'Oʻchirish',
preview: 'Oldin koʻrish',
continue: 'Davom qilish'
},
table: {
emptyText: 'Boʻsh',
confirmFilter: 'Qabul qilish',
resetFilter: 'Oldingi holatga qaytarish',
clearFilter: 'Jami',
sumText: 'Summasi'
},
tree: {
emptyText: 'Maʼlumot yoʻq'
},
transfer: {
noMatch: 'Mos maʼlumot topilmadi',
noData: 'Maʼlumot yoʻq',
titles: ['1-jadval', '2-jadval'],
filterPlaceholder: 'Kalit soʻzni kiriting',
noCheckedFormat: '{total} ta element',
hasCheckedFormat: '{checked}/{total} ta belgilandi'
},
image: {
error: 'Xatolik'
},
pageHeader: {
title: 'Orqaga'
},
popconfirm: {
confirmButtonText: 'Yes', // to be translated
cancelButtonText: 'No' // to be translated
},
empty: {
description: 'Boʻsh'
}
}
}; | fri: 'Jum', | random_line_split |
__init__.py | # coding=utf-8
"""
Profiler utility for python | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import os
import cProfile
from pstats import Stats
from cProfile import Profile
def start_profile():
"""
start_profile
@rtype: Profile
"""
pr = Profile()
pr.enable()
return pr
def console(x):
"""
@type x: str, unicode
@return: None
"""
print("\033[33m$", x, "\033[0m")
def end_profile(pr, items=20, printstats=False, returnvalue=False):
"""
@type pr: str, unicode
@type items: int
@type printstats: bool
@type returnvalue: bool
@return: None
"""
p = Stats(pr)
if returnvalue is True:
return p.get_print_list([items])
p.strip_dirs()
console("total time")
p.sort_stats('time')
if items is None:
p.print_stats()
else:
p.print_stats(items)
if printstats:
console("cumulative time")
p.sort_stats('cumtime')
if items is None:
p.print_stats()
else:
p.print_stats(items)
p.sort_stats('calls')
if items is None:
p.print_stats()
else:
p.print_stats(items)
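# Illustrative usage of the start/end pair (a sketch; `do_expensive_work` is a
# placeholder for the code under measurement):
#
# pr = start_profile()
# do_expensive_work()
# end_profile(pr, items=10)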
def runsnake_profile_method(method, cglobals, clocals):
"""
@type method: str, unicode
@type cglobals: dict
@type clocals: dict
@return: None
"""
cProfile.runctx(method + "()", globals=cglobals, locals=clocals, filename=method + ".profile")
os.system("python /usr/local/lib/python2.7/site-packages/runsnakerun/runsnake.py " + method + ".profile")
os.system("rm " + method + ".profile")
def graph_profile_program(sourcefile):
"""
@type sourcefile: str, unicode
@return: None
"""
if 0 != os.system("python -m cProfile -o output.pstats ./" + sourcefile):
print("\033[31mprofile error:\033[0m")
print("\033[33m", "pip install graphviz", "\033[0m")
print("\033[33m", "pip install gprof2dot", "\033[0m")
elif 0 != os.system("gprof2dot -f pstats output.pstats | dot -Tpng -o " + sourcefile.replace(".py", ".png")):
print("\033[31mgprof2dot error:\033[0m")
print("\033[33m", "pip install graphviz", "\033[0m")
print("\033[33m", "pip install gprof2dot", "\033[0m")
print("\033[33m", "gprof2dot is in path? (/usr/local/bin/gprof2dot)", "\033[0m")
else:
if not os.path.exists("./" + sourcefile.replace(".py", ".png")):
print("\033[31mcannot find", sourcefile.replace(".py", ".png"), "\033[0m")
else:
os.system("open " + sourcefile.replace(".py", ".png"))
if os.remove("output.pstats"):
os.remove("output.pstats") | Erik de Jonge
[email protected]
license: gpl2
""" | random_line_split |
__init__.py | # coding=utf-8
"""
Profiler utility for python
Erik de Jonge
[email protected]
license: gpl2
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import os
import cProfile
from pstats import Stats
from cProfile import Profile
def start_profile():
"""
start_profile
@rtype: Profile
"""
pr = Profile()
pr.enable()
return pr
def console(x):
"""
@type x: str, unicode
@return: None
"""
print("\033[33m$", x, "\033[0m")
def | (pr, items=20, printstats=False, returnvalue=False):
"""
@type pr: str, unicode
@type items: int
@type printstats: bool
@type returnvalue: bool
@return: None
"""
p = Stats(pr)
if returnvalue is True:
return p.get_print_list([items])
p.strip_dirs()
console("total time")
p.sort_stats('time')
if items is None:
p.print_stats()
else:
p.print_stats(items)
if printstats:
console("cumulative time")
p.sort_stats('cumtime')
if items is None:
p.print_stats()
else:
p.print_stats(items)
p.sort_stats('calls')
if items is None:
p.print_stats()
else:
p.print_stats(items)
def runsnake_profile_method(method, cglobals, clocals):
"""
@type method: str, unicode
@type cglobals: dict
@type clocals: dict
@return: None
"""
cProfile.runctx(method + "()", globals=cglobals, locals=clocals, filename=method + ".profile")
os.system("python /usr/local/lib/python2.7/site-packages/runsnakerun/runsnake.py " + method + ".profile")
os.system("rm " + method + ".profile")
def graph_profile_program(sourcefile):
"""
@type sourcefile: str, unicode
@return: None
"""
if 0 != os.system("python -m cProfile -o output.pstats ./" + sourcefile):
print("\033[31mprofile error:\033[0m")
print("\033[33m", "pip install graphviz", "\033[0m")
print("\033[33m", "pip install gprof2dot", "\033[0m")
elif 0 != os.system("gprof2dot -f pstats output.pstats | dot -Tpng -o " + sourcefile.replace(".py", ".png")):
print("\033[31mgprof2dot error:\033[0m")
print("\033[33m", "pip install graphviz", "\033[0m")
print("\033[33m", "pip install gprof2dot", "\033[0m")
print("\033[33m", "gprof2dot is in path? (/usr/local/bin/gprof2dot)", "\033[0m")
else:
if not os.path.exists("./" + sourcefile.replace(".py", ".png")):
print("\033[31mcannot find", sourcefile.replace(".py", ".png"), "\033[0m")
else:
os.system("open " + sourcefile.replace(".py", ".png"))
if os.remove("output.pstats"):
os.remove("output.pstats")
| end_profile | identifier_name |
__init__.py | # coding=utf-8
"""
Profiler utility for python
Erik de Jonge
[email protected]
license: gpl2
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import os
import cProfile
from pstats import Stats
from cProfile import Profile
def start_profile():
"""
start_profile
@rtype: Profile
"""
pr = Profile()
pr.enable()
return pr
def console(x):
"""
@type x: str, unicode
@return: None
"""
print("\033[33m$", x, "\033[0m")
def end_profile(pr, items=20, printstats=False, returnvalue=False):
"""
@type pr: str, unicode
@type items: int
@type printstats: bool
@type returnvalue: bool
@return: None
"""
p = Stats(pr)
if returnvalue is True:
return p.get_print_list([items])
p.strip_dirs()
console("total time")
p.sort_stats('time')
if items is None:
p.print_stats()
else:
p.print_stats(items)
if printstats:
console("cumulative time")
p.sort_stats('cumtime')
if items is None:
p.print_stats()
else:
p.print_stats(items)
p.sort_stats('calls')
if items is None:
p.print_stats()
else:
p.print_stats(items)
def runsnake_profile_method(method, cglobals, clocals):
"""
@type method: str, unicode
@type cglobals: dict
@type clocals: dict
@return: None
"""
cProfile.runctx(method + "()", globals=cglobals, locals=clocals, filename=method + ".profile")
os.system("python /usr/local/lib/python2.7/site-packages/runsnakerun/runsnake.py " + method + ".profile")
os.system("rm " + method + ".profile")
def graph_profile_program(sourcefile):
"""
@type sourcefile: str, unicode
@return: None
"""
if 0 != os.system("python -m cProfile -o output.pstats ./" + sourcefile):
print("\033[31mprofile error:\033[0m")
print("\033[33m", "pip install graphviz", "\033[0m")
print("\033[33m", "pip install gprof2dot", "\033[0m")
elif 0 != os.system("gprof2dot -f pstats output.pstats | dot -Tpng -o " + sourcefile.replace(".py", ".png")):
|
else:
if not os.path.exists("./" + sourcefile.replace(".py", ".png")):
print("\033[31mcannot find", sourcefile.replace(".py", ".png"), "\033[0m")
else:
os.system("open " + sourcefile.replace(".py", ".png"))
if os.remove("output.pstats"):
os.remove("output.pstats")
| print("\033[31mgprof2dot error:\033[0m")
print("\033[33m", "pip install graphviz", "\033[0m")
print("\033[33m", "pip install gprof2dot", "\033[0m")
print("\033[33m", "gprof2dot is in path? (/usr/local/bin/gprof2dot)", "\033[0m") | conditional_block |
__init__.py | # coding=utf-8
"""
Profiler utility for python
Erik de Jonge
[email protected]
license: gpl2
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import os
import cProfile
from pstats import Stats
from cProfile import Profile
def start_profile():
"""
start_profile
@rtype: Profile
"""
pr = Profile()
pr.enable()
return pr
def console(x):
"""
@type x: str, unicode
@return: None
"""
print("\033[33m$", x, "\033[0m")
def end_profile(pr, items=20, printstats=False, returnvalue=False):
"""
@type pr: str, unicode
@type items: int
@type printstats: bool
@type returnvalue: bool
@return: None
"""
p = Stats(pr)
if returnvalue is True:
return p.get_print_list([items])
p.strip_dirs()
console("total time")
p.sort_stats('time')
if items is None:
p.print_stats()
else:
p.print_stats(items)
if printstats:
console("cumulative time")
p.sort_stats('cumtime')
if items is None:
p.print_stats()
else:
p.print_stats(items)
p.sort_stats('calls')
if items is None:
p.print_stats()
else:
p.print_stats(items)
def runsnake_profile_method(method, cglobals, clocals):
"""
@type method: str, unicode
@type cglobals: dict
@type clocals: dict
@return: None
"""
cProfile.runctx(method + "()", globals=cglobals, locals=clocals, filename=method + ".profile")
os.system("python /usr/local/lib/python2.7/site-packages/runsnakerun/runsnake.py " + method + ".profile")
os.system("rm " + method + ".profile")
def graph_profile_program(sourcefile):
| """
@type sourcefile: str, unicode
@return: None
"""
if 0 != os.system("python -m cProfile -o output.pstats ./" + sourcefile):
print("\033[31mprofile error:\033[0m")
print("\033[33m", "pip install graphviz", "\033[0m")
print("\033[33m", "pip install gprof2dot", "\033[0m")
elif 0 != os.system("gprof2dot -f pstats output.pstats | dot -Tpng -o " + sourcefile.replace(".py", ".png")):
print("\033[31mgprof2dot error:\033[0m")
print("\033[33m", "pip install graphviz", "\033[0m")
print("\033[33m", "pip install gprof2dot", "\033[0m")
print("\033[33m", "gprof2dot is in path? (/usr/local/bin/gprof2dot)", "\033[0m")
else:
if not os.path.exists("./" + sourcefile.replace(".py", ".png")):
print("\033[31mcannot find", sourcefile.replace(".py", ".png"), "\033[0m")
else:
os.system("open " + sourcefile.replace(".py", ".png"))
if os.remove("output.pstats"):
os.remove("output.pstats") | identifier_body |
|
lib.rs | #![feature(if_let)]
use std::os;
use std::io::Command;
use std::io::process::InheritFd;
use std::default::Default;
/// Extra configuration to pass to gcc.
pub struct Config {
/// Directories where gcc will look for header files.
pub include_directories: Vec<Path>,
/// Additional definitions (`-DKEY` or `-DKEY=VALUE`).
pub definitions: Vec<(String, Option<String>)>,
/// Additional object files to link into the final archive
pub objects: Vec<Path>,
}
impl Default for Config {
fn default() -> Config {
Config {
include_directories: Vec::new(),
definitions: Vec::new(),
objects: Vec::new(),
}
}
}
/// Compile a library from the given set of input C files.
///
/// This will simply compile all files into object files and then assemble them
/// into the output. This will read the standard environment variables to detect
/// cross compilations and such.
///
/// # Example
///
/// ```no_run
/// use std::default::Default;
/// gcc::compile_library("libfoo.a", &Default::default(), &[
/// "foo.c",
/// "bar.c",
/// ]);
/// ```
pub fn compile_library(output: &str, config: &Config, files: &[&str]) {
assert!(output.starts_with("lib"));
assert!(output.ends_with(".a"));
let target = os::getenv("TARGET").unwrap();
let opt_level = os::getenv("OPT_LEVEL").unwrap();
let mut cmd = Command::new(gcc(target.as_slice()));
cmd.arg(format!("-O{}", opt_level));
cmd.arg("-c");
cmd.arg("-ffunction-sections").arg("-fdata-sections");
cmd.args(cflags().as_slice());
if target.as_slice().contains("-ios") {
cmd.args(ios_flags(target.as_slice()).as_slice());
} else {
if target.as_slice().contains("i686") {
cmd.arg("-m32");
} else if target.as_slice().contains("x86_64") {
cmd.arg("-m64");
}
if !target.as_slice().contains("i686") {
cmd.arg("-fPIC");
}
}
for directory in config.include_directories.iter() {
cmd.arg("-I").arg(directory);
}
for &(ref key, ref value) in config.definitions.iter() {
if let &Some(ref value) = value {
cmd.arg(format!("-D{}={}", key, value));
} else {
cmd.arg(format!("-D{}", key));
}
}
let src = Path::new(os::getenv("CARGO_MANIFEST_DIR").unwrap());
let dst = Path::new(os::getenv("OUT_DIR").unwrap());
let mut objects = Vec::new();
for file in files.iter() {
let obj = dst.join(*file).with_extension("o");
std::io::fs::mkdir_recursive(&obj.dir_path(), std::io::USER_RWX).unwrap();
run(cmd.clone().arg(src.join(*file)).arg("-o").arg(&obj));
objects.push(obj);
}
run(Command::new(ar(target.as_slice())).arg("crus")
.arg(dst.join(output))
.args(objects.as_slice())
.args(config.objects.as_slice()));
println!("cargo:rustc-flags=-L {} -l {}:static",
dst.display(), output.slice(3, output.len() - 2));
}
fn run(cmd: &mut Command) {
println!("running: {}", cmd);
assert!(cmd.stdout(InheritFd(1))
.stderr(InheritFd(2))
.status()
.unwrap()
.success());
}
fn gcc(target: &str) -> String {
let is_android = target.find_str("android").is_some();
os::getenv("CC").unwrap_or(if cfg!(windows) {
"gcc".to_string()
} else if is_android {
format!("{}-gcc", target)
} else {
"cc".to_string()
})
} | os::getenv("AR").unwrap_or(if is_android {
format!("{}-ar", target)
} else {
"ar".to_string()
})
}
fn cflags() -> Vec<String> {
os::getenv("CFLAGS").unwrap_or(String::new())
.as_slice().words().map(|s| s.to_string())
.collect()
}
fn ios_flags(target: &str) -> Vec<String> {
let mut is_device_arch = false;
let mut res = Vec::new();
if target.starts_with("arm-") {
res.push("-arch");
res.push("armv7");
is_device_arch = true;
} else if target.starts_with("arm64-") {
res.push("-arch");
res.push("arm64");
is_device_arch = true;
} else if target.starts_with("i386-") {
res.push("-m32");
} else if target.starts_with("x86_64-") {
res.push("-m64");
}
let sdk = if is_device_arch {"iphoneos"} else {"iphonesimulator"};
println!("Detecting iOS SDK path for {}", sdk);
let sdk_path = Command::new("xcrun")
.arg("--show-sdk-path")
.arg("--sdk")
.arg(sdk)
.stderr(InheritFd(2))
.output()
.unwrap()
.output;
let sdk_path = String::from_utf8(sdk_path).unwrap();
res.push("-isysroot");
res.push(sdk_path.as_slice().trim());
res.iter().map(|s| s.to_string()).collect::<Vec<_>>()
} |
fn ar(target: &str) -> String {
let is_android = target.find_str("android").is_some();
| random_line_split |
lib.rs | #![feature(if_let)]
use std::os;
use std::io::Command;
use std::io::process::InheritFd;
use std::default::Default;
/// Extra configuration to pass to gcc.
pub struct Config {
/// Directories where gcc will look for header files.
pub include_directories: Vec<Path>,
/// Additional definitions (`-DKEY` or `-DKEY=VALUE`).
pub definitions: Vec<(String, Option<String>)>,
/// Additional object files to link into the final archive
pub objects: Vec<Path>,
}
impl Default for Config {
fn default() -> Config {
Config {
include_directories: Vec::new(),
definitions: Vec::new(),
objects: Vec::new(),
}
}
}
/// Compile a library from the given set of input C files.
///
/// This will simply compile all files into object files and then assemble them
/// into the output. This will read the standard environment variables to detect
/// cross compilations and such.
///
/// # Example
///
/// ```no_run
/// use std::default::Default;
/// gcc::compile_library("libfoo.a", &Default::default(), &[
/// "foo.c",
/// "bar.c",
/// ]);
/// ```
pub fn compile_library(output: &str, config: &Config, files: &[&str]) {
assert!(output.starts_with("lib"));
assert!(output.ends_with(".a"));
let target = os::getenv("TARGET").unwrap();
let opt_level = os::getenv("OPT_LEVEL").unwrap();
let mut cmd = Command::new(gcc(target.as_slice()));
cmd.arg(format!("-O{}", opt_level));
cmd.arg("-c");
cmd.arg("-ffunction-sections").arg("-fdata-sections");
cmd.args(cflags().as_slice());
if target.as_slice().contains("-ios") {
cmd.args(ios_flags(target.as_slice()).as_slice());
} else {
if target.as_slice().contains("i686") {
cmd.arg("-m32");
} else if target.as_slice().contains("x86_64") {
cmd.arg("-m64");
}
if !target.as_slice().contains("i686") {
cmd.arg("-fPIC");
}
}
for directory in config.include_directories.iter() {
cmd.arg("-I").arg(directory);
}
for &(ref key, ref value) in config.definitions.iter() {
if let &Some(ref value) = value {
cmd.arg(format!("-D{}={}", key, value));
} else {
cmd.arg(format!("-D{}", key));
}
}
let src = Path::new(os::getenv("CARGO_MANIFEST_DIR").unwrap());
let dst = Path::new(os::getenv("OUT_DIR").unwrap());
let mut objects = Vec::new();
for file in files.iter() {
let obj = dst.join(*file).with_extension("o");
std::io::fs::mkdir_recursive(&obj.dir_path(), std::io::USER_RWX).unwrap();
run(cmd.clone().arg(src.join(*file)).arg("-o").arg(&obj));
objects.push(obj);
}
run(Command::new(ar(target.as_slice())).arg("crus")
.arg(dst.join(output))
.args(objects.as_slice())
.args(config.objects.as_slice()));
println!("cargo:rustc-flags=-L {} -l {}:static",
dst.display(), output.slice(3, output.len() - 2));
}
fn run(cmd: &mut Command) {
println!("running: {}", cmd);
assert!(cmd.stdout(InheritFd(1))
.stderr(InheritFd(2))
.status()
.unwrap()
.success());
}
fn gcc(target: &str) -> String {
let is_android = target.find_str("android").is_some();
os::getenv("CC").unwrap_or(if cfg!(windows) | else if is_android {
format!("{}-gcc", target)
} else {
"cc".to_string()
})
}
fn ar(target: &str) -> String {
let is_android = target.find_str("android").is_some();
os::getenv("AR").unwrap_or(if is_android {
format!("{}-ar", target)
} else {
"ar".to_string()
})
}
fn cflags() -> Vec<String> {
os::getenv("CFLAGS").unwrap_or(String::new())
.as_slice().words().map(|s| s.to_string())
.collect()
}
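// Illustrative: with `CFLAGS="-Wall -march=native"` in the environment this
// returns `vec!["-Wall", "-march=native"]`. The split is plain whitespace via
// `words()`, so shell-style quoting inside CFLAGS is not handled.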
fn ios_flags(target: &str) -> Vec<String> {
let mut is_device_arch = false;
let mut res = Vec::new();
if target.starts_with("arm-") {
res.push("-arch");
res.push("armv7");
is_device_arch = true;
} else if target.starts_with("arm64-") {
res.push("-arch");
res.push("arm64");
is_device_arch = true;
} else if target.starts_with("i386-") {
res.push("-m32");
} else if target.starts_with("x86_64-") {
res.push("-m64");
}
let sdk = if is_device_arch {"iphoneos"} else {"iphonesimulator"};
println!("Detecting iOS SDK path for {}", sdk);
let sdk_path = Command::new("xcrun")
.arg("--show-sdk-path")
.arg("--sdk")
.arg(sdk)
.stderr(InheritFd(2))
.output()
.unwrap()
.output;
let sdk_path = String::from_utf8(sdk_path).unwrap();
res.push("-isysroot");
res.push(sdk_path.as_slice().trim());
res.iter().map(|s| s.to_string()).collect::<Vec<_>>()
}
| {
"gcc".to_string()
} | conditional_block |
lib.rs | #![feature(if_let)]
use std::os;
use std::io::Command;
use std::io::process::InheritFd;
use std::default::Default;
/// Extra configuration to pass to gcc.
pub struct Config {
/// Directories where gcc will look for header files.
pub include_directories: Vec<Path>,
/// Additional definitions (`-DKEY` or `-DKEY=VALUE`).
pub definitions: Vec<(String, Option<String>)>,
/// Additional object files to link into the final archive
pub objects: Vec<Path>,
}
impl Default for Config {
fn default() -> Config {
Config {
include_directories: Vec::new(),
definitions: Vec::new(),
objects: Vec::new(),
}
}
}
/// Compile a library from the given set of input C files.
///
/// This will simply compile all files into object files and then assemble them
/// into the output. This will read the standard environment variables to detect
/// cross compilations and such.
///
/// # Example
///
/// ```no_run
/// use std::default::Default;
/// gcc::compile_library("libfoo.a", &Default::default(), &[
/// "foo.c",
/// "bar.c",
/// ]);
/// ```
pub fn compile_library(output: &str, config: &Config, files: &[&str]) {
assert!(output.starts_with("lib"));
assert!(output.ends_with(".a"));
let target = os::getenv("TARGET").unwrap();
let opt_level = os::getenv("OPT_LEVEL").unwrap();
let mut cmd = Command::new(gcc(target.as_slice()));
cmd.arg(format!("-O{}", opt_level));
cmd.arg("-c");
cmd.arg("-ffunction-sections").arg("-fdata-sections");
cmd.args(cflags().as_slice());
if target.as_slice().contains("-ios") {
cmd.args(ios_flags(target.as_slice()).as_slice());
} else {
if target.as_slice().contains("i686") {
cmd.arg("-m32");
} else if target.as_slice().contains("x86_64") {
cmd.arg("-m64");
}
if !target.as_slice().contains("i686") {
cmd.arg("-fPIC");
}
}
for directory in config.include_directories.iter() {
cmd.arg("-I").arg(directory);
}
for &(ref key, ref value) in config.definitions.iter() {
if let &Some(ref value) = value {
cmd.arg(format!("-D{}={}", key, value));
} else {
cmd.arg(format!("-D{}", key));
}
}
let src = Path::new(os::getenv("CARGO_MANIFEST_DIR").unwrap());
let dst = Path::new(os::getenv("OUT_DIR").unwrap());
let mut objects = Vec::new();
for file in files.iter() {
let obj = dst.join(*file).with_extension("o");
std::io::fs::mkdir_recursive(&obj.dir_path(), std::io::USER_RWX).unwrap();
run(cmd.clone().arg(src.join(*file)).arg("-o").arg(&obj));
objects.push(obj);
}
run(Command::new(ar(target.as_slice())).arg("crus")
.arg(dst.join(output))
.args(objects.as_slice())
.args(config.objects.as_slice()));
println!("cargo:rustc-flags=-L {} -l {}:static",
dst.display(), output.slice(3, output.len() - 2));
}
fn run(cmd: &mut Command) {
println!("running: {}", cmd);
assert!(cmd.stdout(InheritFd(1))
.stderr(InheritFd(2))
.status()
.unwrap()
.success());
}
fn gcc(target: &str) -> String {
let is_android = target.find_str("android").is_some();
os::getenv("CC").unwrap_or(if cfg!(windows) {
"gcc".to_string()
} else if is_android {
format!("{}-gcc", target)
} else {
"cc".to_string()
})
}
fn ar(target: &str) -> String |
fn cflags() -> Vec<String> {
os::getenv("CFLAGS").unwrap_or(String::new())
.as_slice().words().map(|s| s.to_string())
.collect()
}
fn ios_flags(target: &str) -> Vec<String> {
let mut is_device_arch = false;
let mut res = Vec::new();
if target.starts_with("arm-") {
res.push("-arch");
res.push("armv7");
is_device_arch = true;
} else if target.starts_with("arm64-") {
res.push("-arch");
res.push("arm64");
is_device_arch = true;
} else if target.starts_with("i386-") {
res.push("-m32");
} else if target.starts_with("x86_64-") {
res.push("-m64");
}
let sdk = if is_device_arch {"iphoneos"} else {"iphonesimulator"};
println!("Detecting iOS SDK path for {}", sdk);
let sdk_path = Command::new("xcrun")
.arg("--show-sdk-path")
.arg("--sdk")
.arg(sdk)
.stderr(InheritFd(2))
.output()
.unwrap()
.output;
let sdk_path = String::from_utf8(sdk_path).unwrap();
res.push("-isysroot");
res.push(sdk_path.as_slice().trim());
res.iter().map(|s| s.to_string()).collect::<Vec<_>>()
}
| {
let is_android = target.find_str("android").is_some();
os::getenv("AR").unwrap_or(if is_android {
format!("{}-ar", target)
} else {
"ar".to_string()
})
} | identifier_body |
lib.rs | #![feature(if_let)]
use std::os;
use std::io::Command;
use std::io::process::InheritFd;
use std::default::Default;
/// Extra configuration to pass to gcc.
pub struct | {
/// Directories where gcc will look for header files.
pub include_directories: Vec<Path>,
/// Additional definitions (`-DKEY` or `-DKEY=VALUE`).
pub definitions: Vec<(String, Option<String>)>,
/// Additional object files to link into the final archive
pub objects: Vec<Path>,
}
impl Default for Config {
fn default() -> Config {
Config {
include_directories: Vec::new(),
definitions: Vec::new(),
objects: Vec::new(),
}
}
}
/// Compile a library from the given set of input C files.
///
/// This will simply compile all files into object files and then assemble them
/// into the output. This will read the standard environment variables to detect
/// cross compilations and such.
///
/// # Example
///
/// ```no_run
/// use std::default::Default;
/// gcc::compile_library("libfoo.a", &Default::default(), &[
/// "foo.c",
/// "bar.c",
/// ]);
/// ```
pub fn compile_library(output: &str, config: &Config, files: &[&str]) {
assert!(output.starts_with("lib"));
assert!(output.ends_with(".a"));
let target = os::getenv("TARGET").unwrap();
let opt_level = os::getenv("OPT_LEVEL").unwrap();
let mut cmd = Command::new(gcc(target.as_slice()));
cmd.arg(format!("-O{}", opt_level));
cmd.arg("-c");
cmd.arg("-ffunction-sections").arg("-fdata-sections");
cmd.args(cflags().as_slice());
if target.as_slice().contains("-ios") {
cmd.args(ios_flags(target.as_slice()).as_slice());
} else {
if target.as_slice().contains("i686") {
cmd.arg("-m32");
} else if target.as_slice().contains("x86_64") {
cmd.arg("-m64");
}
if !target.as_slice().contains("i686") {
cmd.arg("-fPIC");
}
}
for directory in config.include_directories.iter() {
cmd.arg("-I").arg(directory);
}
for &(ref key, ref value) in config.definitions.iter() {
if let &Some(ref value) = value {
cmd.arg(format!("-D{}={}", key, value));
} else {
cmd.arg(format!("-D{}", key));
}
}
let src = Path::new(os::getenv("CARGO_MANIFEST_DIR").unwrap());
let dst = Path::new(os::getenv("OUT_DIR").unwrap());
let mut objects = Vec::new();
for file in files.iter() {
let obj = dst.join(*file).with_extension("o");
std::io::fs::mkdir_recursive(&obj.dir_path(), std::io::USER_RWX).unwrap();
run(cmd.clone().arg(src.join(*file)).arg("-o").arg(&obj));
objects.push(obj);
}
run(Command::new(ar(target.as_slice())).arg("crus")
.arg(dst.join(output))
.args(objects.as_slice())
.args(config.objects.as_slice()));
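// Added note: `output` has the form "libNAME.a"; slice(3, len - 2) strips the
// "lib" prefix and ".a" suffix so that only NAME is passed to `-l NAME:static`.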
println!("cargo:rustc-flags=-L {} -l {}:static",
dst.display(), output.slice(3, output.len() - 2));
}
fn run(cmd: &mut Command) {
println!("running: {}", cmd);
assert!(cmd.stdout(InheritFd(1))
.stderr(InheritFd(2))
.status()
.unwrap()
.success());
}
fn gcc(target: &str) -> String {
let is_android = target.find_str("android").is_some();
os::getenv("CC").unwrap_or(if cfg!(windows) {
"gcc".to_string()
} else if is_android {
format!("{}-gcc", target)
} else {
"cc".to_string()
})
}
fn ar(target: &str) -> String {
let is_android = target.find_str("android").is_some();
os::getenv("AR").unwrap_or(if is_android {
format!("{}-ar", target)
} else {
"ar".to_string()
})
}
fn cflags() -> Vec<String> {
os::getenv("CFLAGS").unwrap_or(String::new())
.as_slice().words().map(|s| s.to_string())
.collect()
}
fn ios_flags(target: &str) -> Vec<String> {
let mut is_device_arch = false;
let mut res = Vec::new();
if target.starts_with("arm-") {
res.push("-arch");
res.push("armv7");
is_device_arch = true;
} else if target.starts_with("arm64-") {
res.push("-arch");
res.push("arm64");
is_device_arch = true;
} else if target.starts_with("i386-") {
res.push("-m32");
} else if target.starts_with("x86_64-") {
res.push("-m64");
}
let sdk = if is_device_arch {"iphoneos"} else {"iphonesimulator"};
println!("Detecting iOS SDK path for {}", sdk);
let sdk_path = Command::new("xcrun")
.arg("--show-sdk-path")
.arg("--sdk")
.arg(sdk)
.stderr(InheritFd(2))
.output()
.unwrap()
.output;
let sdk_path = String::from_utf8(sdk_path).unwrap();
res.push("-isysroot");
res.push(sdk_path.as_slice().trim());
res.iter().map(|s| s.to_string()).collect::<Vec<_>>()
}
| Config | identifier_name |
converge.c.ts | import * as dts from 'dts-element';
import { max_curry_level } from './$curriedFunctions';
const min_input_count = 1;
const max_input_count = 3;
const min_function_count = 1;
const max_function_count = max_curry_level;
const generic_return = 'R';
const after_generics = [...new Array(max_function_count)].map(
(_, index) => `U${index + 1}`,
);
const input_generics = [...new Array(max_input_count)].map(
(_, index) => `T${index + 1}`,
);
const parameters = [
...new Array(Math.max(max_function_count, max_input_count)),
].map((_, index) => `v${index + 1}`);
const import_curried_functions = [];
const declarations = [];
for (
let input_count = min_input_count;
input_count <= max_input_count;
input_count++
) {
const curried_function_name = `CurriedFunction${input_count}`;
for (
let function_count = min_function_count;
function_count <= max_function_count;
function_count++
) {
const current_input_generics = input_generics.slice(0, input_count);
const current_after_generics = after_generics.slice(0, function_count);
const current_generics = [
...current_input_generics,
...current_after_generics,
generic_return,
];
declarations.push(`
function $i${input_count}f${function_count}<${current_generics.join(
',',
)}>(
after: (${current_after_generics
.map((generic, index) => `${parameters[index]}: ${generic}`)
.join(',')}) => ${generic_return},
fns: [${current_after_generics
.map( | (input_generic, index) => `
${parameters[index]}: ${input_generic}
`,
)
.join(',')}) => ${after_generic}
`,
)
.join(',')}]
): ${curried_function_name}<${[
...current_input_generics,
generic_return,
].join(',')}>;
`);
}
import_curried_functions.push(curried_function_name);
}
export default dts.parse(`
import {${import_curried_functions.join(',')}} from './$curriedFunctions';
import {List, Variadic} from './$types';
${declarations.join('\n')}
function $variadic<${generic_return}>(after: Variadic<${generic_return}>, fns: List<Variadic<any>>): Variadic<${generic_return}>;
`).members; | after_generic => `
(${current_input_generics
.map( | random_line_split |
converge.c.ts | import * as dts from 'dts-element';
import { max_curry_level } from './$curriedFunctions';
const min_input_count = 1;
const max_input_count = 3;
const min_function_count = 1;
const max_function_count = max_curry_level;
const generic_return = 'R';
const after_generics = [...new Array(max_function_count)].map(
(_, index) => `U${index + 1}`,
);
const input_generics = [...new Array(max_input_count)].map(
(_, index) => `T${index + 1}`,
);
const parameters = [
...new Array(Math.max(max_function_count, max_input_count)),
].map((_, index) => `v${index + 1}`);
const import_curried_functions = [];
const declarations = [];
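// Note (added): this script appears to generate overload signatures for a
// Ramda-style `converge`: each `$i{N}f{M}` overload pairs an after-function of
// arity M with a tuple of M branching functions of arity N, and returns a
// curried function over the N shared inputs.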
for (
let input_count = min_input_count;
input_count <= max_input_count;
input_count++
) |
export default dts.parse(`
import {${import_curried_functions.join(',')}} from './$curriedFunctions';
import {List, Variadic} from './$types';
${declarations.join('\n')}
function $variadic<${generic_return}>(after: Variadic<${generic_return}>, fns: List<Variadic<any>>): Variadic<${generic_return}>;
`).members;
| {
const curried_function_name = `CurriedFunction${input_count}`;
for (
let function_count = min_function_count;
function_count <= max_function_count;
function_count++
) {
const current_input_generics = input_generics.slice(0, input_count);
const current_after_generics = after_generics.slice(0, function_count);
const current_generics = [
...current_input_generics,
...current_after_generics,
generic_return,
];
declarations.push(`
function $i${input_count}f${function_count}<${current_generics.join(
',',
)}>(
after: (${current_after_generics
.map((generic, index) => `${parameters[index]}: ${generic}`)
.join(',')}) => ${generic_return},
fns: [${current_after_generics
.map(
after_generic => `
(${current_input_generics
.map(
(input_generic, index) => `
${parameters[index]}: ${input_generic}
`,
)
.join(',')}) => ${after_generic}
`,
)
.join(',')}]
): ${curried_function_name}<${[
...current_input_generics,
generic_return,
].join(',')}>;
`);
}
import_curried_functions.push(curried_function_name);
} | conditional_block |
resizeDialog.js | import React from 'react'
import { Map } from 'immutable'
import Path from 'path'
const ResizeDialogModal = ({ resizePath, resizeSize, initialSize, actions }) => {
const handleSettingInput = (e) => actions.updateModal('resizeSize', e.target.value)
const hideResizeDialog = (newSize) => actions.hideResizeDialog(Map({ path: resizePath, size: newSize }))
const closeResizeDialog = () => hideResizeDialog(0)
const handleSubmit = () => {
if (resizeSize >= 35 && resizeSize !== initialSize.toString()) |
}
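// Note (added): `resizeSize` comes from a text input, so it is a string;
// `resizeSize >= 35` relies on JS numeric coercion, and the comparison with
// `initialSize.toString()` is a deliberate string equality check.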
const handleSettingKeyDown = (e) => {
if (e.keyCode === 13) {
handleSubmit()
e.preventDefault()
}
}
return (
<div className={'hosting-options-modal modal' + (resizePath ? '' : ' hidden')}>
<form className="hosting-options modal-message" onSubmit="">
<div className="close-button" onClick={closeResizeDialog}>
X
</div>
<h3>Resize "{Path.basename(resizePath)}"</h3>
<p>
<label>Size in GB (Min is 35 GB)</label>
<input type="number" onChange={handleSettingInput} onKeyDown={handleSettingKeyDown} value={resizeSize} min="35" />
</p>
<span className={'error' + ( resizeSize < 35 ? '' : ' hidden' )}>Storage folder must be at least 35 GB.</span>
<p>
<input className={'button accept' + ( resizeSize !== initialSize.toString() && resizeSize >= 35 ? '' : ' disabled' )} type="button" value="Save" onClick={handleSubmit} />
</p>
</form>
</div>
)
}
export default ResizeDialogModal
| {
hideResizeDialog(resizeSize)
} | conditional_block |
resizeDialog.js | import React from 'react'
import { Map } from 'immutable'
import Path from 'path'
const ResizeDialogModal = ({ resizePath, resizeSize, initialSize, actions }) => {
const handleSettingInput = (e) => actions.updateModal('resizeSize', e.target.value)
const hideResizeDialog = (newSize) => actions.hideResizeDialog(Map({ path: resizePath, size: newSize }))
const closeResizeDialog = () => hideResizeDialog(0)
const handleSubmit = () => {
if (resizeSize >= 35 && resizeSize !== initialSize.toString()) {
hideResizeDialog(resizeSize)
}
}
const handleSettingKeyDown = (e) => {
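// keyCode 13 is the Enter key (added note)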
if (e.keyCode === 13) {
handleSubmit()
e.preventDefault()
}
}
return (
<div className={'hosting-options-modal modal' + (resizePath ? '' : ' hidden')}>
<form className="hosting-options modal-message" onSubmit="">
<div className="close-button" onClick={closeResizeDialog}>
X
</div>
<h3>Resize "{Path.basename(resizePath)}"</h3>
<p>
<label>Size in GB (Min is 35 GB)</label>
<input type="number" onChange={handleSettingInput} onKeyDown={handleSettingKeyDown} value={resizeSize} min="35" />
</p>
<span className={'error' + ( resizeSize < 35 ? '' : ' hidden' )}>Storage folder must be at least 35 GB.</span>
<p>
<input className={'button accept' + ( resizeSize !== initialSize.toString() && resizeSize >= 35 ? '' : ' disabled' )} type="button" value="Save" onClick={handleSubmit} />
</p>
</form>
</div>
) | export default ResizeDialogModal | }
| random_line_split |
Ball.ts | /*
* This file is part of 6502.ts, an emulator for 6502 based systems built
* in Typescript
*
* Copyright (c) 2014 -- 2020 Christian Speckner and contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
const enum Count {
renderCounterOffset = -4
}
export default class Ball {
constructor(private _collisionMask: number, private _flushLineCache: () => void) {
this.reset();
}
reset(): void {
this.color = 0xffffffff;
this.collision = 0;
this._width = 1;
this._enabledOld = false;
this._enabledNew = false;
this._enabled = false;
this._counter = 0;
this._rendering = false;
this._renderCounter = Count.renderCounterOffset;
this._moving = false;
this._hmmClocks = 0;
this._delaying = false;
this._effectiveWidth = 0;
this._lastMovementTick = 0;
}
enabl(value: number): void {
const enabledNewOldValue = this._enabledNew;
this._enabledNew = (value & 2) > 0;
if (enabledNewOldValue !== this._enabledNew && !this._delaying) {
this._flushLineCache();
this._updateEnabled();
}
}
hmbl(value: number): void {
// Shift and flip the highest bit --- this gives us the necessary movement to the right
this._hmmClocks = (value >>> 4) ^ 0x8;
}
resbl(counter: number): void {
this._counter = counter;
this._rendering = true;
this._renderCounter = Count.renderCounterOffset + (counter - 157);
}
ctrlpf(value: number): void {
const width = this._widths[(value & 0x30) >>> 4];
if (width !== this._width) {
this._flushLineCache();
}
this._width = width;
}
vdelbl(value: number): void {
const oldDelaying = this._delaying;
this._delaying = (value & 0x01) > 0;
if (oldDelaying !== this._delaying) {
this._flushLineCache();
this._updateEnabled();
}
}
startMovement(): void {
this._moving = true;
}
movementTick(clock: number, apply: boolean): boolean {
this._lastMovementTick = this._counter;
// Stop movement only if the clock matches exactly --- this is crucial for cosmic ark type hacks
if (clock === this._hmmClocks) {
this._moving = false;
}
if (this._moving && apply) {
this.tick(false);
}
return this._moving;
}
tick(isReceivingHclock: boolean): void |
getPixel(colorIn: number): number {
return this.collision ? colorIn : this.color;
}
shuffleStatus(): void {
const oldEnabledOld = this._enabledOld;
this._enabledOld = this._enabledNew;
if (this._delaying && this._enabledOld !== oldEnabledOld) {
this._flushLineCache();
this._updateEnabled();
}
}
setColor(color: number): void {
if (color !== this.color && this._enabled) {
this._flushLineCache();
}
this.color = color;
}
private _updateEnabled(): void {
this._enabled = this._delaying ? this._enabledOld : this._enabledNew;
}
color = 0xffffffff;
collision = 0;
private _enabledOld = false;
private _enabledNew = false;
private _enabled = false;
private _hmmClocks = 0;
private _counter = 0;
private _moving = false;
private _width = 1;
private _effectiveWidth = 0;
private _lastMovementTick = 0;
private _rendering = false;
private _renderCounter = Count.renderCounterOffset;
private _widths = new Uint8Array([1, 2, 4, 8]);
private _delaying = false;
}
| {
this.collision = this._rendering && this._renderCounter >= 0 && this._enabled ? 0 : this._collisionMask;
const starfieldEffect = this._moving && isReceivingHclock;
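// Added note: when the render window opens at counter 156 while HMOVE is still
// active, the effective ball width is stretched or suppressed depending on how
// many clocks ago movement last ticked -- this reproduces the "starfield"
// distortion that games like Cosmic Ark exploit.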
if (this._counter === 156) {
const starfieldDelta = (this._counter - this._lastMovementTick + 160) % 4;
this._rendering = true;
this._renderCounter = Count.renderCounterOffset;
if (starfieldEffect && starfieldDelta === 3 && this._width < 4) {
this._renderCounter++;
}
switch (starfieldDelta) {
case 3:
this._effectiveWidth = this._width === 1 ? 2 : this._width;
break;
case 2:
this._effectiveWidth = 0;
break;
default:
this._effectiveWidth = this._width;
break;
}
} else if (this._rendering && ++this._renderCounter >= (starfieldEffect ? this._effectiveWidth : this._width)) {
this._rendering = false;
}
if (++this._counter >= 160) {
this._counter = 0;
}
} | identifier_body |
Ball.ts | /*
* This file is part of 6502.ts, an emulator for 6502 based systems built
* in Typescript
*
* Copyright (c) 2014 -- 2020 Christian Speckner and contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
const enum Count {
renderCounterOffset = -4
}
export default class Ball {
| (private _collisionMask: number, private _flushLineCache: () => void) {
this.reset();
}
reset(): void {
this.color = 0xffffffff;
this.collision = 0;
this._width = 1;
this._enabledOld = false;
this._enabledNew = false;
this._enabled = false;
this._counter = 0;
this._rendering = false;
this._renderCounter = Count.renderCounterOffset;
this._moving = false;
this._hmmClocks = 0;
this._delaying = false;
this._effectiveWidth = 0;
this._lastMovementTick = 0;
}
enabl(value: number): void {
const enabledNewOldValue = this._enabledNew;
this._enabledNew = (value & 2) > 0;
if (enabledNewOldValue !== this._enabledNew && !this._delaying) {
this._flushLineCache();
this._updateEnabled();
}
}
hmbl(value: number): void {
// Shift and flip the highest bit --- this gives us the necessary movement to the right
this._hmmClocks = (value >>> 4) ^ 0x8;
}
resbl(counter: number): void {
this._counter = counter;
this._rendering = true;
this._renderCounter = Count.renderCounterOffset + (counter - 157);
}
ctrlpf(value: number): void {
const width = this._widths[(value & 0x30) >>> 4];
if (width !== this._width) {
this._flushLineCache();
}
this._width = width;
}
vdelbl(value: number): void {
const oldDelaying = this._delaying;
this._delaying = (value & 0x01) > 0;
if (oldDelaying !== this._delaying) {
this._flushLineCache();
this._updateEnabled();
}
}
startMovement(): void {
this._moving = true;
}
movementTick(clock: number, apply: boolean): boolean {
this._lastMovementTick = this._counter;
// Stop movement only if the clock matches exactly --- this is crucial for cosmic ark type hacks
if (clock === this._hmmClocks) {
this._moving = false;
}
if (this._moving && apply) {
this.tick(false);
}
return this._moving;
}
tick(isReceivingHclock: boolean): void {
this.collision = this._rendering && this._renderCounter >= 0 && this._enabled ? 0 : this._collisionMask;
const starfieldEffect = this._moving && isReceivingHclock;
if (this._counter === 156) {
const starfieldDelta = (this._counter - this._lastMovementTick + 160) % 4;
this._rendering = true;
this._renderCounter = Count.renderCounterOffset;
if (starfieldEffect && starfieldDelta === 3 && this._width < 4) {
this._renderCounter++;
}
switch (starfieldDelta) {
case 3:
this._effectiveWidth = this._width === 1 ? 2 : this._width;
break;
case 2:
this._effectiveWidth = 0;
break;
default:
this._effectiveWidth = this._width;
break;
}
} else if (this._rendering && ++this._renderCounter >= (starfieldEffect ? this._effectiveWidth : this._width)) {
this._rendering = false;
}
if (++this._counter >= 160) {
this._counter = 0;
}
}
getPixel(colorIn: number): number {
return this.collision ? colorIn : this.color;
}
shuffleStatus(): void {
const oldEnabledOld = this._enabledOld;
this._enabledOld = this._enabledNew;
if (this._delaying && this._enabledOld !== oldEnabledOld) {
this._flushLineCache();
this._updateEnabled();
}
}
setColor(color: number): void {
if (color !== this.color && this._enabled) {
this._flushLineCache();
}
this.color = color;
}
private _updateEnabled(): void {
this._enabled = this._delaying ? this._enabledOld : this._enabledNew;
}
color = 0xffffffff;
collision = 0;
private _enabledOld = false;
private _enabledNew = false;
private _enabled = false;
private _hmmClocks = 0;
private _counter = 0;
private _moving = false;
private _width = 1;
private _effectiveWidth = 0;
private _lastMovementTick = 0;
private _rendering = false;
private _renderCounter = Count.renderCounterOffset;
private _widths = new Uint8Array([1, 2, 4, 8]);
private _delaying = false;
}
| constructor | identifier_name |
Ball.ts | /*
* This file is part of 6502.ts, an emulator for 6502 based systems built
* in Typescript
*
* Copyright (c) 2014 -- 2020 Christian Speckner and contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
const enum Count {
renderCounterOffset = -4
}
export default class Ball {
constructor(private _collisionMask: number, private _flushLineCache: () => void) {
this.reset();
}
reset(): void {
this.color = 0xffffffff;
this.collision = 0;
this._width = 1;
this._enabledOld = false;
this._enabledNew = false;
this._enabled = false;
this._counter = 0;
this._rendering = false;
this._renderCounter = Count.renderCounterOffset;
this._moving = false;
this._hmmClocks = 0;
this._delaying = false;
this._effectiveWidth = 0;
this._lastMovementTick = 0;
}
enabl(value: number): void {
const enabledNewOldValue = this._enabledNew;
this._enabledNew = (value & 2) > 0;
if (enabledNewOldValue !== this._enabledNew && !this._delaying) {
this._flushLineCache();
this._updateEnabled();
}
}
hmbl(value: number): void {
// Shift and flip the highest bit --- this gives us the necessary movement to the right
this._hmmClocks = (value >>> 4) ^ 0x8;
}
resbl(counter: number): void {
this._counter = counter;
this._rendering = true;
this._renderCounter = Count.renderCounterOffset + (counter - 157);
}
ctrlpf(value: number): void {
const width = this._widths[(value & 0x30) >>> 4];
if (width !== this._width) {
this._flushLineCache();
}
this._width = width; | }
vdelbl(value: number): void {
const oldDelaying = this._delaying;
this._delaying = (value & 0x01) > 0;
if (oldDelaying !== this._delaying) {
this._flushLineCache();
this._updateEnabled();
}
}
startMovement(): void {
this._moving = true;
}
movementTick(clock: number, apply: boolean): boolean {
this._lastMovementTick = this._counter;
// Stop movement only if the clock matches exactly --- this is crucial for cosmic ark type hacks
if (clock === this._hmmClocks) {
this._moving = false;
}
if (this._moving && apply) {
this.tick(false);
}
return this._moving;
}
tick(isReceivingHclock: boolean): void {
this.collision = this._rendering && this._renderCounter >= 0 && this._enabled ? 0 : this._collisionMask;
const starfieldEffect = this._moving && isReceivingHclock;
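// Added note: when the render window opens at counter 156 while HMOVE is still
// active, the effective ball width is stretched or suppressed depending on how
// many clocks ago movement last ticked -- this reproduces the "starfield"
// distortion that games like Cosmic Ark exploit.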
if (this._counter === 156) {
const starfieldDelta = (this._counter - this._lastMovementTick + 160) % 4;
this._rendering = true;
this._renderCounter = Count.renderCounterOffset;
if (starfieldEffect && starfieldDelta === 3 && this._width < 4) {
this._renderCounter++;
}
switch (starfieldDelta) {
case 3:
this._effectiveWidth = this._width === 1 ? 2 : this._width;
break;
case 2:
this._effectiveWidth = 0;
break;
default:
this._effectiveWidth = this._width;
break;
}
} else if (this._rendering && ++this._renderCounter >= (starfieldEffect ? this._effectiveWidth : this._width)) {
this._rendering = false;
}
if (++this._counter >= 160) {
this._counter = 0;
}
}
getPixel(colorIn: number): number {
return this.collision ? colorIn : this.color;
}
shuffleStatus(): void {
const oldEnabledOld = this._enabledOld;
this._enabledOld = this._enabledNew;
if (this._delaying && this._enabledOld !== oldEnabledOld) {
this._flushLineCache();
this._updateEnabled();
}
}
setColor(color: number): void {
if (color !== this.color && this._enabled) {
this._flushLineCache();
}
this.color = color;
}
private _updateEnabled(): void {
this._enabled = this._delaying ? this._enabledOld : this._enabledNew;
}
color = 0xffffffff;
collision = 0;
private _enabledOld = false;
private _enabledNew = false;
private _enabled = false;
private _hmmClocks = 0;
private _counter = 0;
private _moving = false;
private _width = 1;
private _effectiveWidth = 0;
private _lastMovementTick = 0;
private _rendering = false;
private _renderCounter = Count.renderCounterOffset;
private _widths = new Uint8Array([1, 2, 4, 8]);
private _delaying = false;
} | random_line_split |
|
vperm2i128.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn vperm2i128_1() {
run_test(&Instruction { mnemonic: Mnemonic::VPERM2I128, operand1: Some(Direct(YMM0)), operand2: Some(Direct(YMM5)), operand3: Some(Direct(YMM1)), operand4: Some(Literal8(22)), lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 227, 85, 70, 193, 22], OperandSize::Dword)
}
fn vperm2i128_2() {
run_test(&Instruction { mnemonic: Mnemonic::VPERM2I128, operand1: Some(Direct(YMM6)), operand2: Some(Direct(YMM2)), operand3: Some(IndirectScaledDisplaced(EDX, Two, 676667494, Some(OperandSize::Ymmword), None)), operand4: Some(Literal8(6)), lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 227, 109, 70, 52, 85, 102, 32, 85, 40, 6], OperandSize::Dword)
}
fn vperm2i128_3() {
run_test(&Instruction { mnemonic: Mnemonic::VPERM2I128, operand1: Some(Direct(YMM4)), operand2: Some(Direct(YMM1)), operand3: Some(Direct(YMM5)), operand4: Some(Literal8(103)), lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 227, 117, 70, 229, 103], OperandSize::Qword)
}
fn vperm2i128_4() {
run_test(&Instruction { mnemonic: Mnemonic::VPERM2I128, operand1: Some(Direct(YMM5)), operand2: Some(Direct(YMM4)), operand3: Some(IndirectScaledDisplaced(RCX, Eight, 707114910, Some(OperandSize::Ymmword), None)), operand4: Some(Literal8(11)), lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 227, 93, 70, 44, 205, 158, 183, 37, 42, 11], OperandSize::Qword) | } | random_line_split |
|
vperm2i128.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn | () {
run_test(&Instruction { mnemonic: Mnemonic::VPERM2I128, operand1: Some(Direct(YMM0)), operand2: Some(Direct(YMM5)), operand3: Some(Direct(YMM1)), operand4: Some(Literal8(22)), lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 227, 85, 70, 193, 22], OperandSize::Dword)
}
fn vperm2i128_2() {
run_test(&Instruction { mnemonic: Mnemonic::VPERM2I128, operand1: Some(Direct(YMM6)), operand2: Some(Direct(YMM2)), operand3: Some(IndirectScaledDisplaced(EDX, Two, 676667494, Some(OperandSize::Ymmword), None)), operand4: Some(Literal8(6)), lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 227, 109, 70, 52, 85, 102, 32, 85, 40, 6], OperandSize::Dword)
}
fn vperm2i128_3() {
run_test(&Instruction { mnemonic: Mnemonic::VPERM2I128, operand1: Some(Direct(YMM4)), operand2: Some(Direct(YMM1)), operand3: Some(Direct(YMM5)), operand4: Some(Literal8(103)), lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 227, 117, 70, 229, 103], OperandSize::Qword)
}
fn vperm2i128_4() {
run_test(&Instruction { mnemonic: Mnemonic::VPERM2I128, operand1: Some(Direct(YMM5)), operand2: Some(Direct(YMM4)), operand3: Some(IndirectScaledDisplaced(RCX, Eight, 707114910, Some(OperandSize::Ymmword), None)), operand4: Some(Literal8(11)), lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 227, 93, 70, 44, 205, 158, 183, 37, 42, 11], OperandSize::Qword)
}
| vperm2i128_1 | identifier_name |
vperm2i128.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn vperm2i128_1() {
run_test(&Instruction { mnemonic: Mnemonic::VPERM2I128, operand1: Some(Direct(YMM0)), operand2: Some(Direct(YMM5)), operand3: Some(Direct(YMM1)), operand4: Some(Literal8(22)), lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 227, 85, 70, 193, 22], OperandSize::Dword)
}
fn vperm2i128_2() {
run_test(&Instruction { mnemonic: Mnemonic::VPERM2I128, operand1: Some(Direct(YMM6)), operand2: Some(Direct(YMM2)), operand3: Some(IndirectScaledDisplaced(EDX, Two, 676667494, Some(OperandSize::Ymmword), None)), operand4: Some(Literal8(6)), lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 227, 109, 70, 52, 85, 102, 32, 85, 40, 6], OperandSize::Dword)
}
fn vperm2i128_3() |
fn vperm2i128_4() {
run_test(&Instruction { mnemonic: Mnemonic::VPERM2I128, operand1: Some(Direct(YMM5)), operand2: Some(Direct(YMM4)), operand3: Some(IndirectScaledDisplaced(RCX, Eight, 707114910, Some(OperandSize::Ymmword), None)), operand4: Some(Literal8(11)), lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 227, 93, 70, 44, 205, 158, 183, 37, 42, 11], OperandSize::Qword)
}
| {
run_test(&Instruction { mnemonic: Mnemonic::VPERM2I128, operand1: Some(Direct(YMM4)), operand2: Some(Direct(YMM1)), operand3: Some(Direct(YMM5)), operand4: Some(Literal8(103)), lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 227, 117, 70, 229, 103], OperandSize::Qword)
} | identifier_body |
hdpmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
#
# HDP inference code is adapted from the onlinehdp.py script by
# Chong Wang (chongw at cs.princeton.edu).
# http://www.cs.princeton.edu/~chongw/software/onlinehdp.tar.gz
#
# Some show/print topics code is adapted from Dr. Hoffman's online lda sample code,
# (C) 2010 Matthew D. Hoffman, GNU GPL 3.0
# http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
This module encapsulates functionality for the online Hierarchical Dirichlet Process algorithm.
It allows both model estimation from a training corpus and inference of topic
distribution on new, unseen documents.
The core estimation code is directly adapted from the `onlinehdp.py` script
by C. Wang; see
**Wang, Paisley, Blei: Online Variational Inference for the Hierarchical Dirichlet
Process, JMLR (2011).**
http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf
The algorithm:
* is **streamed**: training documents come in sequentially, no random access,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint
"""
from __future__ import with_statement
import logging, time
import numpy as np
import scipy.special as sp
from gensim import interfaces, utils, matutils
from six.moves import xrange
logger = logging.getLogger(__name__)
meanchangethresh = 0.00001
rhot_bound = 0.0
def log_normalize(v):
log_max = 100.0
if len(v.shape) == 1:
max_val = np.max(v)
log_shift = log_max - np.log(len(v) + 1.0) - max_val
tot = np.sum(np.exp(v + log_shift))
log_norm = np.log(tot) - log_shift
v = v - log_norm
else:
max_val = np.max(v, 1)
log_shift = log_max - np.log(v.shape[1] + 1.0) - max_val
tot = np.sum(np.exp(v + log_shift[:, np.newaxis]), 1)
log_norm = np.log(tot) - log_shift
v = v - log_norm[:, np.newaxis]
return (v, log_norm)
def dirichlet_expectation(alpha):
"""
For a vector theta ~ Dir(alpha), compute E[log(theta)] given alpha.
"""
if (len(alpha.shape) == 1):
return(sp.psi(alpha) - sp.psi(np.sum(alpha)))
return(sp.psi(alpha) - sp.psi(np.sum(alpha, 1))[:, np.newaxis])
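# Illustration (added, not in the original source): for a symmetric Dir(1, 1),
# psi(1) - psi(2) = -1.0 exactly, so dirichlet_expectation(np.ones(2)) returns
# array([-1., -1.]); the 2-D branch applies the same computation row-wise.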
def expect_log_sticks(sticks):
"""
For stick-breaking hdp, return the E[log(sticks)]
"""
dig_sum = sp.psi(np.sum(sticks, 0))
ElogW = sp.psi(sticks[0]) - dig_sum
Elog1_W = sp.psi(sticks[1]) - dig_sum
n = len(sticks[0]) + 1
Elogsticks = np.zeros(n)
Elogsticks[0: n - 1] = ElogW
Elogsticks[1:] = Elogsticks[1:] + np.cumsum(Elog1_W)
return Elogsticks
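# Added note: with sticks V_k ~ Beta(a_k, b_k) (rows 0 and 1 of `sticks`),
# E[log V_k] = psi(a_k) - psi(a_k + b_k) and E[log(1 - V_k)] = psi(b_k) -
# psi(a_k + b_k); the cumulative sum above yields the stick-breaking weights
# E[log pi_k] = E[log V_k] + sum_{l<k} E[log(1 - V_l)].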
def lda_e_step(doc_word_ids, doc_word_counts, alpha, beta, max_iter=100):
gamma = np.ones(len(alpha))
expElogtheta = np.exp(dirichlet_expectation(gamma))
betad = beta[:, doc_word_ids]
phinorm = np.dot(expElogtheta, betad) + 1e-100
counts = np.array(doc_word_counts)
for _ in xrange(max_iter):
lastgamma = gamma
gamma = alpha + expElogtheta * np.dot(counts / phinorm, betad.T)
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
phinorm = np.dot(expElogtheta, betad) + 1e-100
meanchange = np.mean(abs(gamma - lastgamma))
if (meanchange < meanchangethresh):
break
likelihood = np.sum(counts * np.log(phinorm))
likelihood += np.sum((alpha - gamma) * Elogtheta)
likelihood += np.sum(sp.gammaln(gamma) - sp.gammaln(alpha))
likelihood += sp.gammaln(np.sum(alpha)) - sp.gammaln(np.sum(gamma))
return (likelihood, gamma)
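# Usage sketch (hypothetical values, added): given `alpha` of shape (T,) and
# `beta` of shape (T, W),
#   likelihood, gamma = lda_e_step([0, 5, 9], [2, 1, 3], alpha, beta)
# returns the per-document variational bound and the Dirichlet posterior
# parameter `gamma` over the T topics for that one bag-of-words document.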
class SuffStats(object):
def __init__(self, T, Wt, Dt):
self.m_chunksize = Dt
self.m_var_sticks_ss = np.zeros(T)
self.m_var_beta_ss = np.zeros((T, Wt))
def set_zero(self):
self.m_var_sticks_ss.fill(0.0)
self.m_var_beta_ss.fill(0.0)
class HdpModel(interfaces.TransformationABC):
"""
The constructor estimates Hierarchical Dirichlet Process model parameters based
on a training corpus:
>>> hdp = HdpModel(corpus, id2word)
>>> hdp.print_topics(topics=20, topn=10)
Inference on new documents is based on the approximately LDA-equivalent topics.
Model persistency is achieved through its `load`/`save` methods.
"""
def __init__(self, corpus, id2word, max_chunks=None, max_time=None,
chunksize=256, kappa=1.0, tau=64.0, K=15, T=150, alpha=1,
gamma=1, eta=0.01, scale=1.0, var_converge=0.0001,
outputdir=None):
"""
`gamma`: first level concentration
`alpha`: second level concentration
`eta`: the topic Dirichlet
`T`: top level truncation level
`K`: second level truncation level
`kappa`: learning rate
`tau`: slow down parameter
`max_time`: stop training after this many seconds
`max_chunks`: stop after having processed this many chunks (wrap around
corpus beginning in another corpus pass, if there are not enough chunks
in the corpus)
"""
self.corpus = corpus
self.id2word = id2word
self.chunksize = chunksize
self.max_chunks = max_chunks
self.max_time = max_time
self.outputdir = outputdir
self.lda_alpha = None
self.lda_beta = None
self.m_W = len(id2word)
self.m_D = 0
if corpus:
self.m_D = len(corpus)
self.m_T = T
self.m_K = K
self.m_alpha = alpha
self.m_gamma = gamma
self.m_var_sticks = np.zeros((2, T - 1))
self.m_var_sticks[0] = 1.0
self.m_var_sticks[1] = range(T - 1, 0, -1)
self.m_varphi_ss = np.zeros(T)
self.m_lambda = np.random.gamma(1.0, 1.0, (T, self.m_W)) * self.m_D * 100 / (T * self.m_W) - eta
self.m_eta = eta
self.m_Elogbeta = dirichlet_expectation(self.m_eta + self.m_lambda)
self.m_tau = tau + 1
self.m_kappa = kappa
self.m_scale = scale
self.m_updatect = 0
self.m_status_up_to_date = True
self.m_num_docs_processed = 0
self.m_timestamp = np.zeros(self.m_W, dtype=int)
self.m_r = [0]
self.m_lambda_sum = np.sum(self.m_lambda, axis=1)
self.m_var_converge = var_converge
if self.outputdir:
self.save_options()
# if a training corpus was provided, start estimating the model right away
if corpus is not None:
self.update(corpus)
def inference(self, chunk):
|
def __getitem__(self, bow, eps=0.01):
is_corpus, corpus = utils.is_corpus(bow)
if is_corpus:
return self._apply(corpus)
gamma = self.inference([bow])[0]
topic_dist = gamma / sum(gamma) if sum(gamma) != 0 else []
return [(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= eps]
def update(self, corpus):
save_freq = max(1, int(10000 / self.chunksize)) # save every 10k docs, roughly
chunks_processed = 0
start_time = time.clock()
while True:
for chunk in utils.grouper(corpus, self.chunksize):
self.update_chunk(chunk)
self.m_num_docs_processed += len(chunk)
chunks_processed += 1
if self.update_finished(start_time, chunks_processed, self.m_num_docs_processed):
self.update_expectations()
alpha, beta = self.hdp_to_lda()
self.lda_alpha = alpha
self.lda_beta = beta
self.print_topics(20)
if self.outputdir:
self.save_topics()
return
elif chunks_processed % save_freq == 0:
self.update_expectations()
# self.save_topics(self.m_num_docs_processed)
self.print_topics(20)
logger.info('PROGRESS: finished document %i of %i' %
(self.m_num_docs_processed, self.m_D))
def update_finished(self, start_time, chunks_processed, docs_processed):
return (
# chunk limit reached
(self.max_chunks and chunks_processed == self.max_chunks) or
# time limit reached
(self.max_time and time.clock() - start_time > self.max_time) or
# no limits and whole corpus has been processed once
(not self.max_chunks and not self.max_time and docs_processed >= self.m_D))
def update_chunk(self, chunk, update=True, opt_o=True):
# Find the unique words in this chunk...
unique_words = dict()
word_list = []
for doc in chunk:
for word_id, _ in doc:
if word_id not in unique_words:
unique_words[word_id] = len(unique_words)
word_list.append(word_id)
Wt = len(word_list) # length of words in these documents
# ...and do the lazy updates on the necessary columns of lambda
rw = np.array([self.m_r[t] for t in self.m_timestamp[word_list]])
self.m_lambda[:, word_list] *= np.exp(self.m_r[-1] - rw)
self.m_Elogbeta[:, word_list] = \
sp.psi(self.m_eta + self.m_lambda[:, word_list]) - \
sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
ss = SuffStats(self.m_T, Wt, len(chunk))
Elogsticks_1st = expect_log_sticks(self.m_var_sticks) # global sticks
# run variational inference on some new docs
score = 0.0
count = 0
for doc in chunk:
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
doc_score = self.doc_e_step(doc, ss, Elogsticks_1st,
word_list, unique_words, doc_word_ids,
doc_word_counts, self.m_var_converge)
count += sum(doc_word_counts)
score += doc_score
if update:
self.update_lambda(ss, word_list, opt_o)
return (score, count)
def doc_e_step(self, doc, ss, Elogsticks_1st, word_list,
unique_words, doc_word_ids, doc_word_counts, var_converge) :
"""
E-step for a single document.
"""
chunkids = [unique_words[id] for id in doc_word_ids]
Elogbeta_doc = self.m_Elogbeta[:, doc_word_ids]
## very similar to the hdp equations
v = np.zeros((2, self.m_K - 1))
v[0] = 1.0
v[1] = self.m_alpha
# back to the uniform
phi = np.ones((len(doc_word_ids), self.m_K)) * 1.0 / self.m_K
likelihood = 0.0
old_likelihood = -1e200
converge = 1.0
eps = 1e-100
iter = 0
max_iter = 100
# second-level optimization is not yet supported; to be done in the future
while iter < max_iter and (converge < 0.0 or converge > var_converge):
### update variational parameters
# var_phi
if iter < 3:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T)
(log_var_phi, log_norm) = log_normalize(var_phi)
var_phi = np.exp(log_var_phi)
else:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T) + Elogsticks_1st
(log_var_phi, log_norm) = log_normalize(var_phi)
var_phi = np.exp(log_var_phi)
# phi
if iter < 3:
phi = np.dot(var_phi, Elogbeta_doc).T
(log_phi, log_norm) = log_normalize(phi)
phi = np.exp(log_phi)
else:
phi = np.dot(var_phi, Elogbeta_doc).T + Elogsticks_2nd
(log_phi, log_norm) = log_normalize(phi)
phi = np.exp(log_phi)
# v
phi_all = phi * np.array(doc_word_counts)[:, np.newaxis]
v[0] = 1.0 + np.sum(phi_all[:, :self.m_K - 1], 0)
phi_cum = np.flipud(np.sum(phi_all[:, 1:], 0))
v[1] = self.m_alpha + np.flipud(np.cumsum(phi_cum))
Elogsticks_2nd = expect_log_sticks(v)
likelihood = 0.0
# compute likelihood
# var_phi part/ C in john's notation
likelihood += np.sum((Elogsticks_1st - log_var_phi) * var_phi)
# v part/ v in john's notation, john's beta is alpha here
log_alpha = np.log(self.m_alpha)
likelihood += (self.m_K - 1) * log_alpha
dig_sum = sp.psi(np.sum(v, 0))
likelihood += np.sum((np.array([1.0, self.m_alpha])[:, np.newaxis] - v) * (sp.psi(v) - dig_sum))
likelihood -= np.sum(sp.gammaln(np.sum(v, 0))) - np.sum(sp.gammaln(v))
# Z part
likelihood += np.sum((Elogsticks_2nd - log_phi) * phi)
# X part, the data part
likelihood += np.sum(phi.T * np.dot(var_phi, Elogbeta_doc * doc_word_counts))
converge = (likelihood - old_likelihood) / abs(old_likelihood)
old_likelihood = likelihood
if converge < -0.000001:
logger.warning('likelihood is decreasing!')
iter += 1
# update the suff_stat ss
# this time it only contains information from one doc
ss.m_var_sticks_ss += np.sum(var_phi, 0)
ss.m_var_beta_ss[:, chunkids] += np.dot(var_phi.T, phi.T * doc_word_counts)
return likelihood
def update_lambda(self, sstats, word_list, opt_o):
self.m_status_up_to_date = False
# rhot will be between 0 and 1, and says how much to weight
# the information we got from this mini-chunk.
rhot = self.m_scale * pow(self.m_tau + self.m_updatect, -self.m_kappa)
if rhot < rhot_bound:
rhot = rhot_bound
self.m_rhot = rhot
# Update appropriate columns of lambda based on documents.
self.m_lambda[:, word_list] = self.m_lambda[:, word_list] * (1 - rhot) + \
rhot * self.m_D * sstats.m_var_beta_ss / sstats.m_chunksize
self.m_lambda_sum = (1 - rhot) * self.m_lambda_sum + \
rhot * self.m_D * np.sum(sstats.m_var_beta_ss, axis=1) / sstats.m_chunksize
self.m_updatect += 1
self.m_timestamp[word_list] = self.m_updatect
self.m_r.append(self.m_r[-1] + np.log(1 - rhot))
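# Added note: m_r accumulates log(1 - rhot), so a lambda column last touched at
# timestamp t can later be decayed by exp(m_r[-1] - m_r[t]) -- the product of
# all the (1 - rhot) factors it missed -- which is exactly the lazy update
# applied at the top of update_chunk().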
self.m_varphi_ss = (1.0 - rhot) * self.m_varphi_ss + rhot * \
sstats.m_var_sticks_ss * self.m_D / sstats.m_chunksize
if opt_o:
self.optimal_ordering()
## update top level sticks
self.m_var_sticks[0] = self.m_varphi_ss[:self.m_T - 1] + 1.0
var_phi_sum = np.flipud(self.m_varphi_ss[1:])
self.m_var_sticks[1] = np.flipud(np.cumsum(var_phi_sum)) + self.m_gamma
def optimal_ordering(self):
"""
Re-order the topics in place by decreasing total lambda mass.
"""
idx = matutils.argsort(self.m_lambda_sum, reverse=True)
self.m_varphi_ss = self.m_varphi_ss[idx]
self.m_lambda = self.m_lambda[idx, :]
self.m_lambda_sum = self.m_lambda_sum[idx]
self.m_Elogbeta = self.m_Elogbeta[idx, :]
def update_expectations(self):
"""
Since we're doing lazy updates on lambda, at any given moment
the current state of lambda may not be accurate. This function
updates all of the elements of lambda and Elogbeta
so that if (for example) we want to print out the
topics we've learned we'll get the correct behavior.
"""
for w in xrange(self.m_W):
self.m_lambda[:, w] *= np.exp(self.m_r[-1] -
self.m_r[self.m_timestamp[w]])
self.m_Elogbeta = sp.psi(self.m_eta + self.m_lambda) - \
sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
self.m_timestamp[:] = self.m_updatect
self.m_status_up_to_date = True
def print_topics(self, topics=20, topn=20):
"""Alias for `show_topics()` that prints the `topn` most
probable words for `topics` number of topics to log.
Set `topics=-1` to print all topics."""
return self.show_topics(topics=topics, topn=topn, log=True)
def show_topics(self, topics=20, topn=20, log=False, formatted=True):
"""
Print the `topN` most probable words for `topics` number of topics.
Set `topics=-1` to print all topics.
Set `formatted=True` to return the topics as a list of strings, or
`False` as lists of (weight, word) pairs.
"""
if not self.m_status_up_to_date:
self.update_expectations()
betas = self.m_lambda + self.m_eta
hdp_formatter = HdpTopicFormatter(self.id2word, betas)
return hdp_formatter.show_topics(topics, topn, log, formatted)
def save_topics(self, doc_count=None):
"""legacy method; use `self.save()` instead"""
if not self.outputdir:
logger.error("cannot store topics without having specified an output directory")
if doc_count is None:
fname = 'final'
else:
fname = 'doc-%i' % doc_count
fname = '%s/%s.topics' % (self.outputdir, fname)
logger.info("saving topics to %s" % fname)
betas = self.m_lambda + self.m_eta
np.savetxt(fname, betas)
def save_options(self):
"""legacy method; use `self.save()` instead"""
if not self.outputdir:
logger.error("cannot store options without having specified an output directory")
return
fname = '%s/options.dat' % self.outputdir
with utils.smart_open(fname, 'wb') as fout:
fout.write('tau: %s\n' % str(self.m_tau - 1))
fout.write('chunksize: %s\n' % str(self.chunksize))
fout.write('var_converge: %s\n' % str(self.m_var_converge))
fout.write('D: %s\n' % str(self.m_D))
fout.write('K: %s\n' % str(self.m_K))
fout.write('T: %s\n' % str(self.m_T))
fout.write('W: %s\n' % str(self.m_W))
fout.write('alpha: %s\n' % str(self.m_alpha))
fout.write('kappa: %s\n' % str(self.m_kappa))
fout.write('eta: %s\n' % str(self.m_eta))
fout.write('gamma: %s\n' % str(self.m_gamma))
def hdp_to_lda(self):
"""
Compute the LDA almost equivalent HDP.
"""
# alpha
sticks = self.m_var_sticks[0] / (self.m_var_sticks[0] + self.m_var_sticks[1])
alpha = np.zeros(self.m_T)
left = 1.0
for i in xrange(0, self.m_T - 1):
alpha[i] = sticks[i] * left
left = left - alpha[i]
alpha[self.m_T - 1] = left
alpha = alpha * self.m_alpha
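# Added note: `alpha` is the stick-breaking construction pi_1 = s_1,
# pi_k = s_k * prod_{l<k}(1 - s_l), with the leftover mass assigned to the
# last topic and the whole vector scaled by m_alpha.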
# beta
beta = (self.m_lambda + self.m_eta) / (self.m_W * self.m_eta + \
self.m_lambda_sum[:, np.newaxis])
return (alpha, beta)
def evaluate_test_corpus(self, corpus):
logger.info('TEST: evaluating test corpus')
if self.lda_alpha is None or self.lda_beta is None:
self.lda_alpha, self.lda_beta = self.hdp_to_lda()
score = 0.0
total_words = 0
for i, doc in enumerate(corpus):
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
likelihood, gamma = lda_e_step(doc_word_ids, doc_word_counts, self.lda_alpha, self.lda_beta)
theta = gamma / np.sum(gamma)
lda_betad = self.lda_beta[:, doc_word_ids]
log_predicts = np.log(np.dot(theta, lda_betad))
doc_score = sum(log_predicts) / len(doc)
logger.info('TEST: %6d %.5f' % (i, doc_score))
score += likelihood
total_words += sum(doc_word_counts)
logger.info('TEST: average score: %.5f, total score: %.5f, test docs: %d' % (score / total_words, score, len(corpus)))
return score
#endclass HdpModel
class HdpTopicFormatter(object):
(STYLE_GENSIM, STYLE_PRETTY) = (1, 2)
def __init__(self, dictionary=None, topic_data=None, topic_file=None, style=None):
if dictionary is None:
raise ValueError('no dictionary!')
if topic_data is not None:
topics = topic_data
elif topic_file is not None:
topics = np.loadtxt('%s' % topic_file)
else:
raise ValueError('no topic data!')
# sort topics
topics_sums = np.sum(topics, axis=1)
idx = matutils.argsort(topics_sums, reverse=True)
self.data = topics[idx]
self.dictionary = dictionary
if style is None:
style = self.STYLE_GENSIM
self.style = style
def print_topics(self, topics=10, topn=10):
return self.show_topics(topics, topn, True)
def show_topics(self, topics=10, topn=10, log=False, formatted=True):
shown = []
if topics < 0:
topics = len(self.data)
topics = min(topics, len(self.data))
for k in xrange(topics):
lambdak = list(self.data[k, :])
lambdak = lambdak / sum(lambdak)
temp = zip(lambdak, xrange(len(lambdak)))
temp = sorted(temp, key=lambda x: x[0], reverse=True)
topic_terms = self.show_topic_terms(temp, topn)
if formatted:
topic = self.format_topic(k, topic_terms)
# assuming we only output formatted topics
if log:
logger.info(topic)
else:
topic = (k, topic_terms)
shown.append(topic)
return shown
def show_topic_terms(self, topic_data, topn):
return [(self.dictionary[wid], weight) for (weight, wid) in topic_data[:topn]]
def format_topic(self, topic_id, topic_terms):
if self.STYLE_GENSIM == self.style:
fmt = ' + '.join(['%.3f*%s' % (weight, word) for (word, weight) in topic_terms])
fmt = 'topic %i: %s' % (topic_id, fmt)
else:
fmt = '\n'.join([' %20s %.8f' % (word, weight) for (word, weight) in topic_terms])
fmt = 'topic %i:\n%s' % (topic_id, fmt)
return fmt
#endclass HdpTopicFormatter
| if self.lda_alpha is None or self.lda_beta is None:
raise RuntimeError("model must be trained to perform inference")
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents" % len(chunk))
gamma = np.zeros((len(chunk), self.lda_beta.shape[0]))
for d, doc in enumerate(chunk):
if not doc: # leave gamma at zero for empty documents
continue
ids, counts = zip(*doc)
_, gammad = lda_e_step(ids, counts, self.lda_alpha, self.lda_beta)
gamma[d, :] = gammad
return gamma | identifier_body |
hdpmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
#
# HDP inference code is adapted from the onlinehdp.py script by
# Chong Wang (chongw at cs.princeton.edu).
# http://www.cs.princeton.edu/~chongw/software/onlinehdp.tar.gz
#
# Some show/print topics code is adapted from Dr. Hoffman's online lda sample code,
# (C) 2010 Matthew D. Hoffman, GNU GPL 3.0
# http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
This module encapsulates functionality for the online Hierarchical Dirichlet Process algorithm.
It allows both model estimation from a training corpus and inference of topic
distribution on new, unseen documents.
The core estimation code is directly adapted from the `onlinehdp.py` script
by C. Wang; see
**Wang, Paisley, Blei: Online Variational Inference for the Hierarchical Dirichlet
Process, JMLR (2011).**
http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf
The algorithm:
* is **streamed**: training documents come in sequentially, no random access,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint
"""
from __future__ import with_statement
import logging, time
import numpy as np
import scipy.special as sp
from gensim import interfaces, utils, matutils
from six.moves import xrange
logger = logging.getLogger(__name__)
meanchangethresh = 0.00001
rhot_bound = 0.0
def log_normalize(v):
log_max = 100.0
if len(v.shape) == 1:
max_val = np.max(v)
log_shift = log_max - np.log(len(v) + 1.0) - max_val
tot = np.sum(np.exp(v + log_shift))
log_norm = np.log(tot) - log_shift
v = v - log_norm
else:
max_val = np.max(v, 1)
log_shift = log_max - np.log(v.shape[1] + 1.0) - max_val
tot = np.sum(np.exp(v + log_shift[:, np.newaxis]), 1)
log_norm = np.log(tot) - log_shift
v = v - log_norm[:, np.newaxis]
return (v, log_norm)
def dirichlet_expectation(alpha):
"""
For a vector theta ~ Dir(alpha), compute E[log(theta)] given alpha.
"""
if (len(alpha.shape) == 1):
return(sp.psi(alpha) - sp.psi(np.sum(alpha)))
return(sp.psi(alpha) - sp.psi(np.sum(alpha, 1))[:, np.newaxis])
def expect_log_sticks(sticks):
"""
For stick-breaking hdp, return the E[log(sticks)]
"""
dig_sum = sp.psi(np.sum(sticks, 0))
ElogW = sp.psi(sticks[0]) - dig_sum
Elog1_W = sp.psi(sticks[1]) - dig_sum
n = len(sticks[0]) + 1
Elogsticks = np.zeros(n)
Elogsticks[0: n - 1] = ElogW
Elogsticks[1:] = Elogsticks[1:] + np.cumsum(Elog1_W)
return Elogsticks
def lda_e_step(doc_word_ids, doc_word_counts, alpha, beta, max_iter=100):
gamma = np.ones(len(alpha))
expElogtheta = np.exp(dirichlet_expectation(gamma))
betad = beta[:, doc_word_ids]
phinorm = np.dot(expElogtheta, betad) + 1e-100
counts = np.array(doc_word_counts)
for _ in xrange(max_iter):
lastgamma = gamma
gamma = alpha + expElogtheta * np.dot(counts / phinorm, betad.T)
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
phinorm = np.dot(expElogtheta, betad) + 1e-100
meanchange = np.mean(abs(gamma - lastgamma))
if (meanchange < meanchangethresh):
break
likelihood = np.sum(counts * np.log(phinorm))
likelihood += np.sum((alpha - gamma) * Elogtheta)
likelihood += np.sum(sp.gammaln(gamma) - sp.gammaln(alpha))
likelihood += sp.gammaln(np.sum(alpha)) - sp.gammaln(np.sum(gamma))
return (likelihood, gamma)
class SuffStats(object):
def __init__(self, T, Wt, Dt):
self.m_chunksize = Dt
self.m_var_sticks_ss = np.zeros(T)
self.m_var_beta_ss = np.zeros((T, Wt))
def set_zero(self):
self.m_var_sticks_ss.fill(0.0)
self.m_var_beta_ss.fill(0.0)
class HdpModel(interfaces.TransformationABC):
"""
The constructor estimates Hierarchical Dirichlet Process model parameters based
on a training corpus:
>>> hdp = HdpModel(corpus, id2word)
>>> hdp.print_topics(topics=20, topn=10)
Inference on new documents is based on the approximately LDA-equivalent topics.
Model persistency is achieved through its `load`/`save` methods.
"""
def __init__(self, corpus, id2word, max_chunks=None, max_time=None,
chunksize=256, kappa=1.0, tau=64.0, K=15, T=150, alpha=1,
gamma=1, eta=0.01, scale=1.0, var_converge=0.0001,
outputdir=None):
"""
`gamma`: first level concentration
`alpha`: second level concentration
`eta`: the topic Dirichlet
`T`: top level truncation level
`K`: second level truncation level
`kappa`: learning rate
`tau`: slow down parameter
`max_time`: stop training after this many seconds
`max_chunks`: stop after having processed this many chunks (wrap around
corpus beginning in another corpus pass, if there are not enough chunks
in the corpus)
"""
self.corpus = corpus
self.id2word = id2word
self.chunksize = chunksize
self.max_chunks = max_chunks
self.max_time = max_time
self.outputdir = outputdir
self.lda_alpha = None
self.lda_beta = None
self.m_W = len(id2word)
self.m_D = 0
if corpus:
self.m_D = len(corpus)
self.m_T = T
self.m_K = K
self.m_alpha = alpha
self.m_gamma = gamma
self.m_var_sticks = np.zeros((2, T - 1))
self.m_var_sticks[0] = 1.0
self.m_var_sticks[1] = range(T - 1, 0, -1)
self.m_varphi_ss = np.zeros(T)
self.m_lambda = np.random.gamma(1.0, 1.0, (T, self.m_W)) * self.m_D * 100 / (T * self.m_W) - eta
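# Added note: E[Gamma(1, 1)] = 1, so eta + m_lambda starts with row sums of
# roughly D * 100 / T per topic -- as if each of the D documents contributed
# about 100 words -- giving a diffuse but reasonably scaled starting point.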
self.m_eta = eta
self.m_Elogbeta = dirichlet_expectation(self.m_eta + self.m_lambda)
self.m_tau = tau + 1
self.m_kappa = kappa
self.m_scale = scale
self.m_updatect = 0
self.m_status_up_to_date = True
self.m_num_docs_processed = 0
self.m_timestamp = np.zeros(self.m_W, dtype=int)
self.m_r = [0]
self.m_lambda_sum = np.sum(self.m_lambda, axis=1)
self.m_var_converge = var_converge
if self.outputdir:
self.save_options()
# if a training corpus was provided, start estimating the model right away
if corpus is not None:
self.update(corpus)
def inference(self, chunk):
if self.lda_alpha is None or self.lda_beta is None:
raise RuntimeError("model must be trained to perform inference")
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents" % len(chunk))
gamma = np.zeros((len(chunk), self.lda_beta.shape[0]))
for d, doc in enumerate(chunk):
if not doc: # leave gamma at zero for empty documents
continue
ids, counts = zip(*doc)
_, gammad = lda_e_step(ids, counts, self.lda_alpha, self.lda_beta)
gamma[d, :] = gammad
return gamma
def __getitem__(self, bow, eps=0.01):
is_corpus, corpus = utils.is_corpus(bow)
if is_corpus:
return self._apply(corpus)
gamma = self.inference([bow])[0]
topic_dist = gamma / sum(gamma) if sum(gamma) != 0 else []
return [(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= eps]
def update(self, corpus):
save_freq = max(1, int(10000 / self.chunksize)) # save every 10k docs, roughly
chunks_processed = 0
start_time = time.clock()
while True:
for chunk in utils.grouper(corpus, self.chunksize):
self.update_chunk(chunk)
self.m_num_docs_processed += len(chunk)
chunks_processed += 1
if self.update_finished(start_time, chunks_processed, self.m_num_docs_processed):
self.update_expectations()
alpha, beta = self.hdp_to_lda()
self.lda_alpha = alpha
self.lda_beta = beta
self.print_topics(20)
if self.outputdir:
self.save_topics()
return
elif chunks_processed % save_freq == 0:
self.update_expectations()
# self.save_topics(self.m_num_docs_processed)
self.print_topics(20) |
def update_finished(self, start_time, chunks_processed, docs_processed):
return (
# chunk limit reached
(self.max_chunks and chunks_processed == self.max_chunks) or
# time limit reached
(self.max_time and time.clock() - start_time > self.max_time) or
# no limits and whole corpus has been processed once
(not self.max_chunks and not self.max_time and docs_processed >= self.m_D))
def update_chunk(self, chunk, update=True, opt_o=True):
# Find the unique words in this chunk...
unique_words = dict()
word_list = []
for doc in chunk:
for word_id, _ in doc:
if word_id not in unique_words:
unique_words[word_id] = len(unique_words)
word_list.append(word_id)
Wt = len(word_list) # length of words in these documents
# ...and do the lazy updates on the necessary columns of lambda
rw = np.array([self.m_r[t] for t in self.m_timestamp[word_list]])
self.m_lambda[:, word_list] *= np.exp(self.m_r[-1] - rw)
self.m_Elogbeta[:, word_list] = \
sp.psi(self.m_eta + self.m_lambda[:, word_list]) - \
sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
ss = SuffStats(self.m_T, Wt, len(chunk))
Elogsticks_1st = expect_log_sticks(self.m_var_sticks) # global sticks
# run variational inference on some new docs
score = 0.0
count = 0
for doc in chunk:
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
doc_score = self.doc_e_step(doc, ss, Elogsticks_1st,
word_list, unique_words, doc_word_ids,
doc_word_counts, self.m_var_converge)
count += sum(doc_word_counts)
score += doc_score
if update:
self.update_lambda(ss, word_list, opt_o)
return (score, count)
def doc_e_step(self, doc, ss, Elogsticks_1st, word_list,
                   unique_words, doc_word_ids, doc_word_counts, var_converge):
"""
e step for a single doc
"""
chunkids = [unique_words[id] for id in doc_word_ids]
Elogbeta_doc = self.m_Elogbeta[:, doc_word_ids]
## very similar to the hdp equations
v = np.zeros((2, self.m_K - 1))
v[0] = 1.0
v[1] = self.m_alpha
# back to the uniform
phi = np.ones((len(doc_word_ids), self.m_K)) * 1.0 / self.m_K
likelihood = 0.0
old_likelihood = -1e200
converge = 1.0
eps = 1e-100
iter = 0
max_iter = 100
        # second-level optimization is not supported yet; to be done in the future
while iter < max_iter and (converge < 0.0 or converge > var_converge):
### update variational parameters
# var_phi
if iter < 3:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T)
(log_var_phi, log_norm) = log_normalize(var_phi)
var_phi = np.exp(log_var_phi)
else:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T) + Elogsticks_1st
(log_var_phi, log_norm) = log_normalize(var_phi)
var_phi = np.exp(log_var_phi)
# phi
if iter < 3:
phi = np.dot(var_phi, Elogbeta_doc).T
(log_phi, log_norm) = log_normalize(phi)
phi = np.exp(log_phi)
else:
phi = np.dot(var_phi, Elogbeta_doc).T + Elogsticks_2nd
(log_phi, log_norm) = log_normalize(phi)
phi = np.exp(log_phi)
# v
phi_all = phi * np.array(doc_word_counts)[:, np.newaxis]
v[0] = 1.0 + np.sum(phi_all[:, :self.m_K - 1], 0)
phi_cum = np.flipud(np.sum(phi_all[:, 1:], 0))
v[1] = self.m_alpha + np.flipud(np.cumsum(phi_cum))
Elogsticks_2nd = expect_log_sticks(v)
likelihood = 0.0
# compute likelihood
# var_phi part/ C in john's notation
likelihood += np.sum((Elogsticks_1st - log_var_phi) * var_phi)
# v part/ v in john's notation, john's beta is alpha here
log_alpha = np.log(self.m_alpha)
likelihood += (self.m_K - 1) * log_alpha
dig_sum = sp.psi(np.sum(v, 0))
likelihood += np.sum((np.array([1.0, self.m_alpha])[:, np.newaxis] - v) * (sp.psi(v) - dig_sum))
likelihood -= np.sum(sp.gammaln(np.sum(v, 0))) - np.sum(sp.gammaln(v))
# Z part
likelihood += np.sum((Elogsticks_2nd - log_phi) * phi)
# X part, the data part
likelihood += np.sum(phi.T * np.dot(var_phi, Elogbeta_doc * doc_word_counts))
converge = (likelihood - old_likelihood) / abs(old_likelihood)
old_likelihood = likelihood
if converge < -0.000001:
logger.warning('likelihood is decreasing!')
iter += 1
# update the suff_stat ss
# this time it only contains information from one doc
ss.m_var_sticks_ss += np.sum(var_phi, 0)
ss.m_var_beta_ss[:, chunkids] += np.dot(var_phi.T, phi.T * doc_word_counts)
return likelihood
def update_lambda(self, sstats, word_list, opt_o):
self.m_status_up_to_date = False
# rhot will be between 0 and 1, and says how much to weight
# the information we got from this mini-chunk.
rhot = self.m_scale * pow(self.m_tau + self.m_updatect, -self.m_kappa)
if rhot < rhot_bound:
rhot = rhot_bound
self.m_rhot = rhot
# Update appropriate columns of lambda based on documents.
self.m_lambda[:, word_list] = self.m_lambda[:, word_list] * (1 - rhot) + \
rhot * self.m_D * sstats.m_var_beta_ss / sstats.m_chunksize
self.m_lambda_sum = (1 - rhot) * self.m_lambda_sum + \
rhot * self.m_D * np.sum(sstats.m_var_beta_ss, axis=1) / sstats.m_chunksize
self.m_updatect += 1
self.m_timestamp[word_list] = self.m_updatect
self.m_r.append(self.m_r[-1] + np.log(1 - rhot))
self.m_varphi_ss = (1.0 - rhot) * self.m_varphi_ss + rhot * \
sstats.m_var_sticks_ss * self.m_D / sstats.m_chunksize
if opt_o:
self.optimal_ordering()
## update top level sticks
self.m_var_sticks[0] = self.m_varphi_ss[:self.m_T - 1] + 1.0
var_phi_sum = np.flipud(self.m_varphi_ss[1:])
self.m_var_sticks[1] = np.flipud(np.cumsum(var_phi_sum)) + self.m_gamma
def optimal_ordering(self):
"""
        Re-order the topics in decreasing order of their total lambda mass.
"""
idx = matutils.argsort(self.m_lambda_sum, reverse=True)
self.m_varphi_ss = self.m_varphi_ss[idx]
self.m_lambda = self.m_lambda[idx, :]
self.m_lambda_sum = self.m_lambda_sum[idx]
self.m_Elogbeta = self.m_Elogbeta[idx, :]
def update_expectations(self):
"""
Since we're doing lazy updates on lambda, at any given moment
the current state of lambda may not be accurate. This function
updates all of the elements of lambda and Elogbeta
so that if (for example) we want to print out the
topics we've learned we'll get the correct behavior.
"""
for w in xrange(self.m_W):
self.m_lambda[:, w] *= np.exp(self.m_r[-1] -
self.m_r[self.m_timestamp[w]])
self.m_Elogbeta = sp.psi(self.m_eta + self.m_lambda) - \
sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
self.m_timestamp[:] = self.m_updatect
self.m_status_up_to_date = True
def print_topics(self, topics=20, topn=20):
"""Alias for `show_topics()` that prints the `topn` most
probable words for `topics` number of topics to log.
Set `topics=-1` to print all topics."""
return self.show_topics(topics=topics, topn=topn, log=True)
def show_topics(self, topics=20, topn=20, log=False, formatted=True):
"""
        Return (and optionally log) the `topn` most probable words for `topics`
        number of topics.
        Set `topics=-1` to print all topics.
        Set `formatted=True` to return the topics as a list of strings, or
        `False` to return them as lists of (word, weight) pairs.
"""
if not self.m_status_up_to_date:
self.update_expectations()
betas = self.m_lambda + self.m_eta
hdp_formatter = HdpTopicFormatter(self.id2word, betas)
return hdp_formatter.show_topics(topics, topn, log, formatted)
def save_topics(self, doc_count=None):
"""legacy method; use `self.save()` instead"""
        if not self.outputdir:
            logger.error("cannot store topics without having specified an output directory")
            return
if doc_count is None:
fname = 'final'
else:
fname = 'doc-%i' % doc_count
fname = '%s/%s.topics' % (self.outputdir, fname)
logger.info("saving topics to %s" % fname)
betas = self.m_lambda + self.m_eta
np.savetxt(fname, betas)
def save_options(self):
"""legacy method; use `self.save()` instead"""
if not self.outputdir:
logger.error("cannot store options without having specified an output directory")
return
fname = '%s/options.dat' % self.outputdir
with utils.smart_open(fname, 'wb') as fout:
fout.write('tau: %s\n' % str(self.m_tau - 1))
fout.write('chunksize: %s\n' % str(self.chunksize))
fout.write('var_converge: %s\n' % str(self.m_var_converge))
fout.write('D: %s\n' % str(self.m_D))
fout.write('K: %s\n' % str(self.m_K))
fout.write('T: %s\n' % str(self.m_T))
fout.write('W: %s\n' % str(self.m_W))
fout.write('alpha: %s\n' % str(self.m_alpha))
fout.write('kappa: %s\n' % str(self.m_kappa))
fout.write('eta: %s\n' % str(self.m_eta))
fout.write('gamma: %s\n' % str(self.m_gamma))
def hdp_to_lda(self):
"""
        Compute the (approximately) LDA-equivalent parameters of this HDP model.
"""
# alpha
sticks = self.m_var_sticks[0] / (self.m_var_sticks[0] + self.m_var_sticks[1])
alpha = np.zeros(self.m_T)
left = 1.0
for i in xrange(0, self.m_T - 1):
alpha[i] = sticks[i] * left
left = left - alpha[i]
alpha[self.m_T - 1] = left
alpha = alpha * self.m_alpha
# beta
beta = (self.m_lambda + self.m_eta) / (self.m_W * self.m_eta + \
self.m_lambda_sum[:, np.newaxis])
return (alpha, beta)
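    # Sanity checks (added for exposition): the stick-breaking loop above makes
    # the unscaled alpha sum to one, so sum(alpha) == self.m_alpha after the
    # final scaling, and each row of beta sums to one because its normalizer is
    # exactly lambda_sum + W * eta:
    #
    #   alpha, beta = self.hdp_to_lda()
    #   assert np.allclose(np.sum(alpha), self.m_alpha)
    #   assert np.allclose(np.sum(beta, axis=1), 1.0)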
def evaluate_test_corpus(self, corpus):
logger.info('TEST: evaluating test corpus')
if self.lda_alpha is None or self.lda_beta is None:
self.lda_alpha, self.lda_beta = self.hdp_to_lda()
score = 0.0
total_words = 0
for i, doc in enumerate(corpus):
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
likelihood, gamma = lda_e_step(doc_word_ids, doc_word_counts, self.lda_alpha, self.lda_beta)
theta = gamma / np.sum(gamma)
lda_betad = self.lda_beta[:, doc_word_ids]
log_predicts = np.log(np.dot(theta, lda_betad))
doc_score = sum(log_predicts) / len(doc)
logger.info('TEST: %6d %.5f' % (i, doc_score))
score += likelihood
total_words += sum(doc_word_counts)
logger.info('TEST: average score: %.5f, total score: %.5f, test docs: %d' % (score / total_words, score, len(corpus)))
return score
#endclass HdpModel
class HdpTopicFormatter(object):
(STYLE_GENSIM, STYLE_PRETTY) = (1, 2)
def __init__(self, dictionary=None, topic_data=None, topic_file=None, style=None):
if dictionary is None:
raise ValueError('no dictionary!')
if topic_data is not None:
topics = topic_data
elif topic_file is not None:
topics = np.loadtxt('%s' % topic_file)
else:
raise ValueError('no topic data!')
# sort topics
topics_sums = np.sum(topics, axis=1)
idx = matutils.argsort(topics_sums, reverse=True)
self.data = topics[idx]
self.dictionary = dictionary
if style is None:
style = self.STYLE_GENSIM
self.style = style
def print_topics(self, topics=10, topn=10):
return self.show_topics(topics, topn, True)
def show_topics(self, topics=10, topn=10, log=False, formatted=True):
shown = []
if topics < 0:
topics = len(self.data)
topics = min(topics, len(self.data))
for k in xrange(topics):
lambdak = list(self.data[k, :])
lambdak = lambdak / sum(lambdak)
temp = zip(lambdak, xrange(len(lambdak)))
temp = sorted(temp, key=lambda x: x[0], reverse=True)
topic_terms = self.show_topic_terms(temp, topn)
if formatted:
topic = self.format_topic(k, topic_terms)
# assuming we only output formatted topics
if log:
logger.info(topic)
else:
topic = (k, topic_terms)
shown.append(topic)
return shown
def show_topic_terms(self, topic_data, topn):
return [(self.dictionary[wid], weight) for (weight, wid) in topic_data[:topn]]
def format_topic(self, topic_id, topic_terms):
if self.STYLE_GENSIM == self.style:
fmt = ' + '.join(['%.3f*%s' % (weight, word) for (word, weight) in topic_terms])
fmt = 'topic %i: %s' % (topic_id, fmt)
else:
fmt = '\n'.join([' %20s %.8f' % (word, weight) for (word, weight) in topic_terms])
fmt = 'topic %i:\n%s' % (topic_id, fmt)
return fmt
#endclass HdpTopicFormatter | logger.info('PROGRESS: finished document %i of %i' %
(self.m_num_docs_processed, self.m_D)) | random_line_split |
hdpmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
#
# HDP inference code is adapted from the onlinehdp.py script by
# Chong Wang (chongw at cs.princeton.edu).
# http://www.cs.princeton.edu/~chongw/software/onlinehdp.tar.gz
#
# Some show/print topics code is adapted from Dr. Hoffman's online lda sample code,
# (C) 2010 Matthew D. Hoffman, GNU GPL 3.0
# http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
This module encapsulates functionality for the online Hierarchical Dirichlet Process algorithm.
It allows both model estimation from a training corpus and inference of topic
distribution on new, unseen documents.
The core estimation code is directly adapted from the `onlinehdp.py` script
by C. Wang; see
**Wang, Paisley, Blei: Online Variational Inference for the Hierarchical Dirichlet
Process, JMLR (2011).**
http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf
The algorithm:
* is **streamed**: training documents come in sequentially, no random access,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint
"""
from __future__ import with_statement
import logging, time
import numpy as np
import scipy.special as sp
from gensim import interfaces, utils, matutils
from six.moves import xrange
logger = logging.getLogger(__name__)
meanchangethresh = 0.00001
rhot_bound = 0.0
def log_normalize(v):
log_max = 100.0
if len(v.shape) == 1:
max_val = np.max(v)
log_shift = log_max - np.log(len(v) + 1.0) - max_val
tot = np.sum(np.exp(v + log_shift))
log_norm = np.log(tot) - log_shift
v = v - log_norm
else:
max_val = np.max(v, 1)
log_shift = log_max - np.log(v.shape[1] + 1.0) - max_val
tot = np.sum(np.exp(v + log_shift[:, np.newaxis]), 1)
log_norm = np.log(tot) - log_shift
v = v - log_norm[:, np.newaxis]
return (v, log_norm)
def dirichlet_expectation(alpha):
"""
For a vector theta ~ Dir(alpha), compute E[log(theta)] given alpha.
"""
if (len(alpha.shape) == 1):
return(sp.psi(alpha) - sp.psi(np.sum(alpha)))
return(sp.psi(alpha) - sp.psi(np.sum(alpha, 1))[:, np.newaxis])
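# Illustrative check (added; not part of the original module): for a symmetric
# Dirichlet every component has the same expectation, e.g.
#
#   >>> np.allclose(dirichlet_expectation(np.ones(4)), sp.psi(1.0) - sp.psi(4.0))
#   True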
def expect_log_sticks(sticks):
"""
For stick-breaking hdp, return the E[log(sticks)]
"""
dig_sum = sp.psi(np.sum(sticks, 0))
ElogW = sp.psi(sticks[0]) - dig_sum
Elog1_W = sp.psi(sticks[1]) - dig_sum
n = len(sticks[0]) + 1
Elogsticks = np.zeros(n)
Elogsticks[0: n - 1] = ElogW
Elogsticks[1:] = Elogsticks[1:] + np.cumsum(Elog1_W)
return Elogsticks
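# Reading the result (added note): Elogsticks[k] is E[log pi_k] for the
# stick-breaking weights pi_k = W_k * prod_{j<k} (1 - W_j), i.e. E[log W_k]
# plus the accumulated E[log(1 - W_j)] terms for j < k; the final entry is the
# expected log mass left over after all T - 1 sticks have been broken.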
def lda_e_step(doc_word_ids, doc_word_counts, alpha, beta, max_iter=100):
gamma = np.ones(len(alpha))
expElogtheta = np.exp(dirichlet_expectation(gamma))
betad = beta[:, doc_word_ids]
phinorm = np.dot(expElogtheta, betad) + 1e-100
counts = np.array(doc_word_counts)
for _ in xrange(max_iter):
lastgamma = gamma
gamma = alpha + expElogtheta * np.dot(counts / phinorm, betad.T)
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
phinorm = np.dot(expElogtheta, betad) + 1e-100
meanchange = np.mean(abs(gamma - lastgamma))
if (meanchange < meanchangethresh):
break
likelihood = np.sum(counts * np.log(phinorm))
likelihood += np.sum((alpha - gamma) * Elogtheta)
likelihood += np.sum(sp.gammaln(gamma) - sp.gammaln(alpha))
likelihood += sp.gammaln(np.sum(alpha)) - sp.gammaln(np.sum(gamma))
return (likelihood, gamma)
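# Usage sketch (added for exposition; the ids/counts below are made-up toy
# data, and `alpha`/`beta` are assumed to come from a trained model, e.g.
# `HdpModel.hdp_to_lda()`):
#
#   likelihood, gamma = lda_e_step([0, 5, 42], [2, 1, 3], alpha, beta)
#   theta = gamma / np.sum(gamma)   # per-document topic proportions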
class SuffStats(object):
def __init__(self, T, Wt, Dt):
self.m_chunksize = Dt
self.m_var_sticks_ss = np.zeros(T)
self.m_var_beta_ss = np.zeros((T, Wt))
def set_zero(self):
self.m_var_sticks_ss.fill(0.0)
self.m_var_beta_ss.fill(0.0)
class HdpModel(interfaces.TransformationABC):
"""
    The constructor estimates Hierarchical Dirichlet Process model parameters based
on a training corpus:
>>> hdp = HdpModel(corpus, id2word)
>>> hdp.print_topics(topics=20, topn=10)
Inference on new documents is based on the approximately LDA-equivalent topics.
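    For example (illustrative; `new_doc` is assumed to be a bag-of-words
    document, i.e. a list of (word_id, count) tuples):
    >>> doc_topics = hdp[new_doc]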
Model persistency is achieved through its `load`/`save` methods.
"""
def __init__(self, corpus, id2word, max_chunks=None, max_time=None,
chunksize=256, kappa=1.0, tau=64.0, K=15, T=150, alpha=1,
gamma=1, eta=0.01, scale=1.0, var_converge=0.0001,
outputdir=None):
"""
`gamma`: first level concentration
`alpha`: second level concentration
`eta`: the topic Dirichlet
`T`: top level truncation level
`K`: second level truncation level
`kappa`: learning rate
`tau`: slow down parameter
`max_time`: stop training after this many seconds
`max_chunks`: stop after having processed this many chunks (wrap around
corpus beginning in another corpus pass, if there are not enough chunks
in the corpus)
"""
self.corpus = corpus
self.id2word = id2word
self.chunksize = chunksize
self.max_chunks = max_chunks
self.max_time = max_time
self.outputdir = outputdir
self.lda_alpha = None
self.lda_beta = None
self.m_W = len(id2word)
self.m_D = 0
if corpus:
self.m_D = len(corpus)
self.m_T = T
self.m_K = K
self.m_alpha = alpha
self.m_gamma = gamma
self.m_var_sticks = np.zeros((2, T - 1))
self.m_var_sticks[0] = 1.0
self.m_var_sticks[1] = range(T - 1, 0, -1)
self.m_varphi_ss = np.zeros(T)
self.m_lambda = np.random.gamma(1.0, 1.0, (T, self.m_W)) * self.m_D * 100 / (T * self.m_W) - eta
self.m_eta = eta
self.m_Elogbeta = dirichlet_expectation(self.m_eta + self.m_lambda)
self.m_tau = tau + 1
self.m_kappa = kappa
self.m_scale = scale
self.m_updatect = 0
self.m_status_up_to_date = True
self.m_num_docs_processed = 0
self.m_timestamp = np.zeros(self.m_W, dtype=int)
self.m_r = [0]
self.m_lambda_sum = np.sum(self.m_lambda, axis=1)
self.m_var_converge = var_converge
if self.outputdir:
self.save_options()
# if a training corpus was provided, start estimating the model right away
if corpus is not None:
self.update(corpus)
def inference(self, chunk):
if self.lda_alpha is None or self.lda_beta is None:
raise RuntimeError("model must be trained to perform inference")
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents" % len(chunk))
gamma = np.zeros((len(chunk), self.lda_beta.shape[0]))
for d, doc in enumerate(chunk):
if not doc: # leave gamma at zero for empty documents
continue
ids, counts = zip(*doc)
_, gammad = lda_e_step(ids, counts, self.lda_alpha, self.lda_beta)
gamma[d, :] = gammad
return gamma
def __getitem__(self, bow, eps=0.01):
is_corpus, corpus = utils.is_corpus(bow)
if is_corpus:
return self._apply(corpus)
gamma = self.inference([bow])[0]
topic_dist = gamma / sum(gamma) if sum(gamma) != 0 else []
return [(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= eps]
def update(self, corpus):
save_freq = max(1, int(10000 / self.chunksize)) # save every 10k docs, roughly
chunks_processed = 0
start_time = time.clock()
while True:
for chunk in utils.grouper(corpus, self.chunksize):
self.update_chunk(chunk)
self.m_num_docs_processed += len(chunk)
chunks_processed += 1
if self.update_finished(start_time, chunks_processed, self.m_num_docs_processed):
self.update_expectations()
alpha, beta = self.hdp_to_lda()
self.lda_alpha = alpha
self.lda_beta = beta
self.print_topics(20)
if self.outputdir:
self.save_topics()
return
elif chunks_processed % save_freq == 0:
self.update_expectations()
# self.save_topics(self.m_num_docs_processed)
self.print_topics(20)
logger.info('PROGRESS: finished document %i of %i' %
(self.m_num_docs_processed, self.m_D))
def update_finished(self, start_time, chunks_processed, docs_processed):
return (
# chunk limit reached
(self.max_chunks and chunks_processed == self.max_chunks) or
# time limit reached
(self.max_time and time.clock() - start_time > self.max_time) or
# no limits and whole corpus has been processed once
(not self.max_chunks and not self.max_time and docs_processed >= self.m_D))
def update_chunk(self, chunk, update=True, opt_o=True):
# Find the unique words in this chunk...
unique_words = dict()
word_list = []
for doc in chunk:
for word_id, _ in doc:
if word_id not in unique_words:
unique_words[word_id] = len(unique_words)
word_list.append(word_id)
Wt = len(word_list) # length of words in these documents
# ...and do the lazy updates on the necessary columns of lambda
rw = np.array([self.m_r[t] for t in self.m_timestamp[word_list]])
self.m_lambda[:, word_list] *= np.exp(self.m_r[-1] - rw)
self.m_Elogbeta[:, word_list] = \
sp.psi(self.m_eta + self.m_lambda[:, word_list]) - \
sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
ss = SuffStats(self.m_T, Wt, len(chunk))
Elogsticks_1st = expect_log_sticks(self.m_var_sticks) # global sticks
# run variational inference on some new docs
score = 0.0
count = 0
for doc in chunk:
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
doc_score = self.doc_e_step(doc, ss, Elogsticks_1st,
word_list, unique_words, doc_word_ids,
doc_word_counts, self.m_var_converge)
count += sum(doc_word_counts)
score += doc_score
if update:
self.update_lambda(ss, word_list, opt_o)
return (score, count)
def doc_e_step(self, doc, ss, Elogsticks_1st, word_list,
                   unique_words, doc_word_ids, doc_word_counts, var_converge):
"""
e step for a single doc
"""
chunkids = [unique_words[id] for id in doc_word_ids]
Elogbeta_doc = self.m_Elogbeta[:, doc_word_ids]
## very similar to the hdp equations
v = np.zeros((2, self.m_K - 1))
v[0] = 1.0
v[1] = self.m_alpha
# back to the uniform
phi = np.ones((len(doc_word_ids), self.m_K)) * 1.0 / self.m_K
likelihood = 0.0
old_likelihood = -1e200
converge = 1.0
eps = 1e-100
iter = 0
max_iter = 100
        # second-level optimization is not supported yet; to be done in the future
while iter < max_iter and (converge < 0.0 or converge > var_converge):
### update variational parameters
# var_phi
if iter < 3:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T)
(log_var_phi, log_norm) = log_normalize(var_phi)
var_phi = np.exp(log_var_phi)
else:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T) + Elogsticks_1st
(log_var_phi, log_norm) = log_normalize(var_phi)
var_phi = np.exp(log_var_phi)
# phi
if iter < 3:
phi = np.dot(var_phi, Elogbeta_doc).T
(log_phi, log_norm) = log_normalize(phi)
phi = np.exp(log_phi)
else:
phi = np.dot(var_phi, Elogbeta_doc).T + Elogsticks_2nd
(log_phi, log_norm) = log_normalize(phi)
phi = np.exp(log_phi)
# v
phi_all = phi * np.array(doc_word_counts)[:, np.newaxis]
v[0] = 1.0 + np.sum(phi_all[:, :self.m_K - 1], 0)
phi_cum = np.flipud(np.sum(phi_all[:, 1:], 0))
v[1] = self.m_alpha + np.flipud(np.cumsum(phi_cum))
Elogsticks_2nd = expect_log_sticks(v)
likelihood = 0.0
# compute likelihood
# var_phi part/ C in john's notation
likelihood += np.sum((Elogsticks_1st - log_var_phi) * var_phi)
# v part/ v in john's notation, john's beta is alpha here
log_alpha = np.log(self.m_alpha)
likelihood += (self.m_K - 1) * log_alpha
dig_sum = sp.psi(np.sum(v, 0))
likelihood += np.sum((np.array([1.0, self.m_alpha])[:, np.newaxis] - v) * (sp.psi(v) - dig_sum))
likelihood -= np.sum(sp.gammaln(np.sum(v, 0))) - np.sum(sp.gammaln(v))
# Z part
likelihood += np.sum((Elogsticks_2nd - log_phi) * phi)
# X part, the data part
likelihood += np.sum(phi.T * np.dot(var_phi, Elogbeta_doc * doc_word_counts))
converge = (likelihood - old_likelihood) / abs(old_likelihood)
old_likelihood = likelihood
if converge < -0.000001:
logger.warning('likelihood is decreasing!')
iter += 1
# update the suff_stat ss
# this time it only contains information from one doc
ss.m_var_sticks_ss += np.sum(var_phi, 0)
ss.m_var_beta_ss[:, chunkids] += np.dot(var_phi.T, phi.T * doc_word_counts)
return likelihood
def update_lambda(self, sstats, word_list, opt_o):
self.m_status_up_to_date = False
# rhot will be between 0 and 1, and says how much to weight
# the information we got from this mini-chunk.
rhot = self.m_scale * pow(self.m_tau + self.m_updatect, -self.m_kappa)
if rhot < rhot_bound:
rhot = rhot_bound
self.m_rhot = rhot
# Update appropriate columns of lambda based on documents.
self.m_lambda[:, word_list] = self.m_lambda[:, word_list] * (1 - rhot) + \
rhot * self.m_D * sstats.m_var_beta_ss / sstats.m_chunksize
self.m_lambda_sum = (1 - rhot) * self.m_lambda_sum + \
rhot * self.m_D * np.sum(sstats.m_var_beta_ss, axis=1) / sstats.m_chunksize
self.m_updatect += 1
self.m_timestamp[word_list] = self.m_updatect
self.m_r.append(self.m_r[-1] + np.log(1 - rhot))
self.m_varphi_ss = (1.0 - rhot) * self.m_varphi_ss + rhot * \
sstats.m_var_sticks_ss * self.m_D / sstats.m_chunksize
if opt_o:
self.optimal_ordering()
## update top level sticks
self.m_var_sticks[0] = self.m_varphi_ss[:self.m_T - 1] + 1.0
var_phi_sum = np.flipud(self.m_varphi_ss[1:])
self.m_var_sticks[1] = np.flipud(np.cumsum(var_phi_sum)) + self.m_gamma
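    # Note on the step size (added): rhot follows the Robbins-Monro schedule
    # rho_t = scale * (tau + t)**(-kappa); for kappa in (0.5, 1] this gives
    # sum(rho_t) = inf and sum(rho_t**2) < inf, the standard conditions for
    # stochastic (online) variational updates to converge.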
def optimal_ordering(self):
"""
        Re-order the topics in decreasing order of their total lambda mass.
"""
idx = matutils.argsort(self.m_lambda_sum, reverse=True)
self.m_varphi_ss = self.m_varphi_ss[idx]
self.m_lambda = self.m_lambda[idx, :]
self.m_lambda_sum = self.m_lambda_sum[idx]
self.m_Elogbeta = self.m_Elogbeta[idx, :]
def update_expectations(self):
"""
Since we're doing lazy updates on lambda, at any given moment
the current state of lambda may not be accurate. This function
updates all of the elements of lambda and Elogbeta
so that if (for example) we want to print out the
topics we've learned we'll get the correct behavior.
"""
for w in xrange(self.m_W):
self.m_lambda[:, w] *= np.exp(self.m_r[-1] -
self.m_r[self.m_timestamp[w]])
self.m_Elogbeta = sp.psi(self.m_eta + self.m_lambda) - \
sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
self.m_timestamp[:] = self.m_updatect
self.m_status_up_to_date = True
def print_topics(self, topics=20, topn=20):
"""Alias for `show_topics()` that prints the `topn` most
probable words for `topics` number of topics to log.
Set `topics=-1` to print all topics."""
return self.show_topics(topics=topics, topn=topn, log=True)
def show_topics(self, topics=20, topn=20, log=False, formatted=True):
"""
        Return (and optionally log) the `topn` most probable words for `topics`
        number of topics.
        Set `topics=-1` to print all topics.
        Set `formatted=True` to return the topics as a list of strings, or
        `False` to return them as lists of (word, weight) pairs.
"""
if not self.m_status_up_to_date:
self.update_expectations()
betas = self.m_lambda + self.m_eta
hdp_formatter = HdpTopicFormatter(self.id2word, betas)
return hdp_formatter.show_topics(topics, topn, log, formatted)
def save_topics(self, doc_count=None):
"""legacy method; use `self.save()` instead"""
        if not self.outputdir:
            logger.error("cannot store topics without having specified an output directory")
            return
if doc_count is None:
fname = 'final'
else:
fname = 'doc-%i' % doc_count
fname = '%s/%s.topics' % (self.outputdir, fname)
logger.info("saving topics to %s" % fname)
betas = self.m_lambda + self.m_eta
np.savetxt(fname, betas)
def save_options(self):
"""legacy method; use `self.save()` instead"""
if not self.outputdir:
logger.error("cannot store options without having specified an output directory")
return
fname = '%s/options.dat' % self.outputdir
with utils.smart_open(fname, 'wb') as fout:
fout.write('tau: %s\n' % str(self.m_tau - 1))
fout.write('chunksize: %s\n' % str(self.chunksize))
fout.write('var_converge: %s\n' % str(self.m_var_converge))
fout.write('D: %s\n' % str(self.m_D))
fout.write('K: %s\n' % str(self.m_K))
fout.write('T: %s\n' % str(self.m_T))
fout.write('W: %s\n' % str(self.m_W))
fout.write('alpha: %s\n' % str(self.m_alpha))
fout.write('kappa: %s\n' % str(self.m_kappa))
fout.write('eta: %s\n' % str(self.m_eta))
fout.write('gamma: %s\n' % str(self.m_gamma))
def hdp_to_lda(self):
"""
        Compute the (approximately) LDA-equivalent parameters of this HDP model.
"""
# alpha
sticks = self.m_var_sticks[0] / (self.m_var_sticks[0] + self.m_var_sticks[1])
alpha = np.zeros(self.m_T)
left = 1.0
for i in xrange(0, self.m_T - 1):
alpha[i] = sticks[i] * left
left = left - alpha[i]
alpha[self.m_T - 1] = left
alpha = alpha * self.m_alpha
# beta
beta = (self.m_lambda + self.m_eta) / (self.m_W * self.m_eta + \
self.m_lambda_sum[:, np.newaxis])
return (alpha, beta)
def evaluate_test_corpus(self, corpus):
logger.info('TEST: evaluating test corpus')
if self.lda_alpha is None or self.lda_beta is None:
self.lda_alpha, self.lda_beta = self.hdp_to_lda()
score = 0.0
total_words = 0
for i, doc in enumerate(corpus):
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
likelihood, gamma = lda_e_step(doc_word_ids, doc_word_counts, self.lda_alpha, self.lda_beta)
theta = gamma / np.sum(gamma)
lda_betad = self.lda_beta[:, doc_word_ids]
log_predicts = np.log(np.dot(theta, lda_betad))
doc_score = sum(log_predicts) / len(doc)
logger.info('TEST: %6d %.5f' % (i, doc_score))
score += likelihood
total_words += sum(doc_word_counts)
logger.info('TEST: average score: %.5f, total score: %.5f, test docs: %d' % (score / total_words, score, len(corpus)))
return score
#endclass HdpModel
class HdpTopicFormatter(object):
(STYLE_GENSIM, STYLE_PRETTY) = (1, 2)
def __init__(self, dictionary=None, topic_data=None, topic_file=None, style=None):
if dictionary is None:
raise ValueError('no dictionary!')
if topic_data is not None:
topics = topic_data
elif topic_file is not None:
topics = np.loadtxt('%s' % topic_file)
else:
raise ValueError('no topic data!')
# sort topics
topics_sums = np.sum(topics, axis=1)
idx = matutils.argsort(topics_sums, reverse=True)
self.data = topics[idx]
self.dictionary = dictionary
if style is None:
style = self.STYLE_GENSIM
self.style = style
def print_topics(self, topics=10, topn=10):
return self.show_topics(topics, topn, True)
def show_topics(self, topics=10, topn=10, log=False, formatted=True):
shown = []
if topics < 0:
topics = len(self.data)
topics = min(topics, len(self.data))
for k in xrange(topics):
lambdak = list(self.data[k, :])
lambdak = lambdak / sum(lambdak)
temp = zip(lambdak, xrange(len(lambdak)))
temp = sorted(temp, key=lambda x: x[0], reverse=True)
topic_terms = self.show_topic_terms(temp, topn)
if formatted:
|
else:
topic = (k, topic_terms)
shown.append(topic)
return shown
def show_topic_terms(self, topic_data, topn):
return [(self.dictionary[wid], weight) for (weight, wid) in topic_data[:topn]]
def format_topic(self, topic_id, topic_terms):
if self.STYLE_GENSIM == self.style:
fmt = ' + '.join(['%.3f*%s' % (weight, word) for (word, weight) in topic_terms])
fmt = 'topic %i: %s' % (topic_id, fmt)
else:
fmt = '\n'.join([' %20s %.8f' % (word, weight) for (word, weight) in topic_terms])
fmt = 'topic %i:\n%s' % (topic_id, fmt)
return fmt
#endclass HdpTopicFormatter
| topic = self.format_topic(k, topic_terms)
# assuming we only output formatted topics
if log:
logger.info(topic) | conditional_block |
hdpmodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
#
# HDP inference code is adapted from the onlinehdp.py script by
# Chong Wang (chongw at cs.princeton.edu).
# http://www.cs.princeton.edu/~chongw/software/onlinehdp.tar.gz
#
# Some show/print topics code is adapted from Dr. Hoffman's online lda sample code,
# (C) 2010 Matthew D. Hoffman, GNU GPL 3.0
# http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
This module encapsulates functionality for the online Hierarchical Dirichlet Process algorithm.
It allows both model estimation from a training corpus and inference of topic
distribution on new, unseen documents.
The core estimation code is directly adapted from the `onlinehdp.py` script
by C. Wang; see
**Wang, Paisley, Blei: Online Variational Inference for the Hierarchical Dirichlet
Process, JMLR (2011).**
http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf
The algorithm:
* is **streamed**: training documents come in sequentially, no random access,
* runs in **constant memory** w.r.t. the number of documents: size of the
training corpus does not affect memory footprint
"""
from __future__ import with_statement
import logging, time
import numpy as np
import scipy.special as sp
from gensim import interfaces, utils, matutils
from six.moves import xrange
logger = logging.getLogger(__name__)
meanchangethresh = 0.00001
rhot_bound = 0.0
def log_normalize(v):
log_max = 100.0
if len(v.shape) == 1:
max_val = np.max(v)
log_shift = log_max - np.log(len(v) + 1.0) - max_val
tot = np.sum(np.exp(v + log_shift))
log_norm = np.log(tot) - log_shift
v = v - log_norm
else:
max_val = np.max(v, 1)
log_shift = log_max - np.log(v.shape[1] + 1.0) - max_val
tot = np.sum(np.exp(v + log_shift[:, np.newaxis]), 1)
log_norm = np.log(tot) - log_shift
v = v - log_norm[:, np.newaxis]
return (v, log_norm)
def dirichlet_expectation(alpha):
"""
For a vector theta ~ Dir(alpha), compute E[log(theta)] given alpha.
"""
if (len(alpha.shape) == 1):
return(sp.psi(alpha) - sp.psi(np.sum(alpha)))
return(sp.psi(alpha) - sp.psi(np.sum(alpha, 1))[:, np.newaxis])
def expect_log_sticks(sticks):
"""
For stick-breaking hdp, return the E[log(sticks)]
"""
dig_sum = sp.psi(np.sum(sticks, 0))
ElogW = sp.psi(sticks[0]) - dig_sum
Elog1_W = sp.psi(sticks[1]) - dig_sum
n = len(sticks[0]) + 1
Elogsticks = np.zeros(n)
Elogsticks[0: n - 1] = ElogW
Elogsticks[1:] = Elogsticks[1:] + np.cumsum(Elog1_W)
return Elogsticks
def lda_e_step(doc_word_ids, doc_word_counts, alpha, beta, max_iter=100):
gamma = np.ones(len(alpha))
expElogtheta = np.exp(dirichlet_expectation(gamma))
betad = beta[:, doc_word_ids]
phinorm = np.dot(expElogtheta, betad) + 1e-100
counts = np.array(doc_word_counts)
for _ in xrange(max_iter):
lastgamma = gamma
gamma = alpha + expElogtheta * np.dot(counts / phinorm, betad.T)
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
phinorm = np.dot(expElogtheta, betad) + 1e-100
meanchange = np.mean(abs(gamma - lastgamma))
if (meanchange < meanchangethresh):
break
likelihood = np.sum(counts * np.log(phinorm))
likelihood += np.sum((alpha - gamma) * Elogtheta)
likelihood += np.sum(sp.gammaln(gamma) - sp.gammaln(alpha))
likelihood += sp.gammaln(np.sum(alpha)) - sp.gammaln(np.sum(gamma))
return (likelihood, gamma)
class SuffStats(object):
def __init__(self, T, Wt, Dt):
self.m_chunksize = Dt
self.m_var_sticks_ss = np.zeros(T)
self.m_var_beta_ss = np.zeros((T, Wt))
def set_zero(self):
self.m_var_sticks_ss.fill(0.0)
self.m_var_beta_ss.fill(0.0)
class HdpModel(interfaces.TransformationABC):
"""
    The constructor estimates Hierarchical Dirichlet Process model parameters based
on a training corpus:
>>> hdp = HdpModel(corpus, id2word)
>>> hdp.print_topics(topics=20, topn=10)
Inference on new documents is based on the approximately LDA-equivalent topics.
Model persistency is achieved through its `load`/`save` methods.
"""
def __init__(self, corpus, id2word, max_chunks=None, max_time=None,
chunksize=256, kappa=1.0, tau=64.0, K=15, T=150, alpha=1,
gamma=1, eta=0.01, scale=1.0, var_converge=0.0001,
outputdir=None):
"""
`gamma`: first level concentration
`alpha`: second level concentration
`eta`: the topic Dirichlet
`T`: top level truncation level
`K`: second level truncation level
`kappa`: learning rate
`tau`: slow down parameter
`max_time`: stop training after this many seconds
`max_chunks`: stop after having processed this many chunks (wrap around
corpus beginning in another corpus pass, if there are not enough chunks
in the corpus)
"""
self.corpus = corpus
self.id2word = id2word
self.chunksize = chunksize
self.max_chunks = max_chunks
self.max_time = max_time
self.outputdir = outputdir
self.lda_alpha = None
self.lda_beta = None
self.m_W = len(id2word)
self.m_D = 0
if corpus:
self.m_D = len(corpus)
self.m_T = T
self.m_K = K
self.m_alpha = alpha
self.m_gamma = gamma
self.m_var_sticks = np.zeros((2, T - 1))
self.m_var_sticks[0] = 1.0
self.m_var_sticks[1] = range(T - 1, 0, -1)
self.m_varphi_ss = np.zeros(T)
self.m_lambda = np.random.gamma(1.0, 1.0, (T, self.m_W)) * self.m_D * 100 / (T * self.m_W) - eta
self.m_eta = eta
self.m_Elogbeta = dirichlet_expectation(self.m_eta + self.m_lambda)
self.m_tau = tau + 1
self.m_kappa = kappa
self.m_scale = scale
self.m_updatect = 0
self.m_status_up_to_date = True
self.m_num_docs_processed = 0
self.m_timestamp = np.zeros(self.m_W, dtype=int)
self.m_r = [0]
self.m_lambda_sum = np.sum(self.m_lambda, axis=1)
self.m_var_converge = var_converge
if self.outputdir:
self.save_options()
# if a training corpus was provided, start estimating the model right away
if corpus is not None:
self.update(corpus)
def | (self, chunk):
if self.lda_alpha is None or self.lda_beta is None:
raise RuntimeError("model must be trained to perform inference")
chunk = list(chunk)
if len(chunk) > 1:
logger.debug("performing inference on a chunk of %i documents" % len(chunk))
gamma = np.zeros((len(chunk), self.lda_beta.shape[0]))
for d, doc in enumerate(chunk):
if not doc: # leave gamma at zero for empty documents
continue
ids, counts = zip(*doc)
_, gammad = lda_e_step(ids, counts, self.lda_alpha, self.lda_beta)
gamma[d, :] = gammad
return gamma
def __getitem__(self, bow, eps=0.01):
is_corpus, corpus = utils.is_corpus(bow)
if is_corpus:
return self._apply(corpus)
gamma = self.inference([bow])[0]
topic_dist = gamma / sum(gamma) if sum(gamma) != 0 else []
return [(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= eps]
def update(self, corpus):
save_freq = max(1, int(10000 / self.chunksize)) # save every 10k docs, roughly
chunks_processed = 0
start_time = time.clock()
while True:
for chunk in utils.grouper(corpus, self.chunksize):
self.update_chunk(chunk)
self.m_num_docs_processed += len(chunk)
chunks_processed += 1
if self.update_finished(start_time, chunks_processed, self.m_num_docs_processed):
self.update_expectations()
alpha, beta = self.hdp_to_lda()
self.lda_alpha = alpha
self.lda_beta = beta
self.print_topics(20)
if self.outputdir:
self.save_topics()
return
elif chunks_processed % save_freq == 0:
self.update_expectations()
# self.save_topics(self.m_num_docs_processed)
self.print_topics(20)
logger.info('PROGRESS: finished document %i of %i' %
(self.m_num_docs_processed, self.m_D))
def update_finished(self, start_time, chunks_processed, docs_processed):
return (
# chunk limit reached
(self.max_chunks and chunks_processed == self.max_chunks) or
# time limit reached
(self.max_time and time.clock() - start_time > self.max_time) or
# no limits and whole corpus has been processed once
(not self.max_chunks and not self.max_time and docs_processed >= self.m_D))
def update_chunk(self, chunk, update=True, opt_o=True):
# Find the unique words in this chunk...
unique_words = dict()
word_list = []
for doc in chunk:
for word_id, _ in doc:
if word_id not in unique_words:
unique_words[word_id] = len(unique_words)
word_list.append(word_id)
Wt = len(word_list) # length of words in these documents
# ...and do the lazy updates on the necessary columns of lambda
rw = np.array([self.m_r[t] for t in self.m_timestamp[word_list]])
self.m_lambda[:, word_list] *= np.exp(self.m_r[-1] - rw)
self.m_Elogbeta[:, word_list] = \
sp.psi(self.m_eta + self.m_lambda[:, word_list]) - \
sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
ss = SuffStats(self.m_T, Wt, len(chunk))
Elogsticks_1st = expect_log_sticks(self.m_var_sticks) # global sticks
# run variational inference on some new docs
score = 0.0
count = 0
for doc in chunk:
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
doc_score = self.doc_e_step(doc, ss, Elogsticks_1st,
word_list, unique_words, doc_word_ids,
doc_word_counts, self.m_var_converge)
count += sum(doc_word_counts)
score += doc_score
if update:
self.update_lambda(ss, word_list, opt_o)
return (score, count)
def doc_e_step(self, doc, ss, Elogsticks_1st, word_list,
                   unique_words, doc_word_ids, doc_word_counts, var_converge):
"""
e step for a single doc
"""
chunkids = [unique_words[id] for id in doc_word_ids]
Elogbeta_doc = self.m_Elogbeta[:, doc_word_ids]
## very similar to the hdp equations
v = np.zeros((2, self.m_K - 1))
v[0] = 1.0
v[1] = self.m_alpha
# back to the uniform
phi = np.ones((len(doc_word_ids), self.m_K)) * 1.0 / self.m_K
likelihood = 0.0
old_likelihood = -1e200
converge = 1.0
eps = 1e-100
iter = 0
max_iter = 100
        # second-level optimization is not supported yet; to be done in the future
while iter < max_iter and (converge < 0.0 or converge > var_converge):
### update variational parameters
# var_phi
if iter < 3:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T)
(log_var_phi, log_norm) = log_normalize(var_phi)
var_phi = np.exp(log_var_phi)
else:
var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T) + Elogsticks_1st
(log_var_phi, log_norm) = log_normalize(var_phi)
var_phi = np.exp(log_var_phi)
# phi
if iter < 3:
phi = np.dot(var_phi, Elogbeta_doc).T
(log_phi, log_norm) = log_normalize(phi)
phi = np.exp(log_phi)
else:
phi = np.dot(var_phi, Elogbeta_doc).T + Elogsticks_2nd
(log_phi, log_norm) = log_normalize(phi)
phi = np.exp(log_phi)
# v
phi_all = phi * np.array(doc_word_counts)[:, np.newaxis]
v[0] = 1.0 + np.sum(phi_all[:, :self.m_K - 1], 0)
phi_cum = np.flipud(np.sum(phi_all[:, 1:], 0))
v[1] = self.m_alpha + np.flipud(np.cumsum(phi_cum))
Elogsticks_2nd = expect_log_sticks(v)
likelihood = 0.0
# compute likelihood
# var_phi part/ C in john's notation
likelihood += np.sum((Elogsticks_1st - log_var_phi) * var_phi)
# v part/ v in john's notation, john's beta is alpha here
log_alpha = np.log(self.m_alpha)
likelihood += (self.m_K - 1) * log_alpha
dig_sum = sp.psi(np.sum(v, 0))
likelihood += np.sum((np.array([1.0, self.m_alpha])[:, np.newaxis] - v) * (sp.psi(v) - dig_sum))
likelihood -= np.sum(sp.gammaln(np.sum(v, 0))) - np.sum(sp.gammaln(v))
# Z part
likelihood += np.sum((Elogsticks_2nd - log_phi) * phi)
# X part, the data part
likelihood += np.sum(phi.T * np.dot(var_phi, Elogbeta_doc * doc_word_counts))
converge = (likelihood - old_likelihood) / abs(old_likelihood)
old_likelihood = likelihood
if converge < -0.000001:
logger.warning('likelihood is decreasing!')
iter += 1
# update the suff_stat ss
# this time it only contains information from one doc
ss.m_var_sticks_ss += np.sum(var_phi, 0)
ss.m_var_beta_ss[:, chunkids] += np.dot(var_phi.T, phi.T * doc_word_counts)
return likelihood
def update_lambda(self, sstats, word_list, opt_o):
self.m_status_up_to_date = False
# rhot will be between 0 and 1, and says how much to weight
# the information we got from this mini-chunk.
rhot = self.m_scale * pow(self.m_tau + self.m_updatect, -self.m_kappa)
if rhot < rhot_bound:
rhot = rhot_bound
self.m_rhot = rhot
# Update appropriate columns of lambda based on documents.
self.m_lambda[:, word_list] = self.m_lambda[:, word_list] * (1 - rhot) + \
rhot * self.m_D * sstats.m_var_beta_ss / sstats.m_chunksize
self.m_lambda_sum = (1 - rhot) * self.m_lambda_sum + \
rhot * self.m_D * np.sum(sstats.m_var_beta_ss, axis=1) / sstats.m_chunksize
self.m_updatect += 1
self.m_timestamp[word_list] = self.m_updatect
self.m_r.append(self.m_r[-1] + np.log(1 - rhot))
self.m_varphi_ss = (1.0 - rhot) * self.m_varphi_ss + rhot * \
sstats.m_var_sticks_ss * self.m_D / sstats.m_chunksize
if opt_o:
self.optimal_ordering()
## update top level sticks
self.m_var_sticks[0] = self.m_varphi_ss[:self.m_T - 1] + 1.0
var_phi_sum = np.flipud(self.m_varphi_ss[1:])
self.m_var_sticks[1] = np.flipud(np.cumsum(var_phi_sum)) + self.m_gamma
def optimal_ordering(self):
"""
        Re-order the topics in decreasing order of their total lambda mass.
"""
idx = matutils.argsort(self.m_lambda_sum, reverse=True)
self.m_varphi_ss = self.m_varphi_ss[idx]
self.m_lambda = self.m_lambda[idx, :]
self.m_lambda_sum = self.m_lambda_sum[idx]
self.m_Elogbeta = self.m_Elogbeta[idx, :]
def update_expectations(self):
"""
Since we're doing lazy updates on lambda, at any given moment
the current state of lambda may not be accurate. This function
updates all of the elements of lambda and Elogbeta
so that if (for example) we want to print out the
topics we've learned we'll get the correct behavior.
"""
for w in xrange(self.m_W):
self.m_lambda[:, w] *= np.exp(self.m_r[-1] -
self.m_r[self.m_timestamp[w]])
self.m_Elogbeta = sp.psi(self.m_eta + self.m_lambda) - \
sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])
self.m_timestamp[:] = self.m_updatect
self.m_status_up_to_date = True
def print_topics(self, topics=20, topn=20):
"""Alias for `show_topics()` that prints the `topn` most
probable words for `topics` number of topics to log.
Set `topics=-1` to print all topics."""
return self.show_topics(topics=topics, topn=topn, log=True)
def show_topics(self, topics=20, topn=20, log=False, formatted=True):
"""
        Return (and optionally log) the `topn` most probable words for `topics`
        number of topics.
        Set `topics=-1` to print all topics.
        Set `formatted=True` to return the topics as a list of strings, or
        `False` to return them as lists of (word, weight) pairs.
"""
if not self.m_status_up_to_date:
self.update_expectations()
betas = self.m_lambda + self.m_eta
hdp_formatter = HdpTopicFormatter(self.id2word, betas)
return hdp_formatter.show_topics(topics, topn, log, formatted)
def save_topics(self, doc_count=None):
"""legacy method; use `self.save()` instead"""
        if not self.outputdir:
            logger.error("cannot store topics without having specified an output directory")
            return
if doc_count is None:
fname = 'final'
else:
fname = 'doc-%i' % doc_count
fname = '%s/%s.topics' % (self.outputdir, fname)
logger.info("saving topics to %s" % fname)
betas = self.m_lambda + self.m_eta
np.savetxt(fname, betas)
def save_options(self):
"""legacy method; use `self.save()` instead"""
if not self.outputdir:
logger.error("cannot store options without having specified an output directory")
return
fname = '%s/options.dat' % self.outputdir
with utils.smart_open(fname, 'wb') as fout:
fout.write('tau: %s\n' % str(self.m_tau - 1))
fout.write('chunksize: %s\n' % str(self.chunksize))
fout.write('var_converge: %s\n' % str(self.m_var_converge))
fout.write('D: %s\n' % str(self.m_D))
fout.write('K: %s\n' % str(self.m_K))
fout.write('T: %s\n' % str(self.m_T))
fout.write('W: %s\n' % str(self.m_W))
fout.write('alpha: %s\n' % str(self.m_alpha))
fout.write('kappa: %s\n' % str(self.m_kappa))
fout.write('eta: %s\n' % str(self.m_eta))
fout.write('gamma: %s\n' % str(self.m_gamma))
def hdp_to_lda(self):
"""
        Compute the (approximately) LDA-equivalent parameters of this HDP model.
"""
# alpha
sticks = self.m_var_sticks[0] / (self.m_var_sticks[0] + self.m_var_sticks[1])
alpha = np.zeros(self.m_T)
left = 1.0
for i in xrange(0, self.m_T - 1):
alpha[i] = sticks[i] * left
left = left - alpha[i]
alpha[self.m_T - 1] = left
alpha = alpha * self.m_alpha
# beta
beta = (self.m_lambda + self.m_eta) / (self.m_W * self.m_eta + \
self.m_lambda_sum[:, np.newaxis])
return (alpha, beta)
def evaluate_test_corpus(self, corpus):
logger.info('TEST: evaluating test corpus')
if self.lda_alpha is None or self.lda_beta is None:
self.lda_alpha, self.lda_beta = self.hdp_to_lda()
score = 0.0
total_words = 0
for i, doc in enumerate(corpus):
if len(doc) > 0:
doc_word_ids, doc_word_counts = zip(*doc)
likelihood, gamma = lda_e_step(doc_word_ids, doc_word_counts, self.lda_alpha, self.lda_beta)
theta = gamma / np.sum(gamma)
lda_betad = self.lda_beta[:, doc_word_ids]
log_predicts = np.log(np.dot(theta, lda_betad))
doc_score = sum(log_predicts) / len(doc)
logger.info('TEST: %6d %.5f' % (i, doc_score))
score += likelihood
total_words += sum(doc_word_counts)
logger.info('TEST: average score: %.5f, total score: %.5f, test docs: %d' % (score / total_words, score, len(corpus)))
return score
#endclass HdpModel
class HdpTopicFormatter(object):
(STYLE_GENSIM, STYLE_PRETTY) = (1, 2)
def __init__(self, dictionary=None, topic_data=None, topic_file=None, style=None):
if dictionary is None:
raise ValueError('no dictionary!')
if topic_data is not None:
topics = topic_data
elif topic_file is not None:
topics = np.loadtxt('%s' % topic_file)
else:
raise ValueError('no topic data!')
# sort topics
topics_sums = np.sum(topics, axis=1)
idx = matutils.argsort(topics_sums, reverse=True)
self.data = topics[idx]
self.dictionary = dictionary
if style is None:
style = self.STYLE_GENSIM
self.style = style
def print_topics(self, topics=10, topn=10):
return self.show_topics(topics, topn, True)
def show_topics(self, topics=10, topn=10, log=False, formatted=True):
shown = []
if topics < 0:
topics = len(self.data)
topics = min(topics, len(self.data))
for k in xrange(topics):
lambdak = list(self.data[k, :])
lambdak = lambdak / sum(lambdak)
temp = zip(lambdak, xrange(len(lambdak)))
temp = sorted(temp, key=lambda x: x[0], reverse=True)
topic_terms = self.show_topic_terms(temp, topn)
if formatted:
topic = self.format_topic(k, topic_terms)
# assuming we only output formatted topics
if log:
logger.info(topic)
else:
topic = (k, topic_terms)
shown.append(topic)
return shown
def show_topic_terms(self, topic_data, topn):
return [(self.dictionary[wid], weight) for (weight, wid) in topic_data[:topn]]
def format_topic(self, topic_id, topic_terms):
if self.STYLE_GENSIM == self.style:
fmt = ' + '.join(['%.3f*%s' % (weight, word) for (word, weight) in topic_terms])
fmt = 'topic %i: %s' % (topic_id, fmt)
else:
fmt = '\n'.join([' %20s %.8f' % (word, weight) for (word, weight) in topic_terms])
fmt = 'topic %i:\n%s' % (topic_id, fmt)
return fmt
#endclass HdpTopicFormatter
| inference | identifier_name |
mod.rs | #![macro_escape]
use serialize::json::{Json, ParserError};
use url::Url;
use std::collections::HashMap;
use std::io::IoError;
use std::local_data::Ref;
#[cfg(not(teepee))]
pub use self::http::Client;
#[cfg(teepee)]
pub use self::teepee::Client;
mod http;
mod teepee;
macro_rules! params {
{$($key:expr: $val:expr,)+} => (
{
use std::collections::HashMap;
let mut params: HashMap<String, String> = HashMap::new();
$(
params.insert($key.into_string(), $val.to_string());
)+
params
}
);
}
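// Expansion sketch (added for exposition): `params!{"k": "v",}` builds a
// `HashMap<String, String>` by inserting each key/value pair, so it is
// equivalent to creating the map by hand and calling
// `params.insert("k".into_string(), "v".to_string())` for every entry; see
// `test_params` at the bottom of this module for a compiling example.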
pub static USER_AGENT: &'static str = "rawr v0.1 (github.com/cybergeek94/rawr)";
local_data_key!(_modhash: String)
pub type JsonError = ParserError;
pub type JsonResult<T> = Result<T, JsonError>;
pub trait JsonClient {
/// Make a GET request, returning a Json response. The GET parameters should be in the passed URL.
/// Implementers should update the local modhash by using `set_modhash()`
fn get(&self, url: &Url) -> JsonResult<Json>;
/// Make a POST request, returning the JSON response
fn post(&self, url: &Url, params: HashMap<String, String>) -> JsonResult<Json>;
/// Make a POST request, including the value of `set_modhash` as the `X-Modhash` header
/// and the session cookie
fn post_modhash(&self, url: &Url, params: HashMap<String, String>, session: &str) -> JsonResult<Json>;
}
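// Modhash flow (added note): per the doc comments above, `get` implementations
// are expected to record the modhash from each response via `set_modhash()`,
// and `post_modhash` reads it back through the task-local slot declared below,
// so the state lives per task rather than inside any particular client value.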
pub fn set_modhash(modhash: &str) {
_modhash.replace(Some(modhash.into_string()));
}
pub fn get_modhash() -> Option<Ref<String>> |
pub fn has_modhash() -> bool {
_modhash.get().is_some()
}
/// Map a std::io::IoError to a serialize::json::IoError (ParserError variant)
pub fn err_io_to_json_io(err: IoError) -> ParserError {
super::serialize::json::IoError(err.kind, err.desc)
}
#[test]
fn test_params() {
let params = params!{
"hello": "goodbye",
"yes": "no",
};
drop(params);
}
| {
_modhash.get()
} | identifier_body |
mod.rs | #![macro_escape]
use serialize::json::{Json, ParserError};
use url::Url;
use std::collections::HashMap;
use std::io::IoError;
use std::local_data::Ref;
#[cfg(not(teepee))]
pub use self::http::Client;
#[cfg(teepee)]
pub use self::teepee::Client;
mod http;
mod teepee;
macro_rules! params {
{$($key:expr: $val:expr,)+} => (
{
use std::collections::HashMap;
let mut params: HashMap<String, String> = HashMap::new();
$(
params.insert($key.into_string(), $val.to_string());
)+
params
}
);
}
pub static USER_AGENT: &'static str = "rawr v0.1 (github.com/cybergeek94/rawr)";
local_data_key!(_modhash: String)
pub type JsonError = ParserError;
pub type JsonResult<T> = Result<T, JsonError>;
pub trait JsonClient {
/// Make a GET request, returning a Json response. The GET parameters should be in the passed URL.
/// Implementers should update the local modhash by using `set_modhash()`
fn get(&self, url: &Url) -> JsonResult<Json>;
/// Make a POST request, returning the JSON response
fn post(&self, url: &Url, params: HashMap<String, String>) -> JsonResult<Json>;
/// Make a POST request, including the value of `set_modhash` as the `X-Modhash` header
/// and the session cookie
fn post_modhash(&self, url: &Url, params: HashMap<String, String>, session: &str) -> JsonResult<Json>;
}
pub fn set_modhash(modhash: &str) {
_modhash.replace(Some(modhash.into_string()));
}
pub fn get_modhash() -> Option<Ref<String>> {
_modhash.get()
}
pub fn has_modhash() -> bool {
_modhash.get().is_some()
}
/// Map a std::io::IoError to a serialize::json::IoError (ParserError variant)
pub fn err_io_to_json_io(err: IoError) -> ParserError {
super::serialize::json::IoError(err.kind, err.desc)
} |
#[test]
fn test_params() {
let params = params!{
"hello": "goodbye",
"yes": "no",
};
drop(params);
} | random_line_split |
|
mod.rs | #![macro_escape]
use serialize::json::{Json, ParserError};
use url::Url;
use std::collections::HashMap;
use std::io::IoError;
use std::local_data::Ref;
#[cfg(not(teepee))]
pub use self::http::Client;
#[cfg(teepee)]
pub use self::teepee::Client;
mod http;
mod teepee;
macro_rules! params {
{$($key:expr: $val:expr,)+} => (
{
use std::collections::HashMap;
let mut params: HashMap<String, String> = HashMap::new();
$(
params.insert($key.into_string(), $val.to_string());
)+
params
}
);
}
pub static USER_AGENT: &'static str = "rawr v0.1 (github.com/cybergeek94/rawr)";
local_data_key!(_modhash: String)
pub type JsonError = ParserError;
pub type JsonResult<T> = Result<T, JsonError>;
pub trait JsonClient {
/// Make a GET request, returning a Json response. The GET parameters should be in the passed URL.
/// Implementers should update the local modhash by using `set_modhash()`
fn get(&self, url: &Url) -> JsonResult<Json>;
/// Make a POST request, returning the JSON response
fn post(&self, url: &Url, params: HashMap<String, String>) -> JsonResult<Json>;
/// Make a POST request, including the value of `set_modhash` as the `X-Modhash` header
/// and the session cookie
fn post_modhash(&self, url: &Url, params: HashMap<String, String>, session: &str) -> JsonResult<Json>;
}
pub fn | (modhash: &str) {
_modhash.replace(Some(modhash.into_string()));
}
pub fn get_modhash() -> Option<Ref<String>> {
_modhash.get()
}
pub fn has_modhash() -> bool {
_modhash.get().is_some()
}
/// Map a std::io::IoError to a serialize::json::IoError (ParserError variant)
pub fn err_io_to_json_io(err: IoError) -> ParserError {
super::serialize::json::IoError(err.kind, err.desc)
}
#[test]
fn test_params() {
let params = params!{
"hello": "goodbye",
"yes": "no",
};
drop(params);
}
| set_modhash | identifier_name |
ListAssetTransactions.ts | import { ListAssetTransactions } from '../../src/Commands'
import { TestCommand } from '../test-helpers'
describe('/ListAssetTransactions', function() {
it(
'ListAssetTransactions() should return a properly configured JSON-RPC request',
function() {
const asset = 'asset'
const verbose = true
const count = 10
const start = 0
const localOrdering = false
// Signature 1: [string]
TestCommand(
ListAssetTransactions(asset),
ListAssetTransactions,
[asset],
)
// Signature 2: [string, boolean]
TestCommand(
ListAssetTransactions(asset, verbose),
ListAssetTransactions,
[asset, verbose],
)
// Signature 3: [string, boolean, number]
TestCommand(
ListAssetTransactions(asset, verbose, count),
ListAssetTransactions,
[asset, verbose, count],
)
// Signature 4: [string, boolean, number, number]
TestCommand(
ListAssetTransactions(asset, verbose, count, start),
ListAssetTransactions,
[asset, verbose, count, start],
)
// Signature 5: [string, boolean, number, number, boolean]
TestCommand(
ListAssetTransactions(asset, verbose, count, start, localOrdering), | [asset, verbose, count, start, localOrdering],
)
},
)
}) | ListAssetTransactions, | random_line_split |
single_thread_calculator.rs | use crate::{MonteCarloPiCalculator, gen_random};
use std::sync::Arc;
pub struct SingleThreadCalculator {}
impl SingleThreadCalculator {
#[inline]
fn gen_randoms_static(n: usize) -> (Vec<f64>, Vec<f64>) {
let mut xs = vec![0.0; n];
let mut ys = vec![0.0; n];
for i in 0..n {
let mut t = gen_random(i as f64 / n as f64);
t = gen_random(t);
t = gen_random(t);
xs[i] = t;
for _ in 0..10 {
t = gen_random(t);
}
ys[i] = t;
}
return (xs, ys);
}
#[inline]
#[allow(unused_parens)]
fn cal_static(xs: &Arc<Vec<f64>>, ys: &Arc<Vec<f64>>, n: usize) -> u64 {
let mut cnt = 0;
for i in 0..n {
if (xs[i] * xs[i] + ys[i] * ys[i] < 1.0) |
}
return cnt;
}
}
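// Illustrative sketch (added; not part of the original file): combining the
// two steps gives the usual Monte Carlo estimate pi ~= 4 * cnt / n, since the
// points are drawn in the unit square and counted inside the quarter circle.
#[allow(dead_code)]
fn estimate_pi_sketch(n: usize) -> f64 {
    let calc = SingleThreadCalculator::new(n);
    let (xs, ys) = calc.gen_randoms(n);
    let (xs, ys) = (Arc::new(xs), Arc::new(ys));
    let cnt = calc.cal(&xs, &ys, n);
    4.0 * cnt as f64 / n as f64
}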
impl MonteCarloPiCalculator for SingleThreadCalculator {
#[inline]
fn new(_n: usize) -> SingleThreadCalculator {
return SingleThreadCalculator {};
}
#[inline]
fn gen_randoms(&self, n: usize) -> (Vec<f64>, Vec<f64>) {
return SingleThreadCalculator::gen_randoms_static(n);
}
#[inline]
fn cal(&self, xs: &Arc<Vec<f64>>, ys: &Arc<Vec<f64>>, n: usize) -> u64 {
return SingleThreadCalculator::cal_static(xs, ys, n);
}
} | {
cnt += 1;
} | conditional_block |
single_thread_calculator.rs | use crate::{MonteCarloPiCalculator, gen_random};
use std::sync::Arc;
pub struct SingleThreadCalculator {}
impl SingleThreadCalculator {
#[inline]
fn gen_randoms_static(n: usize) -> (Vec<f64>, Vec<f64>) {
let mut xs = vec![0.0; n];
let mut ys = vec![0.0; n];
for i in 0..n {
let mut t = gen_random(i as f64 / n as f64);
t = gen_random(t);
t = gen_random(t);
xs[i] = t;
for _ in 0..10 {
t = gen_random(t);
}
ys[i] = t;
}
return (xs, ys);
}
#[inline]
#[allow(unused_parens)]
fn | (xs: &Arc<Vec<f64>>, ys: &Arc<Vec<f64>>, n: usize) -> u64 {
let mut cnt = 0;
for i in 0..n {
if (xs[i] * xs[i] + ys[i] * ys[i] < 1.0) {
cnt += 1;
}
}
return cnt;
}
}
impl MonteCarloPiCalculator for SingleThreadCalculator {
#[inline]
fn new(_n: usize) -> SingleThreadCalculator {
return SingleThreadCalculator {};
}
#[inline]
fn gen_randoms(&self, n: usize) -> (Vec<f64>, Vec<f64>) {
return SingleThreadCalculator::gen_randoms_static(n);
}
#[inline]
fn cal(&self, xs: &Arc<Vec<f64>>, ys: &Arc<Vec<f64>>, n: usize) -> u64 {
return SingleThreadCalculator::cal_static(xs, ys, n);
}
} | cal_static | identifier_name |
single_thread_calculator.rs | use crate::{MonteCarloPiCalculator, gen_random};
use std::sync::Arc;
pub struct SingleThreadCalculator {}
impl SingleThreadCalculator {
#[inline]
fn gen_randoms_static(n: usize) -> (Vec<f64>, Vec<f64>) {
let mut xs = vec![0.0; n];
let mut ys = vec![0.0; n];
for i in 0..n {
let mut t = gen_random(i as f64 / n as f64);
t = gen_random(t);
t = gen_random(t);
xs[i] = t;
for _ in 0..10 {
t = gen_random(t);
}
ys[i] = t;
}
return (xs, ys);
}
#[inline]
#[allow(unused_parens)]
fn cal_static(xs: &Arc<Vec<f64>>, ys: &Arc<Vec<f64>>, n: usize) -> u64 |
}
impl MonteCarloPiCalculator for SingleThreadCalculator {
#[inline]
fn new(_n: usize) -> SingleThreadCalculator {
return SingleThreadCalculator {};
}
#[inline]
fn gen_randoms(&self, n: usize) -> (Vec<f64>, Vec<f64>) {
return SingleThreadCalculator::gen_randoms_static(n);
}
#[inline]
fn cal(&self, xs: &Arc<Vec<f64>>, ys: &Arc<Vec<f64>>, n: usize) -> u64 {
return SingleThreadCalculator::cal_static(xs, ys, n);
}
} | {
let mut cnt = 0;
for i in 0..n {
if (xs[i] * xs[i] + ys[i] * ys[i] < 1.0) {
cnt += 1;
}
}
return cnt;
} | identifier_body |
single_thread_calculator.rs | use crate::{MonteCarloPiCalculator, gen_random};
use std::sync::Arc;
pub struct SingleThreadCalculator {}
impl SingleThreadCalculator {
#[inline]
fn gen_randoms_static(n: usize) -> (Vec<f64>, Vec<f64>) {
let mut xs = vec![0.0; n];
let mut ys = vec![0.0; n];
for i in 0..n {
let mut t = gen_random(i as f64 / n as f64);
t = gen_random(t);
t = gen_random(t);
xs[i] = t;
for _ in 0..10 {
t = gen_random(t);
}
ys[i] = t;
}
return (xs, ys);
} | fn cal_static(xs: &Arc<Vec<f64>>, ys: &Arc<Vec<f64>>, n: usize) -> u64 {
let mut cnt = 0;
for i in 0..n {
if (xs[i] * xs[i] + ys[i] * ys[i] < 1.0) {
cnt += 1;
}
}
return cnt;
}
}
impl MonteCarloPiCalculator for SingleThreadCalculator {
#[inline]
fn new(_n: usize) -> SingleThreadCalculator {
return SingleThreadCalculator {};
}
#[inline]
fn gen_randoms(&self, n: usize) -> (Vec<f64>, Vec<f64>) {
return SingleThreadCalculator::gen_randoms_static(n);
}
#[inline]
fn cal(&self, xs: &Arc<Vec<f64>>, ys: &Arc<Vec<f64>>, n: usize) -> u64 {
return SingleThreadCalculator::cal_static(xs, ys, n);
}
} |
#[inline]
#[allow(unused_parens)] | random_line_split |
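The calculator above counts samples of the unit square that satisfy x*x + y*y < 1.0, i.e. fall inside the quarter circle, so pi can be recovered as 4 * cnt / n. A minimal Python sketch of the same estimate, substituting the standard-library RNG for the gen_random chaining:

import random

def estimate_pi(n):
    # Count samples landing inside the quarter circle, mirroring cal_static.
    cnt = 0
    for _ in range(n):
        x, y = random.random(), random.random()
        if x * x + y * y < 1.0:
            cnt += 1
    # The quarter circle covers pi/4 of the unit square.
    return 4.0 * cnt / n

print(estimate_pi(10**6))  # converges toward 3.14159... as n grows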
http_stream.py | #!/usr/bin/env python
#
# Update a redis server cache when an event is triggered
# in MySQL replication log
#
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import *
mysql_settings = {'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'passwd': ''}
import json
import cherrypy
class Streamer(object):
def __init__(self):
|
def index(self):
cherrypy.response.headers['Content-Type'] = 'text/plain'
def content():
for binlogevent in self.stream:
for row in binlogevent.rows:
if isinstance(binlogevent, DeleteRowsEvent):
yield json.dumps({
"action": "delete",
"id": row["values"]["id"]}) + "\n"
elif isinstance(binlogevent, UpdateRowsEvent):
yield json.dumps({
"action": "update",
"id": row["after_values"]["id"],
"doc": row["after_values"]}) + "\n"
elif isinstance(binlogevent, WriteRowsEvent):
yield json.dumps({
"action": "insert",
"id": row["values"]["id"],
"doc": row["values"]}) + "\n"
return content()
index.exposed = True
index._cp_config = {"response.stream": True}
cherrypy.quickstart(Streamer())
| self.stream = BinLogStreamReader(connection_settings = mysql_settings,
only_events = [DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent], blocking = True, resume_stream = True) | identifier_body |
http_stream.py | #!/usr/bin/env python
#
# Update a redis server cache when an event is triggered
# in MySQL replication log
#
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import *
mysql_settings = {'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'passwd': ''}
import json
import cherrypy
class Streamer(object):
def __init__(self):
self.stream = BinLogStreamReader(connection_settings = mysql_settings,
only_events = [DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent], blocking = True, resume_stream = True)
def index(self):
cherrypy.response.headers['Content-Type'] = 'text/plain'
def content():
for binlogevent in self.stream:
for row in binlogevent.rows:
if isinstance(binlogevent, DeleteRowsEvent):
yield json.dumps({
"action": "delete",
"id": row["values"]["id"]}) + "\n"
elif isinstance(binlogevent, UpdateRowsEvent):
yield json.dumps({
"action": "update",
"id": row["after_values"]["id"],
"doc": row["after_values"]}) + "\n"
elif isinstance(binlogevent, WriteRowsEvent):
|
return content()
index.exposed = True
index._cp_config = {"response.stream": True}
cherrypy.quickstart(Streamer())
| yield json.dumps({
"action": "insert",
"id": row["values"]["id"],
"doc": row["values"]}) + "\n" | conditional_block |
http_stream.py | #!/usr/bin/env python
#
# Update a redis server cache when an event is triggered
# in MySQL replication log
#
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import *
mysql_settings = {'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'passwd': ''}
import json
import cherrypy
class Streamer(object):
def __init__(self):
self.stream = BinLogStreamReader(connection_settings = mysql_settings,
only_events = [DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent], blocking = True, resume_stream = True)
def index(self):
cherrypy.response.headers['Content-Type'] = 'text/plain'
def | ():
for binlogevent in self.stream:
for row in binlogevent.rows:
if isinstance(binlogevent, DeleteRowsEvent):
yield json.dumps({
"action": "delete",
"id": row["values"]["id"]}) + "\n"
elif isinstance(binlogevent, UpdateRowsEvent):
yield json.dumps({
"action": "update",
"id": row["after_values"]["id"],
"doc": row["after_values"]}) + "\n"
elif isinstance(binlogevent, WriteRowsEvent):
yield json.dumps({
"action": "insert",
"id": row["values"]["id"],
"doc": row["values"]}) + "\n"
return content()
index.exposed = True
index._cp_config = {"response.stream": True}
cherrypy.quickstart(Streamer())
| content | identifier_name |
http_stream.py | #!/usr/bin/env python
#
# Update a redis server cache when an event is triggered
# in MySQL replication log
#
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import *
mysql_settings = {'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'passwd': ''}
import json
import cherrypy
class Streamer(object):
def __init__(self):
self.stream = BinLogStreamReader(connection_settings = mysql_settings,
only_events = [DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent], blocking = True, resume_stream = True)
def index(self):
cherrypy.response.headers['Content-Type'] = 'text/plain'
def content():
for binlogevent in self.stream:
for row in binlogevent.rows:
if isinstance(binlogevent, DeleteRowsEvent):
yield json.dumps({
"action": "delete",
"id": row["values"]["id"]}) + "\n"
elif isinstance(binlogevent, UpdateRowsEvent):
yield json.dumps({
"action": "update",
"id": row["after_values"]["id"],
"doc": row["after_values"]}) + "\n"
elif isinstance(binlogevent, WriteRowsEvent):
yield json.dumps({
"action": "insert",
"id": row["values"]["id"],
"doc": row["values"]}) + "\n"
return content()
index.exposed = True |
cherrypy.quickstart(Streamer()) | index._cp_config = {"response.stream": True} | random_line_split |
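A client of the stream above only needs to read newline-delimited JSON and dispatch on the action field. A sketch using the Python 3 standard library; the URL assumes CherryPy's quickstart default of localhost:8080, which the server code does not override:

import json
from urllib.request import urlopen

def follow(url="http://127.0.0.1:8080/"):
    with urlopen(url) as resp:
        for raw in resp:  # one JSON document per line
            event = json.loads(raw.decode("utf-8"))
            # action is "insert", "update" or "delete" per the server above.
            if event["action"] == "delete":
                print("drop cache entry", event["id"])
            else:
                print(event["action"], event["id"], event.get("doc"))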
old_clustering_example.py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 03 10:16:52 2015
@author: Keine
"""
import sqlite3
cx = sqlite3.connect("../text2DB/get_data.db")
distxy = [([0.0] * 49) for i in range(49)]
cu = cx.cursor()
for i in range(49):
|
cx.close();
#print distxy[49-1][48-1]
#from scipy.cluster.hierarchy import linkage, dendrogram
#R = dendrogram(linkage(distxy, method='complete'))
#suptitle('Cluster Dendrogram', fontweight='bold', fontsize=14);
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
data_dist = pdist(distxy) # computing the distance
data_link = linkage(data_dist) # computing the linkage
dendrogram(data_link)
plt.xlabel('User_ID')
plt.ylabel('Similarity ratio')
plt.suptitle('Hierarchy Clustering', fontweight='bold', fontsize=14);
| for j in range(49):
if i == j:
distxy[i][j] = 0.0
else:
print i
print j
sql = cu.execute("""select similarity from old_similarity where id1 = ? and id2 = ?""", (i,j))
if sql.fetchall() == []:
sim = 0
else:
# fetchall() above consumed the cursor, so the query is re-run before fetchone()
sql = cu.execute("""select similarity from old_similarity where id1 = ? and id2 = ?""", (i,j))
sim = float(sql.fetchone()[0])
distxy[i][j] = sim | conditional_block |
old_clustering_example.py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 03 10:16:52 2015
@author: Keine
"""
import sqlite3
cx = sqlite3.connect("../text2DB/get_data.db")
distxy = [([0.0] * 49) for i in range(49)]
cu = cx.cursor()
for i in range(49):
for j in range(49):
if i == j:
distxy[i][j] = 0.0
else:
print i
print j
sql = cu.execute("""select similarity from old_similarity where id1 = ? and id2 = ?""", (i,j))
if sql.fetchall() == []:
sim = 0
else:
# fetchall() above consumed the cursor, so the query is re-run before fetchone()
sql = cu.execute("""select similarity from old_similarity where id1 = ? and id2 = ?""", (i,j))
sim = float(sql.fetchone()[0])
distxy[i][j] = sim
cx.close();
#print distxy[49-1][48-1]
#from scipy.cluster.hierarchy import linkage, dendrogram
#R = dendrogram(linkage(distxy, method='complete')) |
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
data_dist = pdist(distxy) # computing the distance
data_link = linkage(data_dist) # computing the linkage
dendrogram(data_link)
plt.xlabel('User_ID')
plt.ylabel('Similarity ratio')
plt.suptitle('Hierarchy Clustering', fontweight='bold', fontsize=14); |
#suptitle('Cluster Dendrogram', fontweight='bold', fontsize=14); | random_line_split |
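Note that pdist above treats every row of distxy as a coordinate vector and measures distances between rows, even though the matrix is itself pairwise data. If distxy is meant as a similarity matrix in [0, 1] — an assumption about its scale — the more direct route is to convert it to distances and hand SciPy the condensed form:

import numpy as np
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import linkage, dendrogram

sim = np.array(distxy)            # pairwise similarity, assumed in [0, 1]
dist = 1.0 - sim                  # higher similarity -> smaller distance
dist = (dist + dist.T) / 2.0      # enforce symmetry before squareform
np.fill_diagonal(dist, 0.0)       # self-distance must be exactly zero
condensed = squareform(dist, checks=False)
dendrogram(linkage(condensed, method='average'))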
merge-collection.js | /*
* Copyright 2016-2017 Hewlett Packard Enterprise Development Company, L.P.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
*/
define([
'backbone',
'find/app/util/merge-collection'
], function(Backbone, MergeCollection) {
'use strict';
const Animal = {CAT: 'CAT', DOG: 'DOG'};
describe('Merge collection', function() {
beforeEach(function() {
this.catCollection = new Backbone.Collection([
{id: 0, name: 'Oscar', animal: Animal.CAT},
{id: 1, name: 'Katy', animal: Animal.CAT}
]);
this.dogCollection = new Backbone.Collection([
{id: 0, name: 'Rover', animal: Animal.DOG}
]);
this.mergeCollection = new MergeCollection([], {
collections: [this.catCollection, this.dogCollection],
comparator: 'name',
typeAttribute: 'animal'
});
});
it('adds all existing models on construction', function() {
expect(this.mergeCollection.length).toBe(3);
expect(this.mergeCollection.at(0)).toBe(this.catCollection.at(1));
expect(this.mergeCollection.at(1)).toBe(this.catCollection.at(0));
expect(this.mergeCollection.at(2)).toBe(this.dogCollection.at(0));
});
it('adds models added to the tracked collection from itself', function() {
const model = this.dogCollection.add({id: 1, name: 'Barky', animal: Animal.DOG});
expect(this.mergeCollection.length).toBe(4);
expect(this.mergeCollection.findWhere({name: 'Barky'})).toBe(model);
});
it('removes models removed from the tracked collections from itself', function() {
this.catCollection.remove(0);
expect(this.mergeCollection.length).toBe(2);
expect(this.mergeCollection.findWhere({name: 'Oscar'})).toBeUndefined();
});
it('resets itself when one of the tracked collections is reset', function() {
this.catCollection.reset([
{id: 3, name: 'Tom', animal: Animal.CAT}
]);
| expect(this.mergeCollection.at(0)).toBe(this.dogCollection.at(0));
expect(this.mergeCollection.at(1)).toBe(this.catCollection.at(0));
});
it('preserves the collection reference on the models', function() {
expect(this.mergeCollection.findWhere({animal: Animal.CAT}).collection).toBe(this.catCollection);
});
it('handles adding two new models to the tracked collection', function() {
// New models have no id
this.catCollection.add({name: 'Willow', animal: Animal.CAT});
this.catCollection.add({name: 'George', animal: Animal.CAT});
expect(this.mergeCollection.length).toBe(5);
});
});
}); | expect(this.mergeCollection.length).toBe(2); | random_line_split |
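The contract under test — one always-sorted view over several live source collections — is easy to state outside Backbone. A Python approximation (the event wiring is elided) that reproduces the ordering the specs above assert:

class MergeView:
    """Read-only merged view over several lists, kept sorted by a key."""
    def __init__(self, sources, key):
        self.sources = sources          # e.g. [cat_collection, dog_collection]
        self.key = key                  # comparator, e.g. the 'name' field

    def items(self):
        merged = [m for src in self.sources for m in src]
        return sorted(merged, key=self.key)

cats = [{"name": "Oscar"}, {"name": "Katy"}]
dogs = [{"name": "Rover"}]
view = MergeView([cats, dogs], key=lambda m: m["name"])
assert [m["name"] for m in view.items()] == ["Katy", "Oscar", "Rover"]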
sk.js | /*
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2007 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Slovak language file.
*/
var FCKLang =
{
// Language direction : "ltr" (left to right) or "rtl" (right to left).
Dir : "ltr",
ToolbarCollapse : "Skryť panel nástrojov",
ToolbarExpand : "Zobraziť panel nástrojov",
// Toolbar Items and Context Menu
Save : "Uložit",
NewPage : "Nová stránka",
Preview : "Náhľad",
Cut : "Vystrihnúť",
Copy : "Kopírovať",
Paste : "Vložiť",
PasteText : "Vložiť ako čistý text",
PasteWord : "Vložiť z Wordu",
Print : "Tlač",
SelectAll : "Vybrať všetko",
RemoveFormat : "Odstrániť formátovanie",
InsertLinkLbl : "Odkaz",
InsertLink : "Vložiť/zmeniť odkaz",
RemoveLink : "Odstrániť odkaz",
Anchor : "Vložiť/zmeniť kotvu",
AnchorDelete : "Remove Anchor", //MISSING
InsertImageLbl : "Obrázok",
InsertImage : "Vložiť/zmeniť obrázok",
InsertFlashLbl : "Flash",
InsertFlash : "Vložiť/zmeniť Flash",
InsertTableLbl : "Tabuľka",
InsertTable : "Vložiť/zmeniť tabuľku",
InsertLineLbl : "Čiara",
InsertLine : "Vložiť vodorovnú čiaru",
InsertSpecialCharLbl: "Špeciálne znaky",
InsertSpecialChar : "Vložiť špeciálne znaky",
InsertSmileyLbl : "Smajlíky",
InsertSmiley : "Vložiť smajlíka",
About : "O aplikáci FCKeditor",
Bold : "Tučné",
Italic : "Kurzíva",
Underline : "Podčiarknuté",
StrikeThrough : "Prečiarknuté",
Subscript : "Dolný index",
Superscript : "Horný index",
LeftJustify : "Zarovnať vľavo",
CenterJustify : "Zarovnať na stred",
RightJustify : "Zarovnať vpravo",
BlockJustify : "Zarovnať do bloku",
DecreaseIndent : "Zmenšiť odsadenie",
IncreaseIndent : "Zväčšiť odsadenie",
Blockquote : "Blockquote", //MISSING
Undo : "Späť",
Redo : "Znovu",
NumberedListLbl : "Číslovanie",
NumberedList : "Vložiť/odstrániť číslovaný zoznam",
BulletedListLbl : "Odrážky",
BulletedList : "Vložiť/odstraniť odrážky",
ShowTableBorders : "Zobraziť okraje tabuliek",
ShowDetails : "Zobraziť podrobnosti",
Style : "Štýl",
FontFormat : "Formát",
Font : "Písmo",
FontSize : "Veľkosť",
TextColor : "Farba textu",
BGColor : "Farba pozadia",
Source : "Zdroj",
Find : "Hľadať",
Replace : "Nahradiť",
SpellCheck : "Kontrola pravopisu",
UniversalKeyboard : "Univerzálna klávesnica",
PageBreakLbl : "Oddeľovač stránky",
PageBreak : "Vložiť oddeľovač stránky",
Form : "Formulár",
Checkbox : "Zaškrtávacie políčko",
RadioButton : "Prepínač",
TextField : "Textové pole",
Textarea : "Textová oblasť",
HiddenField : "Skryté pole",
Button : "Tlačíidlo",
SelectionField : "Rozbaľovací zoznam",
ImageButton : "Obrázkové tlačidlo",
FitWindow : "Maximalizovať veľkosť okna editora",
ShowBlocks : "Show Blocks", //MISSING
// Context Menu
EditLink : "Zmeniť odkaz",
CellCM : "Bunka",
RowCM : "Riadok",
ColumnCM : "Stĺpec",
InsertRowAfter : "Insert Row After", //MISSING
InsertRowBefore : "Insert Row Before", //MISSING
DeleteRows : "Vymazať riadok",
InsertColumnAfter : "Insert Column After", //MISSING
InsertColumnBefore : "Insert Column Before", //MISSING
DeleteColumns : "Zmazať stĺpec",
InsertCellAfter : "Insert Cell After", //MISSING
InsertCellBefore : "Insert Cell Before", //MISSING
DeleteCells : "Vymazať bunky",
MergeCells : "Zlúčiť bunky",
MergeRight : "Merge Right", //MISSING
MergeDown : "Merge Down", //MISSING
HorizontalSplitCell : "Split Cell Horizontally", //MISSING
VerticalSplitCell : "Split Cell Vertically", //MISSING
TableDelete : "Vymazať tabuľku",
CellProperties : "Vlastnosti bunky",
TableProperties : "Vlastnosti tabuľky",
ImageProperties : "Vlastnosti obrázku",
FlashProperties : "Vlastnosti Flashu",
AnchorProp : "Vlastnosti kotvy",
ButtonProp : "Vlastnosti tlačidla",
CheckboxProp : "Vlastnosti zaškrtávacieho políčka",
HiddenFieldProp : "Vlastnosti skrytého poľa",
RadioButtonProp : "Vlastnosti prepínača",
ImageButtonProp : "Vlastnosti obrázkového tlačidla",
TextFieldProp : "Vlastnosti textového poľa",
SelectionFieldProp : "Vlastnosti rozbaľovacieho zoznamu",
TextareaProp : "Vlastnosti textovej oblasti",
FormProp : "Vlastnosti formulára",
FontFormats : "Normálny;Formátovaný;Adresa;Nadpis 1;Nadpis 2;Nadpis 3;Nadpis 4;Nadpis 5;Nadpis 6;Odsek (DIV)",
// Alerts and Messages
ProcessingXHTML : "Prebieha spracovanie XHTML. Čakajte prosím...",
Done : "Dokončené.",
PasteWordConfirm : "Vyzerá to tak, že vkladaný text je kopírovaný z Wordu. Chcete ho pred vložením vyčistiť?",
NotCompatiblePaste : "Tento príkaz je dostupný len v prehliadači Internet Explorer verzie 5.5 alebo vyššej. Chcete vložiť text bez vyčistenia?",
UnknownToolbarItem : "Neznáma položka panela nástrojov \"%1\"",
UnknownCommand : "Neznámy príkaz \"%1\"",
NotImplemented : "Príkaz nie je implementovaný",
UnknownToolbarSet : "Panel nástrojov \"%1\" neexistuje",
NoActiveX : "Bezpečnostné nastavenia vášho prehliadača môžu obmedzovať niektoré funkcie editora. Pre ich plnú funkčnosť musíte zapnúť voľbu \"Spúšťať ActiveX moduly a zásuvné moduly\", inak sa môžete stretnúť s chybami a nefunkčnosťou niektorých funkcií.",
BrowseServerBlocked : "Prehliadač zdrojových prvkov nebolo možné otvoriť. Uistite sa, že máte vypnuté všetky blokovače vyskakujúcich okien.",
DialogBlocked : "Dialógové okno nebolo možné otvoriť. Uistite sa, že máte vypnuté všetky blokovače vyskakujúcich okien.",
// Dialogs
DlgBtnOK : "OK",
DlgBtnCancel : "Zrušiť",
DlgBtnClose : "Zavrieť",
DlgBtnBrowseServer : "Prechádzať server",
DlgAdvancedTag : "Rozšírené",
DlgOpOther : "<Ďalšie>",
DlgInfoTab : "Info",
DlgAlertUrl : "Prosím vložte URL",
// General Dialogs Labels
DlgGenNotSet : "<nenastavené>",
DlgGenId : "Id",
DlgGenLangDir : "Orientácia jazyka",
DlgGenLangDirLtr : "Zľava doprava (LTR)",
DlgGenLangDirRtl : "Sprava doľava (RTL)",
DlgGenLangCode : "Kód jazyka",
DlgGenAccessKey : "Prístupový kľúč",
DlgGenName : "Meno",
DlgGenTabIndex : "Poradie prvku",
DlgGenLongDescr : "Dlhý popis URL",
DlgGenClass : "Trieda štýlu",
DlgGenTitle : "Pomocný titulok",
DlgGenContType : "Pomocný typ obsahu",
DlgGenLinkCharset : "Priradená znaková sada",
DlgGenStyle : "Štýl",
// Image Dialog
DlgImgTitle : "Vlastnosti obrázku",
DlgImgInfoTab : "Informácie o obrázku",
DlgImgBtnUpload : "Odoslať na server",
DlgImgURL : "URL",
DlgImgUpload : "Odoslať",
DlgImgAlt : "Alternatívny text",
DlgImgWidth : "Šírka",
DlgImgHeight : "Výška",
DlgImgLockRatio : "Zámok",
DlgBtnResetSize : "Pôvodná veľkosť",
DlgImgBorder : "Okraje",
DlgImgHSpace : "H-medzera",
DlgImgVSpace : "V-medzera",
DlgImgAlign : "Zarovnanie",
DlgImgAlignLeft : "Vľavo",
DlgImgAlignAbsBottom: "Úplne dole",
DlgImgAlignAbsMiddle: "Do stredu",
DlgImgAlignBaseline : "Na základňu",
DlgImgAlignBottom : "Dole",
DlgImgAlignMiddle : "Na stred",
DlgImgAlignRight : "Vpravo",
DlgImgAlignTextTop : "Na horný okraj textu",
DlgImgAlignTop : "Nahor",
DlgImgPreview : "Náhľad",
DlgImgAlertUrl : "Zadajte prosím URL obrázku",
DlgImgLinkTab : "Odkaz",
// Flash Dialog
DlgFlashTitle : "Vlastnosti Flashu",
DlgFlashChkPlay : "Automatické prehrávanie",
DlgFlashChkLoop : "Opakovanie",
DlgFlashChkMenu : "Povoliť Flash Menu",
DlgFlashScale : "Mierka",
DlgFlashScaleAll : "Zobraziť mierku",
DlgFlashScaleNoBorder : "Bez okrajov",
DlgFlashScaleFit : "Roztiahnuť na celé",
// Link Dialog
DlgLnkWindowTitle : "Odkaz",
DlgLnkInfoTab : "Informácie o odkaze",
DlgLnkTargetTab : "Cieľ",
DlgLnkType : "Typ odkazu",
| DlgLnkURL : "URL",
DlgLnkAnchorSel : "Vybrať kotvu",
DlgLnkAnchorByName : "Podľa mena kotvy",
DlgLnkAnchorById : "Podľa Id objektu",
DlgLnkNoAnchors : "(V stránke nie je definovaná žiadna kotva)",
DlgLnkEMail : "E-Mailová adresa",
DlgLnkEMailSubject : "Predmet správy",
DlgLnkEMailBody : "Telo správy",
DlgLnkUpload : "Odoslať",
DlgLnkBtnUpload : "Odoslať na server",
DlgLnkTarget : "Cieľ",
DlgLnkTargetFrame : "<rámec>",
DlgLnkTargetPopup : "<vyskakovacie okno>",
DlgLnkTargetBlank : "Nové okno (_blank)",
DlgLnkTargetParent : "Rodičovské okno (_parent)",
DlgLnkTargetSelf : "Rovnaké okno (_self)",
DlgLnkTargetTop : "Hlavné okno (_top)",
DlgLnkTargetFrameName : "Meno rámu cieľa",
DlgLnkPopWinName : "Názov vyskakovacieho okna",
DlgLnkPopWinFeat : "Vlastnosti vyskakovacieho okna",
DlgLnkPopResize : "Meniteľná veľkosť",
DlgLnkPopLocation : "Panel umiestnenia",
DlgLnkPopMenu : "Panel ponuky",
DlgLnkPopScroll : "Posuvníky",
DlgLnkPopStatus : "Stavový riadok",
DlgLnkPopToolbar : "Panel nástrojov",
DlgLnkPopFullScrn : "Celá obrazovka (IE)",
DlgLnkPopDependent : "Závislosť (Netscape)",
DlgLnkPopWidth : "Šírka",
DlgLnkPopHeight : "Výška",
DlgLnkPopLeft : "Ľavý okraj",
DlgLnkPopTop : "Horný okraj",
DlnLnkMsgNoUrl : "Zadajte prosím URL odkazu",
DlnLnkMsgNoEMail : "Zadajte prosím e-mailovú adresu",
DlnLnkMsgNoAnchor : "Vyberte prosím kotvu",
DlnLnkMsgInvPopName : "Názov vyskakovacieho okna sa musá začínať písmenom a nemôže obsahovať medzery",
// Color Dialog
DlgColorTitle : "Výber farby",
DlgColorBtnClear : "Vymazať",
DlgColorHighlight : "Zvýraznená",
DlgColorSelected : "Vybraná",
// Smiley Dialog
DlgSmileyTitle : "Vkladanie smajlíkov",
// Special Character Dialog
DlgSpecialCharTitle : "Výber špeciálneho znaku",
// Table Dialog
DlgTableTitle : "Vlastnosti tabuľky",
DlgTableRows : "Riadky",
DlgTableColumns : "Stĺpce",
DlgTableBorder : "Ohraničenie",
DlgTableAlign : "Zarovnanie",
DlgTableAlignNotSet : "<nenastavené>",
DlgTableAlignLeft : "Vľavo",
DlgTableAlignCenter : "Na stred",
DlgTableAlignRight : "Vpravo",
DlgTableWidth : "Šírka",
DlgTableWidthPx : "pixelov",
DlgTableWidthPc : "percent",
DlgTableHeight : "Výška",
DlgTableCellSpace : "Vzdialenosť buniek",
DlgTableCellPad : "Odsadenie obsahu",
DlgTableCaption : "Popis",
DlgTableSummary : "Prehľad",
// Table Cell Dialog
DlgCellTitle : "Vlastnosti bunky",
DlgCellWidth : "Šírka",
DlgCellWidthPx : "bodov",
DlgCellWidthPc : "percent",
DlgCellHeight : "Výška",
DlgCellWordWrap : "Zalamovannie",
DlgCellWordWrapNotSet : "<nenastavené>",
DlgCellWordWrapYes : "Áno",
DlgCellWordWrapNo : "Nie",
DlgCellHorAlign : "Vodorovné zarovnanie",
DlgCellHorAlignNotSet : "<nenastavené>",
DlgCellHorAlignLeft : "Vľavo",
DlgCellHorAlignCenter : "Na stred",
DlgCellHorAlignRight: "Vpravo",
DlgCellVerAlign : "Zvislé zarovnanie",
DlgCellVerAlignNotSet : "<nenastavené>",
DlgCellVerAlignTop : "Nahor",
DlgCellVerAlignMiddle : "Doprostred",
DlgCellVerAlignBottom : "Dole",
DlgCellVerAlignBaseline : "Na základňu",
DlgCellRowSpan : "Zlúčené riadky",
DlgCellCollSpan : "Zlúčené stĺpce",
DlgCellBackColor : "Farba pozadia",
DlgCellBorderColor : "Farba ohraničenia",
DlgCellBtnSelect : "Výber...",
// Find and Replace Dialog
DlgFindAndReplaceTitle : "Find and Replace", //MISSING
// Find Dialog
DlgFindTitle : "Hľadať",
DlgFindFindBtn : "Hľadať",
DlgFindNotFoundMsg : "Hľadaný text nebol nájdený.",
// Replace Dialog
DlgReplaceTitle : "Nahradiť",
DlgReplaceFindLbl : "Čo hľadať:",
DlgReplaceReplaceLbl : "Čím nahradiť:",
DlgReplaceCaseChk : "Rozlišovať malé/veľké písmená",
DlgReplaceReplaceBtn : "Nahradiť",
DlgReplaceReplAllBtn : "Nahradiť všetko",
DlgReplaceWordChk : "Len celé slová",
// Paste Operations / Dialog
PasteErrorCut : "Bezpečnostné nastavenie Vášho prehliadača nedovoľujú editoru spustiť funkciu pre vystrihnutie zvoleného textu do schránky. Prosím vystrihnite zvolený text do schránky pomocou klávesnice (Ctrl+X).",
PasteErrorCopy : "Bezpečnostné nastavenie Vášho prehliadača nedovoľujú editoru spustiť funkciu pre kopírovanie zvoleného textu do schránky. Prosím skopírujte zvolený text do schránky pomocou klávesnice (Ctrl+C).",
PasteAsText : "Vložiť ako čistý text",
PasteFromWord : "Vložiť text z Wordu",
DlgPasteMsg2 : "Prosím vložte nasledovný rámček použitím klávesnice (<STRONG>Ctrl+V</STRONG>) a stlačte <STRONG>OK</STRONG>.",
DlgPasteSec : "Because of your browser security settings, the editor is not able to access your clipboard data directly. You are required to paste it again in this window.", //MISSING
DlgPasteIgnoreFont : "Ignorovať nastavenia typu písma",
DlgPasteRemoveStyles : "Odstrániť formátovanie",
DlgPasteCleanBox : "Vyčistiť schránku",
// Color Picker
ColorAutomatic : "Automaticky",
ColorMoreColors : "Viac farieb...",
// Document Properties
DocProps : "Vlastnosti dokumentu",
// Anchor Dialog
DlgAnchorTitle : "Vlastnosti kotvy",
DlgAnchorName : "Meno kotvy",
DlgAnchorErrorName : "Zadajte prosím meno kotvy",
// Speller Pages Dialog
DlgSpellNotInDic : "Nie je v slovníku",
DlgSpellChangeTo : "Zmeniť na",
DlgSpellBtnIgnore : "Ignorovať",
DlgSpellBtnIgnoreAll : "Ignorovať všetko",
DlgSpellBtnReplace : "Prepísat",
DlgSpellBtnReplaceAll : "Prepísat všetko",
DlgSpellBtnUndo : "Späť",
DlgSpellNoSuggestions : "- Žiadny návrh -",
DlgSpellProgress : "Prebieha kontrola pravopisu...",
DlgSpellNoMispell : "Kontrola pravopisu dokončená: bez chýb",
DlgSpellNoChanges : "Kontrola pravopisu dokončená: žiadne slová nezmenené",
DlgSpellOneChange : "Kontrola pravopisu dokončená: zmenené jedno slovo",
DlgSpellManyChanges : "Kontrola pravopisu dokončená: zmenených %1 slov",
IeSpellDownload : "Kontrola pravopisu nie je naištalovaná. Chcete ju hneď stiahnuť?",
// Button Dialog
DlgButtonText : "Text",
DlgButtonType : "Typ",
DlgButtonTypeBtn : "Tlačidlo",
DlgButtonTypeSbm : "Odoslať",
DlgButtonTypeRst : "Vymazať",
// Checkbox and Radio Button Dialogs
DlgCheckboxName : "Názov",
DlgCheckboxValue : "Hodnota",
DlgCheckboxSelected : "Vybrané",
// Form Dialog
DlgFormName : "Názov",
DlgFormAction : "Akcie",
DlgFormMethod : "Metóda",
// Select Field Dialog
DlgSelectName : "Názov",
DlgSelectValue : "Hodnota",
DlgSelectSize : "Veľkosť",
DlgSelectLines : "riadkov",
DlgSelectChkMulti : "Povoliť viacnásobný výber",
DlgSelectOpAvail : "Dostupné možnosti",
DlgSelectOpText : "Text",
DlgSelectOpValue : "Hodnota",
DlgSelectBtnAdd : "Pridať",
DlgSelectBtnModify : "Zmeniť",
DlgSelectBtnUp : "Hore",
DlgSelectBtnDown : "Dole",
DlgSelectBtnSetValue : "Nastaviť ako vybranú hodnotu",
DlgSelectBtnDelete : "Zmazať",
// Textarea Dialog
DlgTextareaName : "Názov",
DlgTextareaCols : "Stĺpce",
DlgTextareaRows : "Riadky",
// Text Field Dialog
DlgTextName : "Názov",
DlgTextValue : "Hodnota",
DlgTextCharWidth : "Šírka pola (znakov)",
DlgTextMaxChars : "Maximálny počet znakov",
DlgTextType : "Typ",
DlgTextTypeText : "Text",
DlgTextTypePass : "Heslo",
// Hidden Field Dialog
DlgHiddenName : "Názov",
DlgHiddenValue : "Hodnota",
// Bulleted List Dialog
BulletedListProp : "Vlastnosti odrážok",
NumberedListProp : "Vlastnosti číslovania",
DlgLstStart : "Štart",
DlgLstType : "Typ",
DlgLstTypeCircle : "Krúžok",
DlgLstTypeDisc : "Disk",
DlgLstTypeSquare : "Štvorec",
DlgLstTypeNumbers : "Číslovanie (1, 2, 3)",
DlgLstTypeLCase : "Malé písmená (a, b, c)",
DlgLstTypeUCase : "Veľké písmená (A, B, C)",
DlgLstTypeSRoman : "Malé rímske číslice (i, ii, iii)",
DlgLstTypeLRoman : "Veľké rímske číslice (I, II, III)",
// Document Properties Dialog
DlgDocGeneralTab : "Všeobecné",
DlgDocBackTab : "Pozadie",
DlgDocColorsTab : "Farby a okraje",
DlgDocMetaTab : "Meta Data",
DlgDocPageTitle : "Titulok",
DlgDocLangDir : "Orientácie jazyka",
DlgDocLangDirLTR : "Zľava doprava (LTR)",
DlgDocLangDirRTL : "Sprava doľava (RTL)",
DlgDocLangCode : "Kód jazyka",
DlgDocCharSet : "Kódová stránka",
DlgDocCharSetCE : "Stredoeurópske",
DlgDocCharSetCT : "Čínština tradičná (Big5)",
DlgDocCharSetCR : "Cyrillika",
DlgDocCharSetGR : "Gréčtina",
DlgDocCharSetJP : "Japončina",
DlgDocCharSetKR : "Korejčina",
DlgDocCharSetTR : "Turečtina",
DlgDocCharSetUN : "Unicode (UTF-8)",
DlgDocCharSetWE : "Západná európa",
DlgDocCharSetOther : "Iná kódová stránka",
DlgDocDocType : "Typ záhlavia dokumentu",
DlgDocDocTypeOther : "Iný typ záhlavia dokumentu",
DlgDocIncXHTML : "Obsahuje deklarácie XHTML",
DlgDocBgColor : "Farba pozadia",
DlgDocBgImage : "URL adresa obrázku na pozadí",
DlgDocBgNoScroll : "Fixné pozadie",
DlgDocCText : "Text",
DlgDocCLink : "Odkaz",
DlgDocCVisited : "Navštívený odkaz",
DlgDocCActive : "Aktívny odkaz",
DlgDocMargins : "Okraje stránky",
DlgDocMaTop : "Horný",
DlgDocMaLeft : "Ľavý",
DlgDocMaRight : "Pravý",
DlgDocMaBottom : "Dolný",
DlgDocMeIndex : "Kľúčové slová pre indexovanie (oddelené čiarkou)",
DlgDocMeDescr : "Popis stránky",
DlgDocMeAuthor : "Autor",
DlgDocMeCopy : "Autorské práva",
DlgDocPreview : "Náhľad",
// Templates Dialog
Templates : "Šablóny",
DlgTemplatesTitle : "Šablóny obsahu",
DlgTemplatesSelMsg : "Prosím vyberte šablóny na otvorenie v editore<br>(súšasný obsah bude stratený):",
DlgTemplatesLoading : "Nahrávam zoznam šablón. Čakajte prosím...",
DlgTemplatesNoTpl : "(žiadne šablóny nenájdené)",
DlgTemplatesReplace : "Nahradiť aktuálny obsah",
// About Dialog
DlgAboutAboutTab : "O aplikáci",
DlgAboutBrowserInfoTab : "Informácie o prehliadači",
DlgAboutLicenseTab : "Licencia",
DlgAboutVersion : "verzia",
DlgAboutInfo : "Viac informácií získate na"
}; | DlgLnkTypeURL : "URL",
DlgLnkTypeAnchor : "Kotva v tejto stránke",
DlgLnkTypeEMail : "E-Mail",
DlgLnkProto : "Protokol",
DlgLnkProtoOther : "<iný>",
| random_line_split |
monitor_correction_test.py | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: monitor_correction_test
:platform: Unix
:synopsis: tests the monitor correction
.. moduleauthor:: Aaron Parsons <[email protected]>
"""
import unittest
from savu.test import test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class MonitorCorrectionTest(unittest.TestCase):
def | (self):
data_file = tu.get_test_data_path('mm.nxs')
process_file = tu.get_test_process_path('monitor_correction_test.nxs')
run_protected_plugin_runner(tu.set_options(data_file,
process_file=process_file))
if __name__ == "__main__":
unittest.main()
| test_monitor_correction | identifier_name |
monitor_correction_test.py | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: monitor_correction_test
:platform: Unix
:synopsis: tests the monitor correction
.. moduleauthor:: Aaron Parsons <[email protected]>
"""
import unittest
from savu.test import test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class MonitorCorrectionTest(unittest.TestCase):
|
if __name__ == "__main__":
unittest.main()
| def test_monitor_correction(self):
data_file = tu.get_test_data_path('mm.nxs')
process_file = tu.get_test_process_path('monitor_correction_test.nxs')
run_protected_plugin_runner(tu.set_options(data_file,
process_file=process_file)) | identifier_body |
monitor_correction_test.py | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: monitor_correction_test
:platform: Unix
:synopsis: tests the monitor correction
.. moduleauthor:: Aaron Parsons <[email protected]>
"""
import unittest
from savu.test import test_utils as tu | from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class MonitorCorrectionTest(unittest.TestCase):
def test_monitor_correction(self):
data_file = tu.get_test_data_path('mm.nxs')
process_file = tu.get_test_process_path('monitor_correction_test.nxs')
run_protected_plugin_runner(tu.set_options(data_file,
process_file=process_file))
if __name__ == "__main__":
unittest.main() | random_line_split |
|
monitor_correction_test.py | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: monitor_correction_test
:platform: Unix
:synopsis: tests the monitor correction
.. moduleauthor:: Aaron Parsons <[email protected]>
"""
import unittest
from savu.test import test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class MonitorCorrectionTest(unittest.TestCase):
def test_monitor_correction(self):
data_file = tu.get_test_data_path('mm.nxs')
process_file = tu.get_test_process_path('monitor_correction_test.nxs')
run_protected_plugin_runner(tu.set_options(data_file,
process_file=process_file))
if __name__ == "__main__":
| unittest.main() | conditional_block |
|
__init__.py | # -*- coding: utf-8 -*-
import re
import datetime
import logging
from urlparse import parse_qsl
from mamchecker.model import depth_1st, problemCtxObjs, keysOmit, table_entry, ctxkey
from mamchecker.hlp import datefmt, last
from mamchecker.util import PageBase |
def prepare(
qs # url query_string (after ?)
, skey # start key, filter is filled up with it.
# student key normally, but can be other, e.g. school, too.
# if a parent belongs to user then all children can be queried
, userkey
):
'''prepares the parameters for depth_1st
>>> #see depth_1st
>>> skey = ctxkey(['Sc1', 'Pe1', 'Te1','Cl1','St1'])
>>> #qs= "Sc0&*&*&*&*&*"
>>> qs= "q~r.be"
>>> prepare(qs,skey,None)[0]
['Sc1', 'Pe1', 'Te1', 'Cl1', 'St1', [('query_string', '=', 'r.be')]]
>>> qs= ' '
>>> prepare(qs,skey,None)[0]
['Sc1', 'Pe1', 'Te1', 'Cl1', 'St1', []]
>>> qs= "1DK&*&d>3"
>>> p = prepare(qs,skey,None)[0]
'''
@last
def filters(x):
'''convert to GAE filters from
lst is ["<field><operator><value>",...]
~ -> =
q = query_string
age fields: H = hours, S = seconds, M = minutes, d = days
'''
AGES = {'d': 'days', 'H': 'hours', 'M': 'minutes', 'S': 'seconds'}
ABBR = {'q': 'query_string'}
filters = []
if not isinstance(x, str):
return
for le in x.split(','):
#le = next(iter(x.split(',')))
le = le.replace('~', '=')
match = re.match(r'(\w+)([=!<>]+)([\w\d\.]+)', le)
if match:
grps = match.groups()
name, op, value = grps
if name in ABBR:
name = ABBR[name]
age = None
# le='d<~3'
if name in AGES:
age = AGES[name]
if name in AGES.values():
age = name
if age:
value = datetime.datetime.now(
) - datetime.timedelta(**{age: int(value)})
name = 'answered'
filters.append((name, op, value))
return filters
#qs = ''
O = problemCtxObjs
# q=query, qq=*->[], qqf=filter->gae filter (name,op,value)
q = filter(None, [k.strip() for k, v in parse_qsl(qs, True)])
qq = [[] if x == '*' else x for x in q]
qqf = [filters() if filters(x) else x for x in qq]
# fill up to len(O)
delta = len(O) - len(qqf)
if delta > 0:
ext = [str(v) for k, v in skey.pairs()]
extpart = min(len(ext), delta)
rest = delta - extpart
qqf = ext[:extpart] + [[]] * rest + qqf
keys = keysOmit(qqf)
obj = keys and keys[-1].get() # parent to start from
if obj and obj.userkey == userkey:
return qqf, keys, O, True
else:
return qqf, [], O, False, userkey
class Page(PageBase):
def __init__(self, _request):
super(self.__class__, self).__init__(_request)
self.table = lambda: depth_1st(
*
prepare(
self.request.query_string,
self.request.student.key,
self.user and self.user.key))
self.params = {
'table': self.table,
'table_entry': table_entry}
def post_response(self):
for urlsafe in self.request.get_all('deletee'):
k = ndb.Key(urlsafe=urlsafe)
k.delete()
return self.get_response() | from google.appengine.ext import ndb
| random_line_split |
__init__.py | # -*- coding: utf-8 -*-
import re
import datetime
import logging
from urlparse import parse_qsl
from mamchecker.model import depth_1st, problemCtxObjs, keysOmit, table_entry, ctxkey
from mamchecker.hlp import datefmt, last
from mamchecker.util import PageBase
from google.appengine.ext import ndb
def prepare(
qs # url query_string (after ?)
, skey # start key, filter is filled up with it.
# student key normally, but can be other, e.g. school, too.
# if a parent belongs to user then all children can be queried
, userkey
):
'''prepares the parameters for depth_1st
>>> #see depth_1st
>>> skey = ctxkey(['Sc1', 'Pe1', 'Te1','Cl1','St1'])
>>> #qs= "Sc0&*&*&*&*&*"
>>> qs= "q~r.be"
>>> prepare(qs,skey,None)[0]
['Sc1', 'Pe1', 'Te1', 'Cl1', 'St1', [('query_string', '=', 'r.be')]]
>>> qs= ' '
>>> prepare(qs,skey,None)[0]
['Sc1', 'Pe1', 'Te1', 'Cl1', 'St1', []]
>>> qs= "1DK&*&d>3"
>>> p = prepare(qs,skey,None)[0]
'''
@last
def | (x):
'''convert to GAE filters from
lst is ["<field><operator><value>",...]
~ -> =
q = query_string
age fields: H = hours, S = seconds, M = minutes, d = days
'''
AGES = {'d': 'days', 'H': 'hours', 'M': 'minutes', 'S': 'seconds'}
ABBR = {'q': 'query_string'}
filters = []
if not isinstance(x, str):
return
for le in x.split(','):
#le = next(iter(x.split(',')))
le = le.replace('~', '=')
match = re.match(r'(\w+)([=!<>]+)([\w\d\.]+)', le)
if match:
grps = match.groups()
name, op, value = grps
if name in ABBR:
name = ABBR[name]
age = None
# le='d<~3'
if name in AGES:
age = AGES[name]
if name in AGES.values():
age = name
if age:
value = datetime.datetime.now(
) - datetime.timedelta(**{age: int(value)})
name = 'answered'
filters.append((name, op, value))
return filters
#qs = ''
O = problemCtxObjs
# q=query, qq=*->[], qqf=filter->gae filter (name,op,value)
q = filter(None, [k.strip() for k, v in parse_qsl(qs, True)])
qq = [[] if x == '*' else x for x in q]
qqf = [filters() if filters(x) else x for x in qq]
# fill up to len(O)
delta = len(O) - len(qqf)
if delta > 0:
ext = [str(v) for k, v in skey.pairs()]
extpart = min(len(ext), delta)
rest = delta - extpart
qqf = ext[:extpart] + [[]] * rest + qqf
keys = keysOmit(qqf)
obj = keys and keys[-1].get() # parent to start from
if obj and obj.userkey == userkey:
return qqf, keys, O, True
else:
return qqf, [], O, False, userkey
class Page(PageBase):
def __init__(self, _request):
super(self.__class__, self).__init__(_request)
self.table = lambda: depth_1st(
*
prepare(
self.request.query_string,
self.request.student.key,
self.user and self.user.key))
self.params = {
'table': self.table,
'table_entry': table_entry}
def post_response(self):
for urlsafe in self.request.get_all('deletee'):
k = ndb.Key(urlsafe=urlsafe)
k.delete()
return self.get_response()
| filters | identifier_name |
__init__.py | # -*- coding: utf-8 -*-
import re
import datetime
import logging
from urlparse import parse_qsl
from mamchecker.model import depth_1st, problemCtxObjs, keysOmit, table_entry, ctxkey
from mamchecker.hlp import datefmt, last
from mamchecker.util import PageBase
from google.appengine.ext import ndb
def prepare(
qs # url query_string (after ?)
, skey # start key, filter is filled up with it.
# student key normally, but can be other, e.g. school, too.
# if a parent belongs to user then all children can be queried
, userkey
):
'''prepares the parameters for depth_1st
>>> #see depth_1st
>>> skey = ctxkey(['Sc1', 'Pe1', 'Te1','Cl1','St1'])
>>> #qs= "Sc0&*&*&*&*&*"
>>> qs= "q~r.be"
>>> prepare(qs,skey,None)[0]
['Sc1', 'Pe1', 'Te1', 'Cl1', 'St1', [('query_string', '=', 'r.be')]]
>>> qs= ' '
>>> prepare(qs,skey,None)[0]
['Sc1', 'Pe1', 'Te1', 'Cl1', 'St1', []]
>>> qs= "1DK&*&d>3"
>>> p = prepare(qs,skey,None)[0]
'''
@last
def filters(x):
'''convert to GAE filters from
lst is ["<field><operator><value>",...]
~ -> =
q = query_string
age fields: H = hours, S = seconds, M = minutes, d = days
'''
AGES = {'d': 'days', 'H': 'hours', 'M': 'minutes', 'S': 'seconds'}
ABBR = {'q': 'query_string'}
filters = []
if not isinstance(x, str):
return
for le in x.split(','):
#le = next(iter(x.split(',')))
le = le.replace('~', '=')
match = re.match(r'(\w+)([=!<>]+)([\w\d\.]+)', le)
if match:
grps = match.groups()
name, op, value = grps
if name in ABBR:
name = ABBR[name]
age = None
# le='d<~3'
if name in AGES:
age = AGES[name]
if name in AGES.values():
age = name
if age:
value = datetime.datetime.now(
) - datetime.timedelta(**{age: int(value)})
name = 'answered'
filters.append((name, op, value))
return filters
#qs = ''
O = problemCtxObjs
# q=query, qq=*->[], qqf=filter->gae filter (name,op,value)
q = filter(None, [k.strip() for k, v in parse_qsl(qs, True)])
qq = [[] if x == '*' else x for x in q]
qqf = [filters() if filters(x) else x for x in qq]
# fill up to len(O)
delta = len(O) - len(qqf)
if delta > 0:
ext = [str(v) for k, v in skey.pairs()]
extpart = min(len(ext), delta)
rest = delta - extpart
qqf = ext[:extpart] + [[]] * rest + qqf
keys = keysOmit(qqf)
obj = keys and keys[-1].get() # parent to start from
if obj and obj.userkey == userkey:
|
else:
return qqf, [], O, False, userkey
class Page(PageBase):
def __init__(self, _request):
super(self.__class__, self).__init__(_request)
self.table = lambda: depth_1st(
*
prepare(
self.request.query_string,
self.request.student.key,
self.user and self.user.key))
self.params = {
'table': self.table,
'table_entry': table_entry}
def post_response(self):
for urlsafe in self.request.get_all('deletee'):
k = ndb.Key(urlsafe=urlsafe)
k.delete()
return self.get_response()
| return qqf, keys, O, True | conditional_block |
__init__.py | # -*- coding: utf-8 -*-
import re
import datetime
import logging
from urlparse import parse_qsl
from mamchecker.model import depth_1st, problemCtxObjs, keysOmit, table_entry, ctxkey
from mamchecker.hlp import datefmt, last
from mamchecker.util import PageBase
from google.appengine.ext import ndb
def prepare(
qs # url query_string (after ?)
, skey # start key, filter is filled up with it.
# student key normally, but can be other, e.g. school, too.
# if a parent belongs to user then all children can be queried
, userkey
):
'''prepares the parameters for depth_1st
>>> #see depth_1st
>>> skey = ctxkey(['Sc1', 'Pe1', 'Te1','Cl1','St1'])
>>> #qs= "Sc0&*&*&*&*&*"
>>> qs= "q~r.be"
>>> prepare(qs,skey,None)[0]
['Sc1', 'Pe1', 'Te1', 'Cl1', 'St1', [('query_string', '=', 'r.be')]]
>>> qs= ' '
>>> prepare(qs,skey,None)[0]
['Sc1', 'Pe1', 'Te1', 'Cl1', 'St1', []]
>>> qs= "1DK&*&d>3"
>>> p = prepare(qs,skey,None)[0]
'''
@last
def filters(x):
|
#qs = ''
O = problemCtxObjs
# q=query, qq=*->[], qqf=filter->gae filter (name,op,value)
q = filter(None, [k.strip() for k, v in parse_qsl(qs, True)])
qq = [[] if x == '*' else x for x in q]
qqf = [filters() if filters(x) else x for x in qq]
# fill up to len(O)
delta = len(O) - len(qqf)
if delta > 0:
ext = [str(v) for k, v in skey.pairs()]
extpart = min(len(ext), delta)
rest = delta - extpart
qqf = ext[:extpart] + [[]] * rest + qqf
keys = keysOmit(qqf)
obj = keys and keys[-1].get() # parent to start from
if obj and obj.userkey == userkey:
return qqf, keys, O, True
else:
return qqf, [], O, False, userkey
class Page(PageBase):
def __init__(self, _request):
super(self.__class__, self).__init__(_request)
self.table = lambda: depth_1st(
*
prepare(
self.request.query_string,
self.request.student.key,
self.user and self.user.key))
self.params = {
'table': self.table,
'table_entry': table_entry}
def post_response(self):
for urlsafe in self.request.get_all('deletee'):
k = ndb.Key(urlsafe=urlsafe)
k.delete()
return self.get_response()
| '''convert to GAE filters from
lst is ["<field><operator><value>",...]
~ -> =
q = query_string
age fields: H = hours, S = seconds, M = minutes, d = days
'''
AGES = {'d': 'days', 'H': 'hours', 'M': 'minutes', 'S': 'seconds'}
ABBR = {'q': 'query_string'}
filters = []
if not isinstance(x, str):
return
for le in x.split(','):
#le = next(iter(x.split(',')))
le = le.replace('~', '=')
match = re.match(r'(\w+)([=!<>]+)([\w\d\.]+)', le)
if match:
grps = match.groups()
name, op, value = grps
if name in ABBR:
name = ABBR[name]
age = None
# le='d<~3'
if name in AGES:
age = AGES[name]
if name in AGES.values():
age = name
if age:
value = datetime.datetime.now(
) - datetime.timedelta(**{age: int(value)})
name = 'answered'
filters.append((name, op, value))
return filters | identifier_body |
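The filters helper above turns comma-separated atoms like q~r.be or d>3 into (field, operator, value) triples for a GAE query, with ~ standing for = and single-letter age fields rewritten against the answered timestamp. A standalone sketch of just that parsing core, reusing only the abbreviations the code above defines:

import re
import datetime

ABBR = {'q': 'query_string'}
AGES = {'d': 'days', 'H': 'hours', 'M': 'minutes', 'S': 'seconds'}

def parse_filters(expr):
    triples = []
    for atom in expr.split(','):
        atom = atom.replace('~', '=')                     # '~' aliases '='
        m = re.match(r'(\w+)([=!<>]+)([\w\d\.]+)', atom)
        if not m:
            continue
        name, op, value = m.groups()
        name = ABBR.get(name, name)
        if name in AGES:                                  # age atom -> timestamp
            value = datetime.datetime.now() - datetime.timedelta(**{AGES[name]: int(value)})
            name = 'answered'
        triples.append((name, op, value))
    return triples

print(parse_filters('q~r.be'))  # [('query_string', '=', 'r.be')]
print(parse_filters('d>3'))     # [('answered', '>', now minus three days)]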
smooth-scroll.js | /*!
* smooth-scroll v9.4.1: Animate scrolling to anchor links
* (c) 2016 Chris Ferdinandi
* MIT License
* http://github.com/cferdinandi/smooth-scroll
*/
(function (root, factory) {
if ( typeof define === 'function' && define.amd ) {
define([], factory(root));
} else if ( typeof exports === 'object' ) {
module.exports = factory(root);
} else {
root.smoothScroll = factory(root);
}
})(typeof global !== 'undefined' ? global : this.window || this.global, function (root) {
'use strict';
//
// Variables
//
var smoothScroll = {}; // Object for public APIs
var supports = 'querySelector' in document && 'addEventListener' in root; // Feature test
var settings, eventTimeout, fixedHeader, headerHeight, animationInterval;
// Default settings
var defaults = {
selector: '[data-scroll]',
selectorHeader: '[data-scroll-header]',
speed: 500,
easing: 'easeInOutCubic',
offset: 0,
updateURL: true,
callback: function () {}
};
//
// Methods
//
/**
* Merge two or more objects. Returns a new object.
* @private
* @param {Boolean} deep If true, do a deep (or recursive) merge [optional]
* @param {Object} objects The objects to merge together
* @returns {Object} Merged values of defaults and options
*/
var extend = function () {
// Variables
var extended = {};
var deep = false;
var i = 0;
var length = arguments.length;
// Check if a deep merge
if ( Object.prototype.toString.call( arguments[0] ) === '[object Boolean]' ) {
deep = arguments[0];
i++;
}
// Merge the object into the extended object
var merge = function (obj) {
for ( var prop in obj ) {
if ( Object.prototype.hasOwnProperty.call( obj, prop ) ) {
// If deep merge and property is an object, merge properties
if ( deep && Object.prototype.toString.call(obj[prop]) === '[object Object]' ) {
extended[prop] = extend( true, extended[prop], obj[prop] );
} else {
extended[prop] = obj[prop];
}
}
}
};
// Loop through each object and conduct a merge
for ( ; i < length; i++ ) {
var obj = arguments[i];
merge(obj);
}
return extended;
};
/**
* Get the height of an element.
* @private
* @param {Node} elem The element to get the height of
* @return {Number} The element's height in pixels
*/
var getHeight = function ( elem ) {
return Math.max( elem.scrollHeight, elem.offsetHeight, elem.clientHeight );
};
/**
* Get the closest matching element up the DOM tree.
* @private
* @param {Element} elem Starting element
* @param {String} selector Selector to match against (class, ID, data attribute, or tag)
* @return {Boolean|Element} Returns null if not match found
*/
var getClosest = function ( elem, selector ) {
// Variables
var firstChar = selector.charAt(0);
var supports = 'classList' in document.documentElement;
var attribute, value;
// If selector is a data attribute, split attribute from value
if ( firstChar === '[' ) {
selector = selector.substr(1, selector.length - 2);
attribute = selector.split( '=' );
if ( attribute.length > 1 ) {
value = true;
attribute[1] = attribute[1].replace( /"/g, '' ).replace( /'/g, '' );
}
}
// Get closest match
for ( ; elem && elem !== document && elem.nodeType === 1; elem = elem.parentNode ) {
// If selector is a class
if ( firstChar === '.' ) {
if ( supports ) {
if ( elem.classList.contains( selector.substr(1) ) ) {
return elem;
}
} else {
if ( new RegExp('(^|\\s)' + selector.substr(1) + '(\\s|$)').test( elem.className ) ) {
return elem;
}
}
}
// If selector is an ID
if ( firstChar === '#' ) {
if ( elem.id === selector.substr(1) ) {
return elem;
}
}
// If selector is a data attribute
if ( firstChar === '[' ) {
if ( elem.hasAttribute( attribute[0] ) ) {
if ( value ) {
if ( elem.getAttribute( attribute[0] ) === attribute[1] ) {
return elem;
}
} else {
return elem;
}
}
}
// If selector is a tag
if ( elem.tagName.toLowerCase() === selector ) {
return elem;
}
}
return null;
};
/**
* Escape special characters for use with querySelector
* @public
* @param {String} id The anchor ID to escape
* @author Mathias Bynens
* @link https://github.com/mathiasbynens/CSS.escape
*/
smoothScroll.escapeCharacters = function ( id ) {
// Remove leading hash
if ( id.charAt(0) === '#' ) {
id = id.substr(1);
}
var string = String(id);
var length = string.length;
var index = -1;
var codeUnit;
var result = '';
var firstCodeUnit = string.charCodeAt(0);
while (++index < length) {
codeUnit = string.charCodeAt(index);
// Note: there’s no need to special-case astral symbols, surrogate
// pairs, or lone surrogates.
// If the character is NULL (U+0000), then throw an
// `InvalidCharacterError` exception and terminate these steps.
if (codeUnit === 0x0000) {
throw new InvalidCharacterError(
'Invalid character: the input contains U+0000.'
);
}
if (
// If the character is in the range [\1-\1F] (U+0001 to U+001F) or is
// U+007F, […]
(codeUnit >= 0x0001 && codeUnit <= 0x001F) || codeUnit == 0x007F ||
// If the character is the first character and is in the range [0-9]
// (U+0030 to U+0039), […]
(index === 0 && codeUnit >= 0x0030 && codeUnit <= 0x0039) ||
// If the character is the second character and is in the range [0-9]
// (U+0030 to U+0039) and the first character is a `-` (U+002D), […]
(
index === 1 &&
codeUnit >= 0x0030 && codeUnit <= 0x0039 &&
firstCodeUnit === 0x002D
)
) {
// http://dev.w3.org/csswg/cssom/#escape-a-character-as-code-point
result += '\\' + codeUnit.toString(16) + ' ';
continue;
}
// If the character is not handled by one of the above rules and is
// greater than or equal to U+0080, is `-` (U+002D) or `_` (U+005F), or
// is in one of the ranges [0-9] (U+0030 to U+0039), [A-Z] (U+0041 to
// U+005A), or [a-z] (U+0061 to U+007A), […]
if (
codeUnit >= 0x0080 ||
codeUnit === 0x002D ||
codeUnit === 0x005F ||
codeUnit >= 0x0030 && codeUnit <= 0x0039 ||
codeUnit >= 0x0041 && codeUnit <= 0x005A ||
codeUnit >= 0x0061 && codeUnit <= 0x007A
) {
// the character itself
result += string.charAt(index);
continue;
}
// Otherwise, the escaped character.
// http://dev.w3.org/csswg/cssom/#escape-a-character
result += '\\' + string.charAt(index);
}
return '#' + result;
};
/**
* Calculate the easing pattern
* @private
* @link https://gist.github.com/gre/1650294
* @param {String} type Easing pattern
* @param {Number} time Time animation should take to complete
* @returns {Number}
*/
var easingPattern = function ( type, time ) {
var pattern;
if ( type === 'easeInQuad' ) pattern = time * time; // accelerating from zero velocity
if ( type === 'easeOutQuad' ) pattern = time * (2 - time); // decelerating to zero velocity
if ( type === 'easeInOutQuad' ) pattern = time < 0.5 ? 2 * time * time : -1 + (4 - 2 * time) * time; // acceleration until halfway, then deceleration
if ( type === 'easeInCubic' ) pattern = time * time * time; // accelerating from zero velocity
if ( type === 'easeOutCubic' ) pattern = (--time) * time * time + 1; // decelerating to zero velocity
if ( type === 'easeInOutCubic' ) pattern = time < 0.5 ? 4 * time * time * time : (time - 1) * (2 * time - 2) * (2 * time - 2) + 1; // acceleration until halfway, then deceleration
if ( type === 'easeInQuart' ) pattern = time * time * time * time; // accelerating from zero velocity
if ( type === 'easeOutQuart' ) pattern = 1 - (--time) * time * time * time; // decelerating to zero velocity
if ( type === 'easeInOutQuart' ) pattern = time < 0.5 ? 8 * time * time * time * time : 1 - 8 * (--time) * time * time * time; // acceleration until halfway, then deceleration
if ( type === 'easeInQuint' ) pattern = time * time * time * time * time; // accelerating from zero velocity
if ( type === 'easeOutQuint' ) pattern = 1 + (--time) * time * time * time * time; // decelerating to zero velocity
if ( type === 'easeInOutQuint' ) pattern = time < 0.5 ? 16 * time * time * time * time * time : 1 + 16 * (--time) * time * time * time * time; // acceleration until halfway, then deceleration
return pattern || time; // no easing, no acceleration
};
/**
* Calculate how far to scroll
* @private
* @param {Element} anchor The anchor element to scroll to
* @param {Number} headerHeight Height of a fixed header, if any
* @param {Number} offset Number of pixels by which to offset scroll
* @returns {Number}
*/
var getEndLocation = function ( anchor, headerHeight, offset ) {
var location = 0;
if (anchor.offsetParent) {
do {
| n = Math.max(location - headerHeight - offset, 0);
return Math.min(location, getDocumentHeight() - getViewportHeight());
};
/**
* Determine the viewport's height
* @private
* @returns {Number}
*/
var getViewportHeight = function() {
return Math.max(document.documentElement.clientHeight, window.innerHeight || 0);
};
/**
* Determine the document's height
* @private
* @returns {Number}
*/
var getDocumentHeight = function () {
return Math.max(
root.document.body.scrollHeight, root.document.documentElement.scrollHeight,
root.document.body.offsetHeight, root.document.documentElement.offsetHeight,
root.document.body.clientHeight, root.document.documentElement.clientHeight
);
};
/**
* Convert data-options attribute into an object of key/value pairs
* @private
* @param {String} options Link-specific options as a data attribute string
* @returns {Object}
*/
var getDataOptions = function ( options ) {
return !options || !(typeof JSON === 'object' && typeof JSON.parse === 'function') ? {} : JSON.parse( options );
};
/**
* Update the URL
* @private
* @param {Element} anchor The element to scroll to
* @param {Boolean} url Whether or not to update the URL history
*/
var updateUrl = function ( anchor, url ) {
if ( root.history.pushState && (url || url === 'true') && root.location.protocol !== 'file:' ) {
root.history.pushState( null, null, [root.location.protocol, '//', root.location.host, root.location.pathname, root.location.search, anchor].join('') );
}
};
var getHeaderHeight = function ( header ) {
return header === null ? 0 : ( getHeight( header ) + header.offsetTop );
};
/**
* Start/stop the scrolling animation
* @public
* @param {Element} anchor The element to scroll to
* @param {Element} toggle The element that toggled the scroll event
* @param {Object} options
*/
smoothScroll.animateScroll = function ( anchor, toggle, options ) {
// Options and overrides
var overrides = getDataOptions( toggle ? toggle.getAttribute('data-options') : null );
var animateSettings = extend( settings || defaults, options || {}, overrides ); // Merge user options with defaults
// Selectors and variables
var isNum = Object.prototype.toString.call( anchor ) === '[object Number]' ? true : false;
var hash = smoothScroll.escapeCharacters( anchor );
var anchorElem = isNum ? null : ( hash === '#' ? root.document.documentElement : root.document.querySelector( hash ) );
if ( !isNum && !anchorElem ) return;
var startLocation = root.pageYOffset; // Current location on the page
if ( !fixedHeader ) { fixedHeader = root.document.querySelector( animateSettings.selectorHeader ); } // Get the fixed header if not already set
if ( !headerHeight ) { headerHeight = getHeaderHeight( fixedHeader ); } // Get the height of a fixed header if one exists and not already set
var endLocation = isNum ? anchor : getEndLocation( anchorElem, headerHeight, parseInt(animateSettings.offset, 10) ); // Location to scroll to
var distance = endLocation - startLocation; // distance to travel
var documentHeight = getDocumentHeight();
var timeLapsed = 0;
var percentage, position;
// Update URL
if ( !isNum ) {
updateUrl( anchor, animateSettings.updateURL );
}
/**
* Stop the scroll animation when it reaches its target (or the bottom/top of page)
* @private
* @param {Number} position Current position on the page
* @param {Number} endLocation Scroll to location
* @param {Number} animationInterval How much to scroll on this loop
*/
var stopAnimateScroll = function ( position, endLocation, animationInterval ) {
var currentLocation = root.pageYOffset;
if ( position == endLocation || currentLocation == endLocation || ( (root.innerHeight + currentLocation) >=
documentHeight ) ) {
clearInterval(animationInterval);
// If scroll target is an anchor, bring it into focus
if ( !isNum ) {
anchorElem.focus();
if ( document.activeElement.id !== anchorElem.id ) {
anchorElem.setAttribute( 'tabindex', '-1' );
anchorElem.focus();
anchorElem.style.outline = 'none';
}
}
animateSettings.callback( anchor, toggle ); // Run callbacks after animation complete
}
};
/**
* Loop scrolling animation
* @private
*/
var loopAnimateScroll = function () {
timeLapsed += 16;
percentage = ( timeLapsed / parseInt(animateSettings.speed, 10) );
percentage = ( percentage > 1 ) ? 1 : percentage;
position = startLocation + ( distance * easingPattern(animateSettings.easing, percentage) );
root.scrollTo( 0, Math.floor(position) );
stopAnimateScroll(position, endLocation, animationInterval);
};
/**
* Set interval timer
* @private
*/
var startAnimateScroll = function () {
clearInterval(animationInterval);
animationInterval = setInterval(loopAnimateScroll, 16);
};
/**
* Reset position to fix weird iOS bug
* @link https://github.com/cferdinandi/smooth-scroll/issues/45
*/
if ( root.pageYOffset === 0 ) {
root.scrollTo( 0, 0 );
}
// Start scrolling animation
startAnimateScroll();
};
/**
* If smooth scroll element clicked, animate scroll
* @private
*/
var eventHandler = function (event) {
// Don't run if right-click or command/control + click
if ( event.button !== 0 || event.metaKey || event.ctrlKey ) return;
// If a smooth scroll link, animate it
var toggle = getClosest( event.target, settings.selector );
if ( toggle && toggle.tagName.toLowerCase() === 'a' ) {
// Check that link is an anchor and points to current page
if ( toggle.hostname !== root.location.hostname || toggle.pathname !== root.location.pathname || !/#/.test(toggle.href) ) return;
event.preventDefault(); // Prevent default click event
smoothScroll.animateScroll( toggle.hash, toggle, settings); // Animate scroll
}
};
/**
* On window scroll and resize, only run events at a rate of 15fps for better performance
* @private
* @param {Event} event The scroll or resize event
*/
var eventThrottler = function (event) {
if ( !eventTimeout ) {
eventTimeout = setTimeout(function() {
eventTimeout = null; // Reset timeout
headerHeight = getHeaderHeight( fixedHeader ); // Get the height of a fixed header if one exists
}, 66);
}
};
/**
* Destroy the current initialization.
* @public
*/
smoothScroll.destroy = function () {
// If plugin isn't already initialized, stop
if ( !settings ) return;
// Remove event listeners
root.document.removeEventListener( 'click', eventHandler, false );
root.removeEventListener( 'resize', eventThrottler, false );
// Reset variables
settings = null;
eventTimeout = null;
fixedHeader = null;
headerHeight = null;
animationInterval = null;
};
/**
* Initialize Smooth Scroll
* @public
* @param {Object} options User settings
*/
smoothScroll.init = function ( options ) {
// feature test
if ( !supports ) return;
// Destroy any existing initializations
smoothScroll.destroy();
// Selectors and variables
settings = extend( defaults, options || {} ); // Merge user options with defaults
fixedHeader = root.document.querySelector( settings.selectorHeader ); // Get the fixed header
headerHeight = getHeaderHeight( fixedHeader );
// When a toggle is clicked, run the click handler
root.document.addEventListener('click', eventHandler, false );
if ( fixedHeader ) { root.addEventListener( 'resize', eventThrottler, false ); }
};
//
// Public APIs
//
return smoothScroll;
});
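// Usage sketch (illustrative, not part of the library source): animateScroll
// also accepts a plain pixel offset instead of an anchor element, which is
// handy for programmatic scrolling such as "back to top" buttons.
smoothScroll.init();
smoothScroll.animateScroll(600, null, { speed: 300 }); // scroll 600px down the page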
smooth-scroll.js | /*!
* smooth-scroll v9.4.1: Animate scrolling to anchor links
* (c) 2016 Chris Ferdinandi
* MIT License
* http://github.com/cferdinandi/smooth-scroll
*/
(function (root, factory) {
if ( typeof define === 'function' && define.amd ) {
define([], factory(root));
} else if ( typeof exports === 'object' ) {
module.exports = factory(root);
} else {
root.smoothScroll = factory(root);
}
})(typeof global !== 'undefined' ? global : this.window || this.global, function (root) {
'use strict';
//
// Variables
//
var smoothScroll = {}; // Object for public APIs
var supports = 'querySelector' in document && 'addEventListener' in root; // Feature test
var settings, eventTimeout, fixedHeader, headerHeight, animationInterval;
// Default settings
var defaults = {
selector: '[data-scroll]',
selectorHeader: '[data-scroll-header]',
speed: 500,
easing: 'easeInOutCubic',
offset: 0,
updateURL: true,
callback: function () {}
};
//
// Methods
//
/**
* Merge two or more objects. Returns a new object.
* @private
* @param {Boolean} deep If true, do a deep (or recursive) merge [optional]
* @param {Object} objects The objects to merge together
* @returns {Object} Merged values of defaults and options
*/
var extend = function () {
// Variables
var extended = {};
var deep = false;
var i = 0;
var length = arguments.length;
// Check if a deep merge
if ( Object.prototype.toString.call( arguments[0] ) === '[object Boolean]' ) {
deep = arguments[0];
i++;
}
// Merge the object into the extended object
var merge = function (obj) {
for ( var prop in obj ) {
if ( Object.prototype.hasOwnProperty.call( obj, prop ) ) {
// If deep merge and property is an object, merge properties
if ( deep && Object.prototype.toString.call(obj[prop]) === '[object Object]' ) {
extended[prop] = extend( true, extended[prop], obj[prop] );
} else {
extended[prop] = obj[prop];
}
}
}
};
// Loop through each object and conduct a merge
for ( ; i < length; i++ ) {
var obj = arguments[i];
merge(obj);
}
return extended;
};
/**
* Get the height of an element.
* @private
* @param {Node} elem The element to get the height of
* @return {Number} The element's height in pixels
*/
var getHeight = function ( elem ) {
return Math.max( elem.scrollHeight, elem.offsetHeight, elem.clientHeight );
};
/**
* Get the closest matching element up the DOM tree.
* @private
* @param {Element} elem Starting element
* @param {String} selector Selector to match against (class, ID, data attribute, or tag)
* @return {Boolean|Element} Returns null if no match is found
*/
var getClosest = function ( elem, selector ) {
// Variables
var firstChar = selector.charAt(0);
var supports = 'classList' in document.documentElement;
var attribute, value;
// If selector is a data attribute, split attribute from value
if ( firstChar === '[' ) {
selector = selector.substr(1, selector.length - 2);
attribute = selector.split( '=' );
if ( attribute.length > 1 ) {
value = true;
attribute[1] = attribute[1].replace( /"/g, '' ).replace( /'/g, '' );
}
}
// Get closest match
for ( ; elem && elem !== document && elem.nodeType === 1; elem = elem.parentNode ) {
// If selector is a class
if ( firstChar === '.' ) {
if ( supports ) {
if ( elem.classList.contains( selector.substr(1) ) ) {
return elem;
}
} else {
if ( new RegExp('(^|\\s)' + selector.substr(1) + '(\\s|$)').test( elem.className ) ) {
return elem;
}
}
}
// If selector is an ID
if ( firstChar === '#' ) {
if ( elem.id === selector.substr(1) ) {
return elem;
}
}
// If selector is a data attribute
if ( firstChar === '[' ) {
if ( elem.hasAttribute( attribute[0] ) ) {
if ( value ) {
if ( elem.getAttribute( attribute[0] ) === attribute[1] ) {
return elem;
}
} else {
return elem;
}
}
}
// If selector is a tag
if ( elem.tagName.toLowerCase() === selector ) {
return elem;
}
}
return null;
};
/**
* Escape special characters for use with querySelector
* @public
* @param {String} id The anchor ID to escape
* @author Mathias Bynens
* @link https://github.com/mathiasbynens/CSS.escape
*/
smoothScroll.escapeCharacters = function ( id ) {
// Remove leading hash
if ( id.charAt(0) === '#' ) {
id = id.substr(1);
}
var string = String(id);
var length = string.length;
var index = -1;
var codeUnit;
var result = '';
var firstCodeUnit = string.charCodeAt(0);
while (++index < length) {
codeUnit = string.charCodeAt(index);
// Note: there’s no need to special-case astral symbols, surrogate
// pairs, or lone surrogates.
// If the character is NULL (U+0000), then throw an
// `InvalidCharacterError` exception and terminate these steps.
if (codeUnit === 0x0000) {
throw new Error(
'Invalid character: the input contains U+0000.'
);
}
if (
// If the character is in the range [\1-\1F] (U+0001 to U+001F) or is
// U+007F, […]
(codeUnit >= 0x0001 && codeUnit <= 0x001F) || codeUnit == 0x007F ||
// If the character is the first character and is in the range [0-9]
// (U+0030 to U+0039), […]
(index === 0 && codeUnit >= 0x0030 && codeUnit <= 0x0039) ||
// If the character is the second character and is in the range [0-9]
// (U+0030 to U+0039) and the first character is a `-` (U+002D), […]
(
index === 1 &&
codeUnit >= 0x0030 && codeUnit <= 0x0039 &&
firstCodeUnit === 0x002D
)
) {
// http://dev.w3.org/csswg/cssom/#escape-a-character-as-code-point
result += '\\' + codeUnit.toString(16) + ' ';
continue;
}
// If the character is not handled by one of the above rules and is
// greater than or equal to U+0080, is `-` (U+002D) or `_` (U+005F), or
// is in one of the ranges [0-9] (U+0030 to U+0039), [A-Z] (U+0041 to
// U+005A), or [a-z] (U+0061 to U+007A), […]
if (
codeUnit >= 0x0080 ||
codeUnit === 0x002D ||
codeUnit === 0x005F ||
codeUnit >= 0x0030 && codeUnit <= 0x0039 ||
codeUnit >= 0x0041 && codeUnit <= 0x005A ||
codeUnit >= 0x0061 && codeUnit <= 0x007A
) {
// the character itself
result += string.charAt(index);
continue;
}
// Otherwise, the escaped character.
// http://dev.w3.org/csswg/cssom/#escape-a-character
result += '\\' + string.charAt(index);
}
return '#' + result;
};
/**
* Calculate the easing pattern
* @private
* @link https://gist.github.com/gre/1650294
* @param {String} type Easing pattern
* @param {Number} time Time animation should take to complete
* @returns {Number}
*/
var easingPattern = function ( type, time ) {
var pattern;
if ( type === 'easeInQuad' ) pattern = time * time; // accelerating from zero velocity
if ( type === 'easeOutQuad' ) pattern = time * (2 - time); // decelerating to zero velocity
if ( type === 'easeInOutQuad' ) pattern = time < 0.5 ? 2 * time * time : -1 + (4 - 2 * time) * time; // acceleration until halfway, then deceleration
if ( type === 'easeInCubic' ) pattern = time * time * time; // accelerating from zero velocity
if ( type === 'easeOutCubic' ) pattern = (--time) * time * time + 1; // decelerating to zero velocity
if ( type === 'easeInOutCubic' ) pattern = time < 0.5 ? 4 * time * time * time : (time - 1) * (2 * time - 2) * (2 * time - 2) + 1; // acceleration until halfway, then deceleration
if ( type === 'easeInQuart' ) pattern = time * time * time * time; // accelerating from zero velocity
if ( type === 'easeOutQuart' ) pattern = 1 - (--time) * time * time * time; // decelerating to zero velocity
if ( type === 'easeInOutQuart' ) pattern = time < 0.5 ? 8 * time * time * time * time : 1 - 8 * (--time) * time * time * time; // acceleration until halfway, then deceleration
if ( type === 'easeInQuint' ) pattern = time * time * time * time * time; // accelerating from zero velocity
if ( type === 'easeOutQuint' ) pattern = 1 + (--time) * time * time * time * time; // decelerating to zero velocity
if ( type === 'easeInOutQuint' ) pattern = time < 0.5 ? 16 * time * time * time * time * time : 1 + 16 * (--time) * time * time * time * time; // acceleration until halfway, then deceleration
return pattern || time; // no easing, no acceleration
};
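// Illustrative check of the easing table above (not from the source):
// easingPattern('easeInQuad', 0.5) === 0.25, and an unrecognised type falls
// back to the linear value, e.g. easingPattern('nope', 0.5) === 0.5.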
/**
* Calculate how far to scroll
* @private
* @param {Element} anchor The anchor element to scroll to
* @param {Number} headerHeight Height of a fixed header, if any
* @param {Number} offset Number of pixels by which to offset scroll
* @returns {Number}
*/
var getEndLocation = function ( anchor, headerHeight, offset ) {
var location = 0;
if (anchor.offsetParent) {
do {
location += anchor.offsetTop;
anchor = anchor.offsetParent;
} while (anchor);
}
location = Math.max(location - headerHeight - offset, 0);
return Math.min(location, getDocumentHeight() - getViewportHeight());
};
/**
* Determine the viewport's height
* @private
* @returns {Number}
*/
var getViewportHeight = function() {
return Math.max(document.documentElement.clientHeight, window.innerHeight || 0);
};
/**
* Determine the document's height
* @private
* @returns {Number}
*/
var getDocumentHeight = function () {
return Math.max(
root.document.body.scrollHeight, root.document.documentElement.scrollHeight,
root.document.body.offsetHeight, root.document.documentElement.offsetHeight,
root.document.body.clientHeight, root.document.documentElement.clientHeight
);
};
/**
* Convert data-options attribute into an object of key/value pairs
* @private
* @param {String} options Link-specific options as a data attribute string
* @returns {Object}
*/
var getDataOptions = function ( options ) {
return !options || !(typeof JSON === 'object' && typeof JSON.parse === 'function') ? {} : JSON.parse( options );
};
/**
* Update the URL
* @private
* @param {Element} anchor The element to scroll to
* @param {Boolean} url Whether or not to update the URL history
*/
var updateUrl = function ( anchor, url ) {
if ( root.history.pushState && (url || url === 'true') && root.location.protocol !== 'file:' ) {
root.history.pushState( null, null, [root.location.protocol, '//', root.location.host, root.location.pathname, root.location.search, anchor].join('') );
}
};
var getHeaderHeight = function ( header ) {
return header === null ? 0 : ( getHeight( header ) + header.offsetTop );
};
/**
* Start/stop the scrolling animation
* @public
* @param {Element} anchor The element to scroll to
* @param {Element} toggle The element that toggled the scroll event
* @param {Object} options
*/
smoothScroll.animateScroll = function ( anchor, toggle, options ) {
// Options and overrides
var overrides = getDataOptions( toggle ? toggle.getAttribute('data-options') : null );
var animateSettings = extend( settings || defaults, options || {}, overrides ); // Merge user options with defaults
// Selectors and variables
var isNum = Object.prototype.toString.call( anchor ) === '[object Number]';
var hash = smoothScroll.escapeCharacters( anchor );
var anchorElem = isNum ? null : ( hash === '#' ? root.document.documentElement : root.document.querySelector( hash ) );
if ( !isNum && !anchorElem ) return;
var startLocation = root.pageYOffset; // Current location on the page
if ( !fixedHeader ) { fixedHeader = root.document.querySelector( animateSettings.selectorHeader ); } // Get the fixed header if not already set
if ( !headerHeight ) { headerHeight = getHeaderHeight( fixedHeader ); } // Get the height of a fixed header if one exists and not already set
var endLocation = isNum ? anchor : getEndLocation( anchorElem, headerHeight, parseInt(animateSettings.offset, 10) ); // Location to scroll to
var distance = endLocation - startLocation; // distance to travel
var documentHeight = getDocumentHeight();
var timeLapsed = 0;
var percentage, position;
// Update URL
if ( !isNum ) {
updateUrl( anchor, animateSettings.updateURL );
}
/**
* Stop the scroll animation when it reaches its target (or the bottom/top of page)
* @private
* @param {Number} position Current position on the page
* @param {Number} endLocation Scroll to location
* @param {Number} animationInterval The interval timer handle to clear once done
*/
var stopAnimateScroll = function ( position, endLocation, animationInterval ) {
var currentLocation = root.pageYOffset;
if ( position == endLocation || currentLocation == endLocation || ( (root.innerHeight + currentLocation) >=
documentHeight ) ) {
clearInterval(animationInterval);
// If scroll target is an anchor, bring it into focus
if ( !isNum ) {
anchorElem.focus();
if ( document.activeElement.id !== anchorElem.id ) {
anchorElem.setAttribute( 'tabindex', '-1' );
anchorElem.focus();
anchorElem.style.outline = 'none';
}
}
animateSettings.callback( anchor, toggle ); // Run callbacks after animation complete
}
};
/**
* Loop scrolling animation
* @private
*/
var loopAnimateScroll = function () {
timeLapsed += 16;
percentage = ( timeLapsed / parseInt(animateSettings.speed, 10) );
percentage = ( percentage > 1 ) ? 1 : percentage;
position = startLocation + ( distance * easingPattern(animateSettings.easing, percentage) );
root.scrollTo( 0, Math.floor(position) );
stopAnimateScroll(position, endLocation, animationInterval);
};
/**
* Set interval timer
* @private
*/
var startAnimateScroll = function () {
clearInterval(animationInterval);
animationInterval = setInterval(loopAnimateScroll, 16);
};
/**
* Reset position to fix weird iOS bug
* @link https://github.com/cferdinandi/smooth-scroll/issues/45
*/
if ( root.pageYOffset === 0 ) {
root.scrollTo( 0, 0 );
}
// Start scrolling animation
startAnimateScroll();
};
/**
* If smooth scroll element clicked, animate scroll
* @private
*/
var eventHandler = function (event) {
// Don't run if right-click or command/control + click
if ( event.button !== 0 || event.metaKey || event.ctrlKey ) return;
// If a smooth scroll link, animate it
var toggle = getClosest( event.target, settings.selector );
if ( toggle && toggle.tagName.toLowerCase() === 'a' ) {
// Check that link is an anchor and points to current page
if ( toggle.hostname !== root.location.hostname || toggle.pathname !== root.location.pathname || !/#/.test(toggle.href) ) return;
event.preventDefault(); // Prevent default click event
smoothScroll.animateScroll( toggle.hash, toggle, settings); // Animate scroll
}
};
/**
* On window scroll and resize, only run events at a rate of 15fps for better performance
* @private
* @param {Event} event The scroll or resize event
*/
var eventThrottler = function (event) {
if ( !eventTimeout ) {
eventTimeout = setTimeout(function() {
eventTimeout = null; // Reset timeout
headerHeight = getHeaderHeight( fixedHeader ); // Get the height of a fixed header if one exists
}, 66);
}
};
/**
* Destroy the current initialization.
* @public
*/
smoothScroll.destroy = function () {
// If plugin isn't already initialized, stop
if ( !settings ) return;
// Remove event listeners
root.document.removeEventListener( 'click', eventHandler, false );
root.removeEventListener( 'resize', eventThrottler, false );
// Reset variables
settings = null;
eventTimeout = null;
fixedHeader = null;
headerHeight = null;
animationInterval = null;
};
/**
* Initialize Smooth Scroll
* @public
* @param {Object} options User settings
*/
smoothScroll.init = function ( options ) {
// feature test
if ( !supports ) return;
// Destroy any existing initializations
smoothScroll.destroy();
// Selectors and variables
settings = extend( defaults, options || {} ); // Merge user options with defaults
fixedHeader = root.document.querySelector( settings.selectorHeader ); // Get the fixed header
headerHeight = getHeaderHeight( fixedHeader );
// When a toggle is clicked, run the click handler
root.document.addEventListener('click', eventHandler, false );
if ( fixedHeader ) { root.addEventListener( 'resize', eventThrottler, false ); }
};
//
// Public APIs
//
return smoothScroll;
});
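// Usage sketch (illustrative, not part of the library source): wiring up the
// plugin above with a fixed header and a completion callback. All option
// names come from the `defaults` object in the code.
smoothScroll.init({
    selector: '[data-scroll]',
    selectorHeader: '[data-scroll-header]',
    speed: 750,
    easing: 'easeInOutQuad',
    updateURL: false,
    callback: function (anchor, toggle) {
        console.log('Finished scrolling to ' + anchor);
    }
});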
playmp3.py | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
#======================================================================
#
# playmp3.py - play sound with ctypes + mci
#
# Created by skywind on 2013/12/01
# Last change: 2014/01/26 23:40:20
#
#======================================================================
from __future__ import print_function
import sys
import time
import os
import ctypes
import threading
#----------------------------------------------------------------------
# 2/3 compatible
#----------------------------------------------------------------------
if sys.version_info[0] >= 3:
    long = int
    unicode = str
    xrange = range
else:
    input = raw_input   # py2's builtin input() would eval() what the user types
#----------------------------------------------------------------------
# WinMM - Windows player
#----------------------------------------------------------------------
class WinMM (object):
def __init__ (self, prefix = ''):
import ctypes.wintypes
self.__winmm = ctypes.windll.winmm
self.__mciSendString = self.__winmm.mciSendStringW
self.__prefix = prefix
LPCWSTR = ctypes.wintypes.LPCWSTR
UINT = ctypes.wintypes.UINT
HANDLE = ctypes.wintypes.HANDLE
DWORD = ctypes.wintypes.DWORD
self.__mciSendString.argtypes = [LPCWSTR, LPCWSTR, UINT, HANDLE]
self.__mciSendString.restype = ctypes.wintypes.DWORD
self.__mciGetErrorStringW = self.__winmm.mciGetErrorStringW
self.__mciGetErrorStringW.argtypes = [DWORD, LPCWSTR, UINT]
self.__mciGetErrorStringW.restype = ctypes.wintypes.BOOL
self.__buffer = ctypes.create_unicode_buffer(2048)
self.__alias_index = 0
self.__lock = threading.Lock()
def mciSendString (self, command, encoding = None):
        if encoding is None:
            encoding = sys.getfilesystemencoding()
        if isinstance(command, bytes):
            command = command.decode(encoding)
        with self.__lock:
            hr = self.__mciSendString(command, self.__buffer, 2048, 0)
            hr = (hr != 0) and long(hr) or self.__buffer.value
        return hr
def mciGetErrorString (self, error):
buffer = self.__buffer
with self.__lock:
hr = self.__mciGetErrorStringW(error, buffer, 2048)
if hr == 0:
hr = None
else:
hr = buffer.value
return hr
def open (self, filename, media_type = ''):
if not os.path.exists(filename):
return None
filename = os.path.abspath(filename)
with self.__lock:
name = 'media:%s%d'%(self.__prefix, self.__alias_index)
self.__alias_index += 1
if self.__alias_index > 0x7fffffff:
self.__alias_index = 0
cmd = u'open "%s" alias %s'%(filename, name)
if media_type:
cmd = u'open "%s" type %s alias %s'%(filename, media_type, name)
hr = self.mciSendString(cmd)
if isinstance(hr, str) or isinstance(hr, unicode):
return name
return None
def close (self, name):
hr = self.mciSendString(u'close %s'%name)
if isinstance(hr, unicode) or isinstance(hr, str):
return True
return False
def __get_status (self, name, what):
hr = self.mciSendString(u'status %s %s'%(name, what))
if isinstance(hr, unicode) or isinstance(hr, str):
return hr
return None
def __get_status_int (self, name, what):
hr = self.__get_status(name, what)
if hr is None:
return -1
hr = long(hr)
return (hr > 0x7fffffff) and hr or int(hr)
def __mci_no_return (self, cmd):
hr = self.mciSendString(cmd)
if isinstance(hr, unicode) or isinstance(hr, str):
return True
return False
def get_length (self, name):
return self.__get_status_int(name, 'length')
def get_position (self, name):
return self.__get_status_int(name, 'position')
def get_mode (self, name):
hr = self.__get_status(name, 'mode')
return hr
def play (self, name, start = 0, end = -1, wait = False, repeat = False):
if wait:
repeat = False
if start < 0:
start = 0
cmd = u'play %s from %d'%(name, start)
if end >= 0:
cmd += u' to %d'%end
if wait:
cmd += u' wait'
if repeat:
cmd += u' repeat'
return self.__mci_no_return(cmd)
def stop (self, name):
return self.__mci_no_return(u'stop %s'%name)
def seek (self, name, position):
if isinstance(position, str) or isinstance(position, unicode):
if position == u'end':
position = 'end'
else:
position = '0'
elif position < 0:
position = 'end'
else:
position = str(position)
        return self.__mci_no_return(u'seek %s to %s' % (name, position))
def pause (self, name):
return self.__mci_no_return(u'pause %s'%name)
def resume (self, name):
return self.__mci_no_return(u'resume %s'%name)
def get_volume (self, name):
return self.__get_status_int(name, 'volume')
def set_volume (self, name, volume):
return self.__mci_no_return(u'setaudio %s volume to %s'%(name, volume))
def is_playing (self, name):
mode = self.get_mode(name)
if mode is None:
return False
if mode != 'playing':
return False
return True
#----------------------------------------------------------------------
# main entry
#----------------------------------------------------------------------
def main (args = None):
if args is None:
args = sys.argv
args = [n for n in args]
if len(args) < 2:
print('usage: playmp3.py [mp3]')
return 0
mp3 = args[1]
if not os.path.exists(mp3):
        print('not found: %s' % mp3)
return 1
def ms2time(ms):
if ms <= 0: return '00:00:000'
        time_sec, ms = ms // 1000, ms % 1000
        time_min, time_sec = time_sec // 60, time_sec % 60
        time_hor, time_min = time_min // 60, time_min % 60
if time_hor == 0: return '%02d:%02d:%03d'%(time_min, time_sec, ms)
return '%02d:%02d:%02d:%03d'%(time_hor, time_min, time_sec, ms)
winmm = WinMM()
name = winmm.open(mp3)
if name is None:
print('can not play: %s'%mp3)
return 2
import ctypes.wintypes
user32 = ctypes.windll.user32
user32.GetAsyncKeyState.restype = ctypes.wintypes.WORD
user32.GetAsyncKeyState.argtypes = [ ctypes.c_char ]
size = winmm.get_length(name)
print('Playing "%s", press \'q\' to exit ....'%mp3)
winmm.play(name, repeat = True)
while 1:
if user32.GetAsyncKeyState(b'Q'): break
time.sleep(0.1)
pos = winmm.get_position(name)
sys.stdout.write('[%s / %s]\r'%(ms2time(pos), ms2time(size)))
sys.stdout.flush()
print('')
print('stopped')
winmm.close(name)
return 0
#----------------------------------------------------------------------
# testing case
#----------------------------------------------------------------------
if __name__ == '__main__':
def test1():
winmm = WinMM()
name = winmm.open('d:/music/sample.mp3')
print(name)
print(winmm.get_length(name))
print(winmm.get_volume(name))
print(winmm.set_volume(name, 1000))
ts = time.time()
print(winmm.play(name))
ts = time.time() - ts
print("ts", ts)
input()
print('is_playing', winmm.is_playing(name))
print('position:', winmm.get_position(name))
print('mode:', winmm.get_mode(name))
print(winmm.stop(name))
print('mode:', winmm.get_mode(name))
return 0
def test2():
main([__file__, 'd:/music/sample.mp3'])
return 0
# test2()
main()
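# Usage sketch (illustrative, not part of the original script): driving the
# WinMM wrapper above directly. Windows-only; the mp3 path is hypothetical.
def _winmm_demo():
    winmm = WinMM()
    alias = winmm.open('c:/media/example.mp3')
    if alias is not None:
        winmm.play(alias, wait = True)  # block until playback finishes
        winmm.close(alias)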
playmp3.py | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
#======================================================================
#
# playmp3.py - play sound with ctypes + mci
#
# Created by skywind on 2013/12/01
# Last change: 2014/01/26 23:40:20
#
#======================================================================
from __future__ import print_function
import sys
import time
import os
import ctypes
import threading
#----------------------------------------------------------------------
# 2/3 compatible
#----------------------------------------------------------------------
if sys.version_info[0] >= 3:
    long = int
    unicode = str
    xrange = range
else:
    input = raw_input   # py2's builtin input() would eval() what the user types
#----------------------------------------------------------------------
# WinMM - Windows player
#----------------------------------------------------------------------
class WinMM (object):
def __init__ (self, prefix = ''):
import ctypes.wintypes
self.__winmm = ctypes.windll.winmm
self.__mciSendString = self.__winmm.mciSendStringW
self.__prefix = prefix
LPCWSTR = ctypes.wintypes.LPCWSTR
UINT = ctypes.wintypes.UINT
HANDLE = ctypes.wintypes.HANDLE
DWORD = ctypes.wintypes.DWORD
self.__mciSendString.argtypes = [LPCWSTR, LPCWSTR, UINT, HANDLE]
self.__mciSendString.restype = ctypes.wintypes.DWORD
self.__mciGetErrorStringW = self.__winmm.mciGetErrorStringW
self.__mciGetErrorStringW.argtypes = [DWORD, LPCWSTR, UINT]
self.__mciGetErrorStringW.restype = ctypes.wintypes.BOOL
self.__buffer = ctypes.create_unicode_buffer(2048)
self.__alias_index = 0
self.__lock = threading.Lock()
def mciSendString (self, command, encoding = None):
if encoding is None:
encoding = sys.getfilesystemencoding()
if isinstance(command, bytes):
command = command.decode(encoding)
with self.__lock:
hr = self.__mciSendString(command, self.__buffer, 2048, 0)
hr = (hr != 0) and long(hr) or self.__buffer.value
return hr
def mciGetErrorString (self, error):
buffer = self.__buffer
with self.__lock:
hr = self.__mciGetErrorStringW(error, buffer, 2048)
if hr == 0:
hr = None
else:
hr = buffer.value
return hr
def open (self, filename, media_type = ''):
if not os.path.exists(filename):
return None
filename = os.path.abspath(filename)
with self.__lock:
name = 'media:%s%d'%(self.__prefix, self.__alias_index)
self.__alias_index += 1
if self.__alias_index > 0x7fffffff:
self.__alias_index = 0
cmd = u'open "%s" alias %s'%(filename, name)
if media_type:
cmd = u'open "%s" type %s alias %s'%(filename, media_type, name)
hr = self.mciSendString(cmd)
if isinstance(hr, str) or isinstance(hr, unicode):
return name
return None
def close (self, name):
hr = self.mciSendString(u'close %s'%name)
if isinstance(hr, unicode) or isinstance(hr, str):
return True
return False
def __get_status (self, name, what):
hr = self.mciSendString(u'status %s %s'%(name, what))
if isinstance(hr, unicode) or isinstance(hr, str):
return hr
return None
def __get_status_int (self, name, what):
hr = self.__get_status(name, what)
if hr is None:
return -1
hr = long(hr)
return (hr > 0x7fffffff) and hr or int(hr)
def __mci_no_return (self, cmd):
hr = self.mciSendString(cmd)
if isinstance(hr, unicode) or isinstance(hr, str):
return True
return False
def get_length (self, name):
return self.__get_status_int(name, 'length')
def get_position (self, name):
return self.__get_status_int(name, 'position')
def get_mode (self, name):
hr = self.__get_status(name, 'mode')
return hr
def play (self, name, start = 0, end = -1, wait = False, repeat = False):
if wait:
repeat = False
if start < 0:
start = 0
cmd = u'play %s from %d'%(name, start)
if end >= 0:
cmd += u' to %d'%end
if wait:
cmd += u' wait'
if repeat:
cmd += u' repeat'
return self.__mci_no_return(cmd)
def stop (self, name):
return self.__mci_no_return(u'stop %s'%name)
def seek (self, name, position):
if isinstance(position, str) or isinstance(position, unicode):
if position == u'end':
position = 'end'
else:
position = '0'
elif position < 0:
position = 'end'
else:
position = str(position)
        return self.__mci_no_return(u'seek %s to %s' % (name, position))
def pause (self, name):
return self.__mci_no_return(u'pause %s'%name)
def resume (self, name):
return self.__mci_no_return(u'resume %s'%name)
def get_volume (self, name):
return self.__get_status_int(name, 'volume')
def set_volume (self, name, volume):
return self.__mci_no_return(u'setaudio %s volume to %s'%(name, volume))
def is_playing (self, name):
mode = self.get_mode(name)
if mode is None:
return False
if mode != 'playing':
return False
return True
#----------------------------------------------------------------------
# main entry
#----------------------------------------------------------------------
def main (args = None):
if args is None:
args = sys.argv
args = [n for n in args]
if len(args) < 2:
print('usage: playmp3.py [mp3]')
return 0
mp3 = args[1]
if not os.path.exists(mp3):
        print('not found: %s' % mp3)
return 1
def ms2time(ms):
if ms <= 0: return '00:00:000'
        time_sec, ms = ms // 1000, ms % 1000
        time_min, time_sec = time_sec // 60, time_sec % 60
        time_hor, time_min = time_min // 60, time_min % 60
if time_hor == 0: return '%02d:%02d:%03d'%(time_min, time_sec, ms)
return '%02d:%02d:%02d:%03d'%(time_hor, time_min, time_sec, ms)
winmm = WinMM()
name = winmm.open(mp3)
if name is None:
print('can not play: %s'%mp3)
return 2
import ctypes.wintypes
user32 = ctypes.windll.user32
user32.GetAsyncKeyState.restype = ctypes.wintypes.WORD
user32.GetAsyncKeyState.argtypes = [ ctypes.c_char ]
size = winmm.get_length(name)
print('Playing "%s", press \'q\' to exit ....'%mp3)
winmm.play(name, repeat = True)
while 1:
if user32.GetAsyncKeyState(b'Q'): break
time.sleep(0.1)
pos = winmm.get_position(name)
sys.stdout.write('[%s / %s]\r'%(ms2time(pos), ms2time(size)))
sys.stdout.flush()
print('')
print('stopped')
winmm.close(name)
return 0
#----------------------------------------------------------------------
# testing case
#----------------------------------------------------------------------
if __name__ == '__main__':
def test1():
winmm = WinMM()
name = winmm.open('d:/music/sample.mp3')
print(name)
print(winmm.get_length(name))
print(winmm.get_volume(name))
print(winmm.set_volume(name, 1000))
ts = time.time()
print(winmm.play(name))
ts = time.time() - ts
print("ts", ts)
input()
print('is_playing', winmm.is_playing(name))
print('position:', winmm.get_position(name))
print('mode:', winmm.get_mode(name))
print(winmm.stop(name))
print('mode:', winmm.get_mode(name))
return 0
def test2():
main([__file__, 'd:/music/sample.mp3'])
return 0
# test2()
    main()
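# Usage sketch (illustrative, not part of the original script): the wrapper
# above ultimately speaks the MCI command language, so raw command strings
# work too. Path and alias below are hypothetical; Windows only.
def _mci_demo():
    winmm = WinMM()
    winmm.mciSendString(u'open "c:/media/clip.mp3" alias demo')
    winmm.mciSendString(u'play demo wait')
    winmm.mciSendString(u'close demo')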
playmp3.py | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
#======================================================================
#
# playmp3.py - play sound with ctypes + mci
#
# Created by skywind on 2013/12/01
# Last change: 2014/01/26 23:40:20
#
#======================================================================
from __future__ import print_function
import sys
import time
import os
import ctypes
import threading
#----------------------------------------------------------------------
# 2/3 compatible
#----------------------------------------------------------------------
if sys.version_info[0] >= 3:
    long = int
    unicode = str
    xrange = range
else:
    input = raw_input   # py2's builtin input() would eval() what the user types
#----------------------------------------------------------------------
# WinMM - Windows player
#----------------------------------------------------------------------
class WinMM (object):
def __init__ (self, prefix = ''):
import ctypes.wintypes
self.__winmm = ctypes.windll.winmm
self.__mciSendString = self.__winmm.mciSendStringW
self.__prefix = prefix
LPCWSTR = ctypes.wintypes.LPCWSTR
UINT = ctypes.wintypes.UINT
HANDLE = ctypes.wintypes.HANDLE
DWORD = ctypes.wintypes.DWORD
self.__mciSendString.argtypes = [LPCWSTR, LPCWSTR, UINT, HANDLE]
self.__mciSendString.restype = ctypes.wintypes.DWORD
self.__mciGetErrorStringW = self.__winmm.mciGetErrorStringW
self.__mciGetErrorStringW.argtypes = [DWORD, LPCWSTR, UINT]
self.__mciGetErrorStringW.restype = ctypes.wintypes.BOOL
self.__buffer = ctypes.create_unicode_buffer(2048)
self.__alias_index = 0
self.__lock = threading.Lock()
def mciSendString (self, command, encoding = None):
if encoding is None:
encoding = sys.getfilesystemencoding()
if isinstance(command, bytes):
command = command.decode(encoding)
with self.__lock:
hr = self.__mciSendString(command, self.__buffer, 2048, 0)
hr = (hr != 0) and long(hr) or self.__buffer.value
return hr
def mciGetErrorString (self, error):
buffer = self.__buffer
with self.__lock:
hr = self.__mciGetErrorStringW(error, buffer, 2048)
if hr == 0:
hr = None
else:
hr = buffer.value
return hr
def open (self, filename, media_type = ''):
if not os.path.exists(filename):
return None
filename = os.path.abspath(filename)
with self.__lock:
name = 'media:%s%d'%(self.__prefix, self.__alias_index)
self.__alias_index += 1
if self.__alias_index > 0x7fffffff:
self.__alias_index = 0
cmd = u'open "%s" alias %s'%(filename, name)
if media_type:
cmd = u'open "%s" type %s alias %s'%(filename, media_type, name)
hr = self.mciSendString(cmd)
if isinstance(hr, str) or isinstance(hr, unicode):
return name
return None
def close (self, name):
hr = self.mciSendString(u'close %s'%name)
if isinstance(hr, unicode) or isinstance(hr, str):
return True
return False
def __get_status (self, name, what):
hr = self.mciSendString(u'status %s %s'%(name, what))
if isinstance(hr, unicode) or isinstance(hr, str):
return hr
return None
def __get_status_int (self, name, what):
hr = self.__get_status(name, what)
if hr is None:
return -1
hr = long(hr)
return (hr > 0x7fffffff) and hr or int(hr)
def __mci_no_return (self, cmd):
hr = self.mciSendString(cmd)
if isinstance(hr, unicode) or isinstance(hr, str):
return True
return False
def get_length (self, name):
return self.__get_status_int(name, 'length')
def get_position (self, name):
return self.__get_status_int(name, 'position')
def get_mode (self, name):
hr = self.__get_status(name, 'mode')
return hr
def play (self, name, start = 0, end = -1, wait = False, repeat = False):
if wait:
repeat = False
if start < 0:
start = 0
cmd = u'play %s from %d'%(name, start)
if end >= 0:
cmd += u' to %d'%end
if wait:
cmd += u' wait'
if repeat:
cmd += u' repeat'
return self.__mci_no_return(cmd)
def stop (self, name):
return self.__mci_no_return(u'stop %s'%name)
def seek (self, name, position):
if isinstance(position, str) or isinstance(position, unicode):
if position == u'end':
position = 'end'
else:
position = '0'
elif position < 0:
position = 'end'
else:
position = str(position)
        return self.__mci_no_return(u'seek %s to %s' % (name, position))
def pause (self, name):
return self.__mci_no_return(u'pause %s'%name)
def resume (self, name):
return self.__mci_no_return(u'resume %s'%name)
def get_volume (self, name):
return self.__get_status_int(name, 'volume')
def set_volume (self, name, volume):
return self.__mci_no_return(u'setaudio %s volume to %s'%(name, volume))
def is_playing (self, name):
mode = self.get_mode(name)
if mode is None:
return False
if mode != 'playing':
return False
return True
#----------------------------------------------------------------------
# main entry
#----------------------------------------------------------------------
def main (args = None):
if args is None:
args = sys.argv
args = [n for n in args]
if len(args) < 2:
print('usage: playmp3.py [mp3]')
return 0
mp3 = args[1]
if not os.path.exists(mp3):
        print('not found: %s' % mp3)
return 1
def ms2time(ms):
if ms <= 0: return '00:00:000'
        time_sec, ms = ms // 1000, ms % 1000
        time_min, time_sec = time_sec // 60, time_sec % 60
        time_hor, time_min = time_min // 60, time_min % 60
if time_hor == 0: return '%02d:%02d:%03d'%(time_min, time_sec, ms)
return '%02d:%02d:%02d:%03d'%(time_hor, time_min, time_sec, ms)
winmm = WinMM()
name = winmm.open(mp3)
if name is None:
print('can not play: %s'%mp3)
return 2
import ctypes.wintypes
user32 = ctypes.windll.user32
user32.GetAsyncKeyState.restype = ctypes.wintypes.WORD
user32.GetAsyncKeyState.argtypes = [ ctypes.c_char ]
size = winmm.get_length(name)
print('Playing "%s", press \'q\' to exit ....'%mp3)
winmm.play(name, repeat = True)
while 1:
if user32.GetAsyncKeyState(b'Q'): break
time.sleep(0.1)
pos = winmm.get_position(name)
sys.stdout.write('[%s / %s]\r'%(ms2time(pos), ms2time(size)))
sys.stdout.flush()
print('')
print('stopped')
winmm.close(name)
return 0
#----------------------------------------------------------------------
# testing case
#----------------------------------------------------------------------
if __name__ == '__main__':
    def test1():
winmm = WinMM()
name = winmm.open('d:/music/sample.mp3')
print(name)
print(winmm.get_length(name))
print(winmm.get_volume(name))
print(winmm.set_volume(name, 1000))
ts = time.time()
print(winmm.play(name))
ts = time.time() - ts
print("ts", ts)
input()
print('is_playing', winmm.is_playing(name))
print('position:', winmm.get_position(name))
print('mode:', winmm.get_mode(name))
print(winmm.stop(name))
print('mode:', winmm.get_mode(name))
return 0
def test2():
main([__file__, 'd:/music/sample.mp3'])
return 0
# test2()
    main()
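# Illustrative sketch: volume and position control through the wrapper above
# (MCI volume runs on a 0..1000 scale). The mp3 path is hypothetical; Windows only.
def _volume_demo():
    winmm = WinMM()
    alias = winmm.open('c:/media/example.mp3')
    if alias:
        winmm.set_volume(alias, 500)
        winmm.play(alias)
        time.sleep(1.0)
        print(winmm.get_position(alias), '/', winmm.get_length(alias))
        winmm.stop(alias)
        winmm.close(alias)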
playmp3.py | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
#======================================================================
#
# playmp3.py - play sound with ctypes + mci
#
# Created by skywind on 2013/12/01
# Last change: 2014/01/26 23:40:20
#
#======================================================================
from __future__ import print_function
import sys
import time
import os
import ctypes
import threading
#----------------------------------------------------------------------
# 2/3 compatible
#----------------------------------------------------------------------
if sys.version_info[0] >= 3:
    long = int
    unicode = str
    xrange = range
else:
    input = raw_input   # py2's builtin input() would eval() what the user types
#----------------------------------------------------------------------
# WinMM - Windows player
#----------------------------------------------------------------------
class WinMM (object):
def __init__ (self, prefix = ''):
import ctypes.wintypes
self.__winmm = ctypes.windll.winmm
self.__mciSendString = self.__winmm.mciSendStringW
self.__prefix = prefix
LPCWSTR = ctypes.wintypes.LPCWSTR
UINT = ctypes.wintypes.UINT
HANDLE = ctypes.wintypes.HANDLE
DWORD = ctypes.wintypes.DWORD
self.__mciSendString.argtypes = [LPCWSTR, LPCWSTR, UINT, HANDLE]
self.__mciSendString.restype = ctypes.wintypes.DWORD
self.__mciGetErrorStringW = self.__winmm.mciGetErrorStringW
self.__mciGetErrorStringW.argtypes = [DWORD, LPCWSTR, UINT]
self.__mciGetErrorStringW.restype = ctypes.wintypes.BOOL
self.__buffer = ctypes.create_unicode_buffer(2048)
self.__alias_index = 0
self.__lock = threading.Lock()
def mciSendString (self, command, encoding = None):
if encoding is None:
encoding = sys.getfilesystemencoding()
if isinstance(command, bytes):
command = command.decode(encoding)
with self.__lock:
hr = self.__mciSendString(command, self.__buffer, 2048, 0)
hr = (hr != 0) and long(hr) or self.__buffer.value
return hr
def mciGetErrorString (self, error):
buffer = self.__buffer
with self.__lock:
hr = self.__mciGetErrorStringW(error, buffer, 2048)
if hr == 0:
hr = None
else:
hr = buffer.value
return hr
def open (self, filename, media_type = ''):
if not os.path.exists(filename):
return None
filename = os.path.abspath(filename)
with self.__lock:
name = 'media:%s%d'%(self.__prefix, self.__alias_index)
self.__alias_index += 1
if self.__alias_index > 0x7fffffff:
self.__alias_index = 0
cmd = u'open "%s" alias %s'%(filename, name)
if media_type:
cmd = u'open "%s" type %s alias %s'%(filename, media_type, name)
hr = self.mciSendString(cmd)
if isinstance(hr, str) or isinstance(hr, unicode):
return name
return None
def close (self, name):
hr = self.mciSendString(u'close %s'%name)
if isinstance(hr, unicode) or isinstance(hr, str):
return True
return False
def __get_status (self, name, what):
hr = self.mciSendString(u'status %s %s'%(name, what))
if isinstance(hr, unicode) or isinstance(hr, str):
return hr
return None
def __get_status_int (self, name, what):
hr = self.__get_status(name, what)
if hr is None:
return -1
hr = long(hr)
return (hr > 0x7fffffff) and hr or int(hr)
def __mci_no_return (self, cmd):
hr = self.mciSendString(cmd)
if isinstance(hr, unicode) or isinstance(hr, str):
return True
return False
def get_length (self, name):
return self.__get_status_int(name, 'length')
def get_position (self, name):
return self.__get_status_int(name, 'position')
def get_mode (self, name):
hr = self.__get_status(name, 'mode')
return hr
def play (self, name, start = 0, end = -1, wait = False, repeat = False):
if wait:
repeat = False
if start < 0:
start = 0
cmd = u'play %s from %d'%(name, start)
if end >= 0:
cmd += u' to %d'%end
if wait:
cmd += u' wait'
if repeat:
cmd += u' repeat'
return self.__mci_no_return(cmd)
def stop (self, name):
return self.__mci_no_return(u'stop %s'%name)
def seek (self, name, position):
if isinstance(position, str) or isinstance(position, unicode):
if position == u'end':
position = 'end'
else:
                position = '0'
elif position < 0:
position = 'end'
else:
position = str(position)
        return self.__mci_no_return(u'seek %s to %s' % (name, position))
def pause (self, name):
return self.__mci_no_return(u'pause %s'%name)
def resume (self, name):
return self.__mci_no_return(u'resume %s'%name)
def get_volume (self, name):
return self.__get_status_int(name, 'volume')
def set_volume (self, name, volume):
return self.__mci_no_return(u'setaudio %s volume to %s'%(name, volume))
def is_playing (self, name):
mode = self.get_mode(name)
if mode is None:
return False
if mode != 'playing':
return False
return True
#----------------------------------------------------------------------
# main entry
#----------------------------------------------------------------------
def main (args = None):
if args is None:
args = sys.argv
args = [n for n in args]
if len(args) < 2:
print('usage: playmp3.py [mp3]')
return 0
mp3 = args[1]
if not os.path.exists(mp3):
        print('not found: %s' % mp3)
return 1
def ms2time(ms):
if ms <= 0: return '00:00:000'
        time_sec, ms = ms // 1000, ms % 1000
        time_min, time_sec = time_sec // 60, time_sec % 60
        time_hor, time_min = time_min // 60, time_min % 60
if time_hor == 0: return '%02d:%02d:%03d'%(time_min, time_sec, ms)
return '%02d:%02d:%02d:%03d'%(time_hor, time_min, time_sec, ms)
winmm = WinMM()
name = winmm.open(mp3)
if name is None:
print('can not play: %s'%mp3)
return 2
import ctypes.wintypes
user32 = ctypes.windll.user32
user32.GetAsyncKeyState.restype = ctypes.wintypes.WORD
user32.GetAsyncKeyState.argtypes = [ ctypes.c_char ]
size = winmm.get_length(name)
print('Playing "%s", press \'q\' to exit ....'%mp3)
winmm.play(name, repeat = True)
while 1:
if user32.GetAsyncKeyState(b'Q'): break
time.sleep(0.1)
pos = winmm.get_position(name)
sys.stdout.write('[%s / %s]\r'%(ms2time(pos), ms2time(size)))
sys.stdout.flush()
print('')
print('stopped')
winmm.close(name)
return 0
#----------------------------------------------------------------------
# testing case
#----------------------------------------------------------------------
if __name__ == '__main__':
def test1():
winmm = WinMM()
name = winmm.open('d:/music/sample.mp3')
print(name)
print(winmm.get_length(name))
print(winmm.get_volume(name))
print(winmm.set_volume(name, 1000))
ts = time.time()
print(winmm.play(name))
ts = time.time() - ts
print("ts", ts)
input()
print('is_playing', winmm.is_playing(name))
print('position:', winmm.get_position(name))
print('mode:', winmm.get_mode(name))
print(winmm.stop(name))
print('mode:', winmm.get_mode(name))
return 0
def test2():
main([__file__, 'd:/music/sample.mp3'])
return 0
# test2()
    main()
pool.js | var assert = require('assert');
var adapter = require('../index.js');
var config = require('./support/config.js');
var makeSlowQuery = require('./support/makeSlowQuery.js');
var ConnectionPool = false;
try {
ConnectionPool = require('any-db-pool');
}
catch (e) {
ConnectionPool = false;
}
var delaySeconds = 2;
var ifPoolExists = ConnectionPool ? describe : describe.skip;
ifPoolExists('Slow query', function(){
'use strict';
this.timeout((delaySeconds + 1) * 1000);
var connection = false;
before(function(done){
connection = adapter.createConnection(config, function(err){
assert.ifError(err);
done();
});
connection.on('error', function(err){
assert.ifError(err);
});
});
after(function(done){
if (connection) {
connection.end(done);
}
else {
done('connection missing');
}
});
it('should take intentionally long time to finish', function(done){
makeSlowQuery(connection, delaySeconds, done);
});
});
ifPoolExists('Pool', function(){
'use strict';
this.timeout((delaySeconds + 1) * 1000);
var pool = false;
before(function(){
var poolParams = {
min: 5,
max: 15,
reset: function(conn, done) {
conn.query('ROLLBACK TRANSACTION', done);
}
};
pool = new ConnectionPool(adapter, config, poolParams);
});
after(function(done){
pool.close(done);
});
it('should exist', function(){
assert.ok(pool);
});
['query', 'acquire', 'release', 'close'].forEach(function(name){
it('should provide `'+name+'()` method', function(){
assert.ok(pool.query, 'There should be a `'+name+'` provided by the ConnectionPool object');
assert.ok(pool.query instanceof Function, '`'+name+'()` should be a function');
});
});
it('should run simple query', function(done){
pool.query('SELECT 1 AS test', function(err, result){
assert.ifError(err);
assert.strictEqual(result.rowCount, 1, 'There should be 1 row there');
done();
});
});
it('should acquire two different connections', function(done){
var ids = {length: 0};
var todo = 2;
var onAcquired = function(id){
ids[id] = true;
ids.length++;
if (ids.length >= todo) {
    onDone();
}
};
var onDone = function(){
var keys = Object.keys(ids);
assert.strictEqual(ids.length, todo, 'There should be '+todo+' connections acquired');
assert.strictEqual(ids.length, keys.length - 1, 'There should be '+todo+' connections acquired');
done();
};
pool.acquire(function(err, connection){
assert.ifError(err);
onAcquired(connection.id);
pool.release(connection);
});
pool.acquire(function(err, connection){
assert.ifError(err);
onAcquired(connection.id);
pool.release(connection);
});
});
it('should run multiple queries on multiple connections asynchronously', function(done){
var results = [];
var delays = [4, 3, 2, 1];
this.timeout((delays[0]+1) * 2000);
var onResult = function(value){
results.push(value);
if (results.length >= delays.length) {
onDone();
}
};
var onDone = function(){
assert.strictEqual(results.length, delays.length, 'There should be '+delays.length+' result values');
delays.forEach(function(delay, index){
assert.strictEqual(results[delay-1], index, 'Result '+(delay-1)+' should be equal '+index);
});
done();
};
delays.forEach(function(delay, index){
pool.acquire(function(err, connection){
assert.ifError(err);
makeSlowQuery(connection, delay, function(){
onResult(index);
pool.release(connection);
});
});
});
});
});
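// Usage sketch (illustrative, not part of the test suite): the same
// create/query/close flow the tests above exercise, outside of mocha. It
// reuses the adapter and config modules imported at the top of this file.
var demoPool = new ConnectionPool(adapter, config, { min: 2, max: 10 });
demoPool.query('SELECT 1 AS test', function (err, result) {
    if (err) throw err;
    console.log(result.rowCount); // 1
    demoPool.close(function () {});
});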
pool.js | var assert = require('assert');
var adapter = require('../index.js');
var config = require('./support/config.js');
var makeSlowQuery = require('./support/makeSlowQuery.js');
var ConnectionPool = false;
try {
ConnectionPool = require('any-db-pool');
}
catch (e) {
ConnectionPool = false;
}
var delaySeconds = 2;
var ifPoolExists = ConnectionPool ? describe : describe.skip;
ifPoolExists('Slow query', function(){
'use strict';
this.timeout((delaySeconds + 1) * 1000);
var connection = false;
before(function(done){
connection = adapter.createConnection(config, function(err){
assert.ifError(err);
done();
});
connection.on('error', function(err){
assert.ifError(err);
});
});
after(function(done){
if (connection) {
connection.end(done);
}
else {
done('connection missing');
}
});
it('should take intentionally long time to finish', function(done){
makeSlowQuery(connection, delaySeconds, done);
});
});
ifPoolExists('Pool', function(){
'use strict';
this.timeout((delaySeconds + 1) * 1000);
var pool = false;
before(function(){
var poolParams = {
min: 5,
max: 15,
reset: function(conn, done) {
conn.query('ROLLBACK TRANSACTION', done);
}
};
pool = new ConnectionPool(adapter, config, poolParams);
});
after(function(done){
    pool.close(done);
});
it('should exist', function(){
assert.ok(pool);
});
['query', 'acquire', 'release', 'close'].forEach(function(name){
it('should provide `'+name+'()` method', function(){
assert.ok(pool.query, 'There should be a `'+name+'` provided by the ConnectionPool object');
assert.ok(pool.query instanceof Function, '`'+name+'()` should be a function');
});
});
it('should run simple query', function(done){
pool.query('SELECT 1 AS test', function(err, result){
assert.ifError(err);
assert.strictEqual(result.rowCount, 1, 'There should be 1 row there');
done();
});
});
it('should acquire two different connections', function(done){
var ids = {length: 0};
var todo = 2;
var onAcquired = function(id){
ids[id] = true;
ids.length++;
if (ids.length >= todo) {
onDone();
}
};
var onDone = function(){
var keys = Object.keys(ids);
assert.strictEqual(ids.length, todo, 'There should be '+todo+' connections acquired');
assert.strictEqual(ids.length, keys.length - 1, 'There should be '+todo+' connections acquired');
done();
};
pool.acquire(function(err, connection){
assert.ifError(err);
onAcquired(connection.id);
pool.release(connection);
});
pool.acquire(function(err, connection){
assert.ifError(err);
onAcquired(connection.id);
pool.release(connection);
});
});
it('should run multiple queries on multiple connections asynchronously', function(done){
var results = [];
var delays = [4, 3, 2, 1];
this.timeout((delays[0]+1) * 2000);
var onResult = function(value){
results.push(value);
if (results.length >= delays.length) {
onDone();
}
};
var onDone = function(){
assert.strictEqual(results.length, delays.length, 'There should be '+delays.length+' result values');
delays.forEach(function(delay, index){
assert.strictEqual(results[delay-1], index, 'Result '+(delay-1)+' should be equal '+index);
});
done();
};
delays.forEach(function(delay, index){
pool.acquire(function(err, connection){
assert.ifError(err);
makeSlowQuery(connection, delay, function(){
onResult(index);
pool.release(connection);
});
});
});
});
});
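// Illustrative sketch: the manual acquire/release cycle the tests above rely
// on, shown standalone. `demoPool` stands in for a ConnectionPool created as
// in the before() hook.
demoPool.acquire(function (err, connection) {
    if (err) throw err;
    connection.query('SELECT 2 AS two', function () {
        demoPool.release(connection); // always hand the connection back
    });
});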
config.py | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Configuration for Zenodo Records."""
from __future__ import absolute_import, print_function
from flask_babelex import gettext
from speaklater import make_lazy_gettext
_ = make_lazy_gettext(lambda: gettext)
ZENODO_COMMUNITIES_AUTO_ENABLED = True
"""Automatically add and request to communities upon publishing."""
ZENODO_COMMUNITIES_AUTO_REQUEST = ['zenodo', ]
"""Communities which are to be auto-requested upon first publishing."""
ZENODO_COMMUNITIES_REQUEST_IF_GRANTS = ['ecfunded', ]
"""Communities which are to be auto-requested if record has grants."""
ZENODO_COMMUNITIES_ADD_IF_GRANTS = []
"""Communities which are to be auto-added if record has grants."""
ZENODO_BUCKET_QUOTA_SIZE = 50 * 1000 * 1000 * 1000 # 50 GB
"""Maximum quota per bucket."""
ZENODO_MAX_FILE_SIZE = ZENODO_BUCKET_QUOTA_SIZE
"""Maximum file size accepted.""" | # and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version. | random_line_split |
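# Usage sketch (illustrative, not part of this module): the settings above are
# plain module-level constants, so a Flask app can load them with
# `from_object`. The dotted import path below is hypothetical.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object('zenodo.modules.records.config')
#   assert app.config['ZENODO_MAX_FILE_SIZE'] == app.config['ZENODO_BUCKET_QUOTA_SIZE']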
package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gengeo(AutotoolsPackage):
"""GenGeo is a library of tools for creating complex particle
geometries for use in ESyS-Particle simulations. GenGeo is a standalone
application with a Python API that creates geometry files suitable for
importing into ESyS-Particle simulations. The functionality of GenGeo far
exceeds the in-simulation geometry creation utilities
provided by ESyS-Particle itself."""
homepage = "https://launchpad.net/esys-particle/gengeo"
url = "https://launchpad.net/esys-particle/trunk/3.0-alpha/+download/gengeo-163.tar.gz"
maintainers = ['dorton21']
version('163', sha256='9c896d430d8f315a45379d2b82e7d374f36259af66a745bfdee4c022a080d34d')
extends('python')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('boost+python')
depends_on('openmpi')
def autoreconf(self, spec, prefix):
autogen = Executable('./autogen.sh')
autogen()
def configure_args(self):
        args = [
            '--verbose',
            '--with-boost=' + self.spec['boost'].prefix,
            'CCFLAGS=-fpermissive',
'CXXFLAGS=-fpermissive',
]
        return args
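# Usage sketch (illustrative, not part of the recipe): with this file placed
# in a Spack repo as gengeo/package.py, the package builds with:
#
#   spack install gengeo@163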
package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gengeo(AutotoolsPackage):
    """GenGeo is a library of tools for creating complex particle
geometries for use in ESyS-Particle simulations. GenGeo is a standalone
application with a Python API that creates geometry files suitable for
importing into ESyS-Particle simulations. The functionality of GenGeo far
exceeds the in-simulation geometry creation utilities
provided by ESyS-Particle itself."""
homepage = "https://launchpad.net/esys-particle/gengeo"
url = "https://launchpad.net/esys-particle/trunk/3.0-alpha/+download/gengeo-163.tar.gz"
maintainers = ['dorton21']
version('163', sha256='9c896d430d8f315a45379d2b82e7d374f36259af66a745bfdee4c022a080d34d')
extends('python')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('boost+python')
depends_on('openmpi')
def autoreconf(self, spec, prefix):
autogen = Executable('./autogen.sh')
autogen()
def configure_args(self):
args = [
'--verbose',
'--with-boost=' + self.spec['boost'].prefix,
'CCFLAGS=-fpermissive',
'CXXFLAGS=-fpermissive',
]
        return args
package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gengeo(AutotoolsPackage):
"""GenGeo is a library of tools for creating complex particle
geometries for use in ESyS-Particle simulations. GenGeo is a standalone
application with a Python API that creates geometry files suitable for
importing into ESyS-Particle simulations. The functionality of GenGeo far
exceeds the in-simulation geometry creation utilities
provided by ESyS-Particle itself."""
homepage = "https://launchpad.net/esys-particle/gengeo"
url = "https://launchpad.net/esys-particle/trunk/3.0-alpha/+download/gengeo-163.tar.gz"
maintainers = ['dorton21']
version('163', sha256='9c896d430d8f315a45379d2b82e7d374f36259af66a745bfdee4c022a080d34d')
extends('python')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('boost+python')
depends_on('openmpi')
def | (self, spec, prefix):
autogen = Executable('./autogen.sh')
autogen()
def configure_args(self):
args = [
'--verbose',
'--with-boost=' + self.spec['boost'].prefix,
'CCFLAGS=-fpermissive',
'CXXFLAGS=-fpermissive',
]
return args
| autoreconf | identifier_name |
buffer.ts | import c = require('../context');
/** A memory block of values */
export class Mem {
/** The GL buffer target, or any other native binding */
public buffer:any = null;
/** A data block of memory */
public data:ArrayBuffer;
/** Open gl context */
private _glc:c.Context;
/** Size of this memory block */
public size:number;
constructor(size:number) {
this.data = new ArrayBuffer(size);
this.size = size;
}
/** Replace a section of the data in this data block */
public memset(offset:number, src:Mem, srcOffset:number, bytes:number) {
var dstU8 = new Uint8Array(this.data, offset, bytes);
var srcU8 = new Uint8Array(src.data, srcOffset, bytes);
dstU8.set(srcU8);
}
}
/** Minimal api we expect on typed arrays */
export interface TypedArray {
set(data:any[]):void;
length:number;
}
/** Virtual memory block pointer; a pointer to a section of a memory block */
export class Buffer<T extends TypedArray> {
/** Data view only */
public data:T;
/** Size of each element in this view */
public block:number;
/** Number of elements in this buffer */
public length:number;
/** The actual memory block */
public mem:Mem;
/*
* Construct a new public view to a memory block
* @param T The type; eg. Float32Array
* @param data The data block to use for this.
* @param offset The offset into the data block to use for this.
*/
constructor(T:any, data:Mem, length:number, offset:number = 0) {
this.block = T['BYTES_PER_ELEMENT'];
this.mem = data;
this.data = this._factory(T, length, offset);
this.length = length;
}
/** Returns as an array for debugging or whatever */
public asArray():number[] {
var rtn = [];
for (var i = 0; i < this.data.length; ++i) {
rtn.push(this.data[i]);
}
return rtn;
}
/*
* Element factory
* <p>
* NB. that these are binary extensions, so using constructor.apply()
* on a new object doesn't work; you have to manually check for types.
* */
private _factory(type:any, length:number, offset:number):T {
var max = this.mem.size / this.block;
if (length > max) {
throw Error('Invalid length ' + length + ' when memory block is only ' + max + ' long');
}
switch (type) {
case Float32Array:
var rtn:any = new Float32Array(this.mem.data, this.block * offset, length);
return <T> rtn;
}
throw new Error('Invalid type: ' + type);
}
/*
* Replace a section of the data in this data block
* @param offset The offset into this VP to set data from.
* @param src The source to read new data from
* @param srcOffset The offset into the source for elements
* @param items The number of items to copy over
*/
public memset(offset:number, src:Buffer<T>, srcOffset:number, items:number) {
this.mem.memset(offset * this.block, src.mem, srcOffset * this.block, items * this.block);
}
/** Set values from an array of the correct type */
public set(data:any[]):Buffer<T> {
if (data.length != this.length) |
this.data.set(data);
return this;
}
}
/** Convenience function for a simple data element */
export function factory(length:number):Buffer<Float32Array> {
var size = Float32Array['BYTES_PER_ELEMENT'] * length;
var mem = new Mem(size);
return new Buffer<Float32Array>(Float32Array, mem, length, 0);
}
| {
throw Error('Invalid set length ' + data.length + ' != buffer size ' + this.length);
} | conditional_block |
buffer.ts | import c = require('../context');
/** A memory block of values */
export class Mem {
/** The GL buffer target, or any other native binding */
public buffer:any = null;
/** A data block of memory */
public data:ArrayBuffer;
/** Open gl context */
private _glc:c.Context;
/** Size of this memory block */
public size:number;
constructor(size:number) {
this.data = new ArrayBuffer(size);
this.size = size;
}
/** Replace a section of the data in this data block */
public memset(offset:number, src:Mem, srcOffset:number, bytes:number) {
var dstU8 = new Uint8Array(this.data, offset, bytes);
var srcU8 = new Uint8Array(src.data, srcOffset, bytes);
dstU8.set(srcU8);
}
}
/** Minimal api we expect on typed arrays */
export interface TypedArray {
set(data:any[]):void;
length:number;
}
/** Virtual memory block pointer; a pointer to a section of a memory block */
export class Buffer<T extends TypedArray> {
/** Data view only */
public data:T;
/** Size of each element in this view */
public block:number;
/** Number of elements in this buffer */
public length:number;
/** The actual memory block */
public mem:Mem;
/*
* Construct a new public view to a memory block
* @param T The type; eg. Float32Array
* @param data The data block to use for this.
* @param offset The offset into the data block to use for this.
*/
constructor(T:any, data:Mem, length:number, offset:number = 0) {
this.block = T['BYTES_PER_ELEMENT'];
this.mem = data;
this.data = this._factory(T, length, offset);
this.length = length;
}
/** Returns as an array for debugging or whatever */
public asArray():number[] {
var rtn = [];
for (var i = 0; i < this.data.length; ++i) {
rtn.push(this.data[i]);
}
return rtn;
}
/*
* Element factory
* <p>
* NB. that these are binary extensions, so using constructor.apply()
* on a new object doesn't work; you have to manually check for types.
* */
private _factory(type:any, length:number, offset:number):T |
/*
* Replace a section of the data in this data block
* @param offset The offset into this VP to set data from.
* @param src The source to read new data from
* @param srcOffset The offset into the source for elements
* @param items The number of items to copy over
*/
public memset(offset:number, src:Buffer<T>, srcOffset:number, items:number) {
this.mem.memset(offset * this.block, src.mem, srcOffset * this.block, items * this.block);
}
/** Set values from an array of the correct type */
public set(data:any[]):Buffer<T> {
if (data.length != this.length) {
throw Error('Invalid set length ' + data.length + ' != buffer size ' + this.length);
}
this.data.set(data);
return this;
}
}
/** Convenience function for a simple data element */
export function factory(length:number):Buffer<Float32Array> {
var size = Float32Array['BYTES_PER_ELEMENT'] * length;
var mem = new Mem(size);
return new Buffer<Float32Array>(Float32Array, mem, length, 0);
}
| {
var max = this.mem.size / this.block;
if (length > max) {
throw Error('Invalid length ' + length + ' when memory block is only ' + max + ' long');
}
switch (type) {
case Float32Array:
var rtn:any = new Float32Array(this.mem.data, this.block * offset, length);
return <T> rtn;
}
throw new Error('Invalid type: ' + type);
} | identifier_body |
buffer.ts | import c = require('../context');
/** A memory block of values */
export class Mem {
/** The GL buffer target, or any other native binding */
public buffer:any = null;
/** A data block of memory */
public data:ArrayBuffer;
/** Open gl context */
private _glc:c.Context;
/** Size of this memory block */
public size:number;
constructor(size:number) { |
/** Replace a section of the data in this data block */
public memset(offset:number, src:Mem, srcOffset:number, bytes:number) {
var dstU8 = new Uint8Array(this.data, offset, bytes);
var srcU8 = new Uint8Array(src.data, srcOffset, bytes);
dstU8.set(srcU8);
}
}
/** Minimal api we expect on typed arrays */
export interface TypedArray {
set(data:any[]):void;
length:number;
}
/** Virtual memory block pointer; a pointer to a section of a memory block */
export class Buffer<T extends TypedArray> {
/** Data view only */
public data:T;
/** Size of each element in this view */
public block:number;
/** Number of elements in this buffer */
public length:number;
/** The actual memory block */
public mem:Mem;
/*
* Construct a new public view to a memory block
* @param T The type; eg. Float32Array
* @param data The data block to use for this.
* @param offset The offset into the data block to use for this.
*/
constructor(T:any, data:Mem, length:number, offset:number = 0) {
this.block = T['BYTES_PER_ELEMENT'];
this.mem = data;
this.data = this._factory(T, length, offset);
this.length = length;
}
/** Returns as an array for debugging or whatever */
public asArray():number[] {
var rtn = [];
for (var i = 0; i < this.data.length; ++i) {
rtn.push(this.data[i]);
}
return rtn;
}
/*
* Element factory
* <p>
* NB. that these are binary extensions, so using constructor.apply()
* on a new object doesn't work; you have to manually check for types.
* */
private _factory(type:any, length:number, offset:number):T {
var max = this.mem.size / this.block;
if (length > max) {
throw Error('Invalid length ' + length + ' when memory block is only ' + max + ' long');
}
switch (type) {
case Float32Array:
var rtn:any = new Float32Array(this.mem.data, this.block * offset, length);
return <T> rtn;
}
throw new Error('Invalid type: ' + type);
}
/*
* Replace a section of the data in this data block
* @param offset The offset into this VP to set data from.
* @param src The source to read new data from
* @param srcOffset The offset into the source for elements
* @param items The number of items to copy over
*/
public memset(offset:number, src:Buffer<T>, srcOffset:number, items:number) {
this.mem.memset(offset * this.block, src.mem, srcOffset * this.block, items * this.block);
}
/** Set values from an array of the correct type */
public set(data:any[]):Buffer<T> {
if (data.length != this.length) {
throw Error('Invalid set length ' + data.length + ' != buffer size ' + this.length);
}
this.data.set(data);
return this;
}
}
/** Convenience function for a simple data element */
export function factory(length:number):Buffer<Float32Array> {
var size = Float32Array['BYTES_PER_ELEMENT'] * length;
var mem = new Mem(size);
return new Buffer<Float32Array>(Float32Array, mem, length, 0);
} | this.data = new ArrayBuffer(size);
this.size = size;
} | random_line_split |
buffer.ts | import c = require('../context');
/** A memory block of values */
export class Mem {
/** The GL buffer target, or any other native binding */
public buffer:any = null;
/** A data block of memory */
public data:ArrayBuffer;
/** Open gl context */
private _glc:c.Context;
/** Size of this memory block */
public size:number;
| (size:number) {
this.data = new ArrayBuffer(size);
this.size = size;
}
/** Replace a section of the data in this data block */
public memset(offset:number, src:Mem, srcOffset:number, bytes:number) {
var dstU8 = new Uint8Array(this.data, offset, bytes);
var srcU8 = new Uint8Array(src.data, srcOffset, bytes);
dstU8.set(srcU8);
}
}
/** Minimal api we expect on typed arrays */
export interface TypedArray {
set(data:any[]):void;
length:number;
}
/** Virtual memory block pointer; a pointer to a section of a memory block */
export class Buffer<T extends TypedArray> {
/** Data view only */
public data:T;
/** Size of each element in this view */
public block:number;
/** Number of elements in this buffer */
public length:number;
/** The actual memory block */
public mem:Mem;
/*
* Construct a new public view to a memory block
* @param T The type; eg. Float32Array
* @param data The data block to use for this.
* @param offset The offset into the data block to use for this.
*/
constructor(T:any, data:Mem, length:number, offset:number = 0) {
this.block = T['BYTES_PER_ELEMENT'];
this.mem = data;
this.data = this._factory(T, length, offset);
this.length = length;
}
/** Returns as an array for debugging or whatever */
public asArray():number[] {
var rtn = [];
for (var i = 0; i < this.data.length; ++i) {
rtn.push(this.data[i]);
}
return rtn;
}
/*
* Element factory
* <p>
* NB. that these are binary extensions, so using constructor.apply()
* on a new object doesn't work; you have to manually check for types.
* */
private _factory(type:any, length:number, offset:number):T {
var max = this.mem.size / this.block;
if (length > max) {
throw Error('Invalid length ' + length + ' when memory block is only ' + max + ' long');
}
switch (type) {
case Float32Array:
var rtn:any = new Float32Array(this.mem.data, this.block * offset, length);
return <T> rtn;
}
throw new Error('Invalid type: ' + type);
}
/*
* Replace a section of the data in this data block
* @param offset The offset into this VP to set data from.
* @param src The source to read new data from
* @param srcOffset The offset into the source for elements
* @param items The number of items to copy over
*/
public memset(offset:number, src:Buffer<T>, srcOffset:number, items:number) {
this.mem.memset(offset * this.block, src.mem, srcOffset * this.block, items * this.block);
}
/** Set values from an array of the correct type */
public set(data:any[]):Buffer<T> {
if (data.length != this.length) {
throw Error('Invalid set length ' + data.length + ' != buffer size ' + this.length);
}
this.data.set(data);
return this;
}
}
/** Convenience function for a simple data element */
export function factory(length:number):Buffer<Float32Array> {
var size = Float32Array['BYTES_PER_ELEMENT'] * length;
var mem = new Mem(size);
return new Buffer<Float32Array>(Float32Array, mem, length, 0);
}
| constructor | identifier_name |
test_method_message_parser.py | # -*- coding: utf-8 -*-
# Copyright 2014 Foxdog Studios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from ddp.messages.client import MethodMessage
from ddp.messages.client import MethodMessageParser
class MethodMessageParserTestCase(unittest.TestCase):
| def setUp(self):
self.parser = MethodMessageParser()
def test_parse(self):
id = 'id'
method = 'method'
params = [True, 1.0]
message = self.parser.parse({'msg': 'method', 'id': id,
'method': method, 'params': params})
self.assertEqual(message, MethodMessage(id, method, params)) | identifier_body |
|
test_method_message_parser.py | # -*- coding: utf-8 -*-
# Copyright 2014 Foxdog Studios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| from ddp.messages.client import MethodMessage
from ddp.messages.client import MethodMessageParser
class MethodMessageParserTestCase(unittest.TestCase):
def setUp(self):
self.parser = MethodMessageParser()
def test_parse(self):
id = 'id'
method = 'method'
params = [True, 1.0]
message = self.parser.parse({'msg': 'method', 'id': id,
'method': method, 'params': params})
self.assertEqual(message, MethodMessage(id, method, params)) | import unittest
| random_line_split |
test_method_message_parser.py | # -*- coding: utf-8 -*-
# Copyright 2014 Foxdog Studios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from ddp.messages.client import MethodMessage
from ddp.messages.client import MethodMessageParser
class MethodMessageParserTestCase(unittest.TestCase):
def | (self):
self.parser = MethodMessageParser()
def test_parse(self):
id = 'id'
method = 'method'
params = [True, 1.0]
message = self.parser.parse({'msg': 'method', 'id': id,
'method': method, 'params': params})
self.assertEqual(message, MethodMessage(id, method, params))
| setUp | identifier_name |
tunnels.py | # coding=utf-8
#
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IP® Network tunnels module.
REST URI
``http://localhost/mgmt/tm/net/tunnels``
GUI Path
``Network --> tunnels``
REST Kind
``tm:net:tunnels:*``
"""
from f5.bigip.resource import Collection
from f5.bigip.resource import OrganizingCollection
from f5.bigip.resource import Resource
class T | OrganizingCollection):
"""BIG-IP® network tunnels collection"""
def __init__(self, net):
super(TunnelS, self).__init__(net)
self._meta_data['allowed_lazy_attributes'] = [
Gres,
Tunnels,
Vxlans,
]
class Tunnels(Collection):
"""BIG-IP® network tunnels resource (collection for GRE, Tunnel, VXLANs"""
def __init__(self, tunnelS):
super(Tunnels, self).__init__(tunnelS)
self._meta_data['allowed_lazy_attributes'] = [Gres, Tunnel, Vxlans]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:tunnel:tunnelstate': Tunnel}
class Tunnel(Resource):
"""BIG-IP® tunnels tunnel resource"""
def __init__(self, tunnels):
super(Tunnel, self).__init__(tunnels)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:tunnel:tunnelstate'
class Gres(Collection):
"""BIG-IP® tunnels GRE sub-collection"""
def __init__(self, tunnels):
super(Gres, self).__init__(tunnels)
self._meta_data['allowed_lazy_attributes'] = [Gre]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:gre:grestate': Gre}
class Gre(Resource):
"""BIG-IP® tunnels GRE sub-collection resource"""
def __init__(self, gres):
super(Gre, self).__init__(gres)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:gre:grestate'
class Vxlans(Collection):
"""BIG-IP® tunnels VXLAN sub-collection"""
def __init__(self, tunnels):
super(Vxlans, self).__init__(tunnels)
self._meta_data['allowed_lazy_attributes'] = [Vxlan]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:vxlan:vxlanstate': Vxlan}
class Vxlan(Resource):
"""BIG-IP® tunnels VXLAN sub-collection resource"""
def __init__(self, vxlans):
super(Vxlan, self).__init__(vxlans)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:vxlan:vxlanstate'
| unnelS( | identifier_name |
tunnels.py | # coding=utf-8
#
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. | #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IP® Network tunnels module.
REST URI
``http://localhost/mgmt/tm/net/tunnels``
GUI Path
``Network --> tunnels``
REST Kind
``tm:net:tunnels:*``
"""
from f5.bigip.resource import Collection
from f5.bigip.resource import OrganizingCollection
from f5.bigip.resource import Resource
class TunnelS(OrganizingCollection):
"""BIG-IP® network tunnels collection"""
def __init__(self, net):
super(TunnelS, self).__init__(net)
self._meta_data['allowed_lazy_attributes'] = [
Gres,
Tunnels,
Vxlans,
]
class Tunnels(Collection):
"""BIG-IP® network tunnels resource (collection for GRE, Tunnel, VXLANs"""
def __init__(self, tunnelS):
super(Tunnels, self).__init__(tunnelS)
self._meta_data['allowed_lazy_attributes'] = [Gres, Tunnel, Vxlans]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:tunnel:tunnelstate': Tunnel}
class Tunnel(Resource):
"""BIG-IP® tunnels tunnel resource"""
def __init__(self, tunnels):
super(Tunnel, self).__init__(tunnels)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:tunnel:tunnelstate'
class Gres(Collection):
"""BIG-IP® tunnels GRE sub-collection"""
def __init__(self, tunnels):
super(Gres, self).__init__(tunnels)
self._meta_data['allowed_lazy_attributes'] = [Gre]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:gre:grestate': Gre}
class Gre(Resource):
"""BIG-IP® tunnels GRE sub-collection resource"""
def __init__(self, gres):
super(Gre, self).__init__(gres)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:gre:grestate'
class Vxlans(Collection):
"""BIG-IP® tunnels VXLAN sub-collection"""
def __init__(self, tunnels):
super(Vxlans, self).__init__(tunnels)
self._meta_data['allowed_lazy_attributes'] = [Vxlan]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:vxlan:vxlanstate': Vxlan}
class Vxlan(Resource):
"""BIG-IP® tunnels VXLAN sub-collection resource"""
def __init__(self, vxlans):
super(Vxlan, self).__init__(vxlans)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:vxlan:vxlanstate' | # You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0 | random_line_split |
tunnels.py | # coding=utf-8
#
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IP® Network tunnels module.
REST URI
``http://localhost/mgmt/tm/net/tunnels``
GUI Path
``Network --> tunnels``
REST Kind
``tm:net:tunnels:*``
"""
from f5.bigip.resource import Collection
from f5.bigip.resource import OrganizingCollection
from f5.bigip.resource import Resource
class TunnelS(OrganizingCollection):
" |
class Tunnels(Collection):
"""BIG-IP® network tunnels resource (collection for GRE, Tunnel, VXLANs"""
def __init__(self, tunnelS):
super(Tunnels, self).__init__(tunnelS)
self._meta_data['allowed_lazy_attributes'] = [Gres, Tunnel, Vxlans]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:tunnel:tunnelstate': Tunnel}
class Tunnel(Resource):
"""BIG-IP® tunnels tunnel resource"""
def __init__(self, tunnels):
super(Tunnel, self).__init__(tunnels)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:tunnel:tunnelstate'
class Gres(Collection):
"""BIG-IP® tunnels GRE sub-collection"""
def __init__(self, tunnels):
super(Gres, self).__init__(tunnels)
self._meta_data['allowed_lazy_attributes'] = [Gre]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:gre:grestate': Gre}
class Gre(Resource):
"""BIG-IP® tunnels GRE sub-collection resource"""
def __init__(self, gres):
super(Gre, self).__init__(gres)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:gre:grestate'
class Vxlans(Collection):
"""BIG-IP® tunnels VXLAN sub-collection"""
def __init__(self, tunnels):
super(Vxlans, self).__init__(tunnels)
self._meta_data['allowed_lazy_attributes'] = [Vxlan]
self._meta_data['attribute_registry'] =\
{'tm:net:tunnels:vxlan:vxlanstate': Vxlan}
class Vxlan(Resource):
"""BIG-IP® tunnels VXLAN sub-collection resource"""
def __init__(self, vxlans):
super(Vxlan, self).__init__(vxlans)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:net:tunnels:vxlan:vxlanstate'
| ""BIG-IP® network tunnels collection"""
def __init__(self, net):
super(TunnelS, self).__init__(net)
self._meta_data['allowed_lazy_attributes'] = [
Gres,
Tunnels,
Vxlans,
]
| identifier_body |
purchase_order.py | # -*- coding: utf-8 -*-
#
#
# Author: Yannick Vaucher
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
@api.one
@api.depends('price_unit',
'price_subtotal',
'order_id.pricelist_id.currency_id',
'order_id.requisition_id.date_exchange_rate',
'order_id.requisition_id.currency_id')
def _compute_prices_in_company_currency(self):
|
@api.multi
def _requisition_currency(self):
for rec in self:
requisition = rec.order_id.requisition_id
if requisition:
rec.requisition_currency = requisition.currency_id
price_unit_co = fields.Float(
compute='_compute_prices_in_company_currency',
string="Unit Price",
digits=dp.get_precision('Account'),
store=True,
help="Unit Price in company currency."
)
price_subtotal_co = fields.Float(
compute='_compute_prices_in_company_currency',
string="Subtotal",
digits=dp.get_precision('Account'),
store=True,
help="Subtotal in company currency."
)
order_currency = fields.Many2one(string="Currency", readonly=True,
related="order_id.currency_id")
requisition_currency = fields.Many2one(
"res.currency", string="Requisition Currency", readonly=True,
compute="_requisition_currency")
| """ """
requisition = self.order_id.requisition_id
date = requisition.date_exchange_rate or fields.Date.today()
from_curr = self.order_id.currency_id.with_context(date=date)
if requisition and requisition.currency_id:
to_curr = requisition.currency_id
else:
to_curr = self.order_id.company_id.currency_id
self.price_unit_co = from_curr.compute(self.price_unit,
to_curr, round=False)
self.price_subtotal_co = from_curr.compute(self.price_subtotal,
to_curr, round=False) | identifier_body |
purchase_order.py | # -*- coding: utf-8 -*-
#
#
# Author: Yannick Vaucher
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
@api.one
@api.depends('price_unit',
'price_subtotal',
'order_id.pricelist_id.currency_id',
'order_id.requisition_id.date_exchange_rate',
'order_id.requisition_id.currency_id')
def | (self):
""" """
requisition = self.order_id.requisition_id
date = requisition.date_exchange_rate or fields.Date.today()
from_curr = self.order_id.currency_id.with_context(date=date)
if requisition and requisition.currency_id:
to_curr = requisition.currency_id
else:
to_curr = self.order_id.company_id.currency_id
self.price_unit_co = from_curr.compute(self.price_unit,
to_curr, round=False)
self.price_subtotal_co = from_curr.compute(self.price_subtotal,
to_curr, round=False)
@api.multi
def _requisition_currency(self):
for rec in self:
requisition = rec.order_id.requisition_id
if requisition:
rec.requisition_currency = requisition.currency_id
price_unit_co = fields.Float(
compute='_compute_prices_in_company_currency',
string="Unit Price",
digits=dp.get_precision('Account'),
store=True,
help="Unit Price in company currency."
)
price_subtotal_co = fields.Float(
compute='_compute_prices_in_company_currency',
string="Subtotal",
digits=dp.get_precision('Account'),
store=True,
help="Subtotal in company currency."
)
order_currency = fields.Many2one(string="Currency", readonly=True,
related="order_id.currency_id")
requisition_currency = fields.Many2one(
"res.currency", string="Requisition Currency", readonly=True,
compute="_requisition_currency")
| _compute_prices_in_company_currency | identifier_name |
purchase_order.py | # -*- coding: utf-8 -*-
#
#
# Author: Yannick Vaucher
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
@api.one
@api.depends('price_unit',
'price_subtotal',
'order_id.pricelist_id.currency_id',
'order_id.requisition_id.date_exchange_rate',
'order_id.requisition_id.currency_id')
def _compute_prices_in_company_currency(self):
""" """
requisition = self.order_id.requisition_id
date = requisition.date_exchange_rate or fields.Date.today()
from_curr = self.order_id.currency_id.with_context(date=date)
if requisition and requisition.currency_id:
to_curr = requisition.currency_id
else:
to_curr = self.order_id.company_id.currency_id
self.price_unit_co = from_curr.compute(self.price_unit,
to_curr, round=False)
self.price_subtotal_co = from_curr.compute(self.price_subtotal,
to_curr, round=False)
@api.multi
def _requisition_currency(self):
for rec in self:
requisition = rec.order_id.requisition_id
if requisition:
|
price_unit_co = fields.Float(
compute='_compute_prices_in_company_currency',
string="Unit Price",
digits=dp.get_precision('Account'),
store=True,
help="Unit Price in company currency."
)
price_subtotal_co = fields.Float(
compute='_compute_prices_in_company_currency',
string="Subtotal",
digits=dp.get_precision('Account'),
store=True,
help="Subtotal in company currency."
)
order_currency = fields.Many2one(string="Currency", readonly=True,
related="order_id.currency_id")
requisition_currency = fields.Many2one(
"res.currency", string="Requisition Currency", readonly=True,
compute="_requisition_currency")
| rec.requisition_currency = requisition.currency_id | conditional_block |
purchase_order.py | # -*- coding: utf-8 -*-
#
#
# Author: Yannick Vaucher
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
@api.one
@api.depends('price_unit',
'price_subtotal',
'order_id.pricelist_id.currency_id',
'order_id.requisition_id.date_exchange_rate',
'order_id.requisition_id.currency_id')
def _compute_prices_in_company_currency(self):
""" """
requisition = self.order_id.requisition_id
date = requisition.date_exchange_rate or fields.Date.today()
from_curr = self.order_id.currency_id.with_context(date=date)
if requisition and requisition.currency_id:
to_curr = requisition.currency_id
else:
to_curr = self.order_id.company_id.currency_id
self.price_unit_co = from_curr.compute(self.price_unit,
to_curr, round=False)
self.price_subtotal_co = from_curr.compute(self.price_subtotal,
to_curr, round=False)
@api.multi
def _requisition_currency(self):
for rec in self:
requisition = rec.order_id.requisition_id
if requisition:
rec.requisition_currency = requisition.currency_id
price_unit_co = fields.Float(
compute='_compute_prices_in_company_currency',
string="Unit Price",
digits=dp.get_precision('Account'),
store=True,
help="Unit Price in company currency."
)
price_subtotal_co = fields.Float(
compute='_compute_prices_in_company_currency',
string="Subtotal",
digits=dp.get_precision('Account'),
store=True,
help="Subtotal in company currency."
)
order_currency = fields.Many2one(string="Currency", readonly=True,
related="order_id.currency_id")
requisition_currency = fields.Many2one(
"res.currency", string="Requisition Currency", readonly=True,
compute="_requisition_currency") | # GNU Affero General Public License for more details.
# | random_line_split |
api.py | from better_zoom import BetterZoom
from better_selecting_zoom import BetterSelectingZoom
from broadcaster import BroadcasterTool
from dataprinter import DataPrinter
from data_label_tool import DataLabelTool
from enable.tools.drag_tool import DragTool
from draw_points_tool import DrawPointsTool | from image_inspector_tool import ImageInspectorTool, ImageInspectorOverlay
from lasso_selection import LassoSelection
from legend_tool import LegendTool
from legend_highlighter import LegendHighlighter
from line_inspector import LineInspector
from line_segment_tool import LineSegmentTool
from move_tool import MoveTool
from pan_tool import PanTool
from point_marker import PointMarker
from range_selection import RangeSelection
from range_selection_2d import RangeSelection2D
from range_selection_overlay import RangeSelectionOverlay
from regression_lasso import RegressionLasso, RegressionOverlay
from save_tool import SaveTool
from scatter_inspector import ScatterInspector
from select_tool import SelectTool
from simple_inspector import SimpleInspectorTool
from tool_states import ZoomState, PanState, GroupedToolState, SelectedZoomState
from tracking_pan_tool import TrackingPanTool
from tracking_zoom import TrackingZoom
from traits_tool import TraitsTool
from zoom_tool import ZoomTool
# EOF | from drag_zoom import DragZoom
from highlight_tool import HighlightTool | random_line_split |
analisi.py | import numpy
import math
import pylab
from scipy.optimize import curve_fit
import math
import scipy.stats
import lab
def fit_function(x, a, b):
return b*(numpy.exp(x/a)-1)
FileName='/home/federico/Documenti/Laboratorio2/Diodo/dati_arduino/dati.txt'
N1, N2 = pylab.loadtxt(FileName, unpack="True")
errN2 = numpy.array([1.0 for i in range(len(N2))])
errN1 = numpy.array([1.0 for i in range(len(N1))])
Rd = 3280.0
errRd = 30.0
eta = 4.89/1000
erreta = 0.02/1000
V1 = eta*N1
V2 = eta*N2
I = (V1-V2)/Rd
# Insert errors for the V values
errV2 = (erreta/eta + errN2/N2)*V2
errV1 = (erreta/eta + errN1/N1)*V1
errI = (errRd/Rd)*I
#for i in range(len(I)):
# errI[i] = 50e-06
for i in range(len(I)):
if(I[i]==0.0): I[i] = 1.0e-11*i
for i in range(len(V2)):
if(V2[i]==0.0): V2[i] = 1.0e-11*i
# to finish: I'd like to replace each column of points with a single point, but I haven't felt like it
#number = 150
#minV = 0.30
#maxV = 0.70
#inc = (maxV - minV)/number
#volt = numpy.array([(minV + i*inc) for i in range(number)])
#voltaggiVeri = numpy.array([])
#ampere = numpy.array([])
#errVolt = numpy.array([0.0 for i in range(number)])
#errAmpere = numpy.array([0.0 for i in range(number)])
#count = numpy.array([])
#for i in range(number):
# for j in range(len(V2)):
# if(volt[i]<=V2[j]<=volt[i+1]):
# voltaggiVeri = numpy.append(voltaggiVeri, V2[
# errVolt[i] = errV2[j]
# errAmpere[i] = errI[j]
# ampere[i] += I[j]
# count[i] += 1
#nonnulli = len(numpy.nonzero(count))
#aNonNulli = numpy.array([0.0 for i in range(nonnulli)])
#for i in range(nonnulli):
# index = (numpy.nonzero(ampere))[i] | #V2 = volt
#I = ampere
#errI = errAmpere
#errV2 = errVolt
print(V2, I, errV2, errI)
pylab.title("Curva corrente tensione")
pylab.xlabel("V (V)")
pylab.ylabel("I (A)")
pylab.grid(color = "gray")
pylab.grid(color = "gray")
pylab.errorbar(V2, I, errI, errV2, "o", color="black")
initial = numpy.array([0.0515, 6.75e-09])
error = errI+errV2/100 # do NOT take squared errors: that would mix units of measurement
popt, pcov = curve_fit(fit_function, V2, I, initial, error)
a, b = popt
print(a, b)
print(pcov)
div = 1000
bucket = numpy.array([0.0 for i in range(div)])
funzione = numpy.array([0.0 for i in range(div)])
inc = (V2.max()-V2.min())/div
for i in range(len(bucket)):
bucket[i]=float(i)*inc + V2.min()
funzione[i] = fit_function(bucket[i], a, b)
pylab.plot(bucket, funzione, color = "red")
# compute the chi-square
chisq = (((I - fit_function(V2, a, b))/error)**2).sum()
ndof = len(V2) - 2
p=1.0-scipy.stats.chi2.cdf(chisq, len(V2)-3)
print("Carica Chisquare/ndof = %f/%d" % (chisq, ndof))
print("p = ", p)
pylab.show()
#number = 150
#minV = 0.30
#maxV = 0.70
#inc = (maxV -minV)/number
#volt = numpy.array([(minV + i*inc) for i in range(number)])
#ampere = numpy.array([0.0 for i in range(number)])
#count = numpy.array([0 for i in range(number)])
#for i in range(number):
# for j in range(len(V2)):
# if(V2[j] == volt[i]):
# ampere[j] += I[i]
# count[j] += 1
#ampere = ampere/count
#V2 = volt
#I = ampere | # print(index)
# aNonNulli[i] = ampere[index]
| random_line_split |
analisi.py | import numpy
import math
import pylab
from scipy.optimize import curve_fit
import math
import scipy.stats
import lab
def fit_function(x, a, b):
return b*(numpy.exp(x/a)-1)
FileName='/home/federico/Documenti/Laboratorio2/Diodo/dati_arduino/dati.txt'
N1, N2 = pylab.loadtxt(FileName, unpack="True")
errN2 = numpy.array([1.0 for i in range(len(N2))])
errN1 = numpy.array([1.0 for i in range(len(N1))])
Rd = 3280.0
errRd = 30.0
eta = 4.89/1000
erreta = 0.02/1000
V1 = eta*N1
V2 = eta*N2
I = (V1-V2)/Rd
# Insert errors for the V values
errV2 = (erreta/eta + errN2/N2)*V2
errV1 = (erreta/eta + errN1/N1)*V1
errI = (errRd/Rd)*I
#for i in range(len(I)):
# errI[i] = 50e-06
for i in range(len(I)):
if(I[i]==0.0): I[i] = 1.0e-11*i
for i in range(len(V2)):
if(V2[i]==0.0): V2 | #da finire vorrei implementare quella cosa che sostituisco le colonne di punti con un solo punto ma non ne ho voglia
#number = 150
#minV = 0.30
#maxV = 0.70
#inc = (maxV - minV)/number
#volt = numpy.array([(minV + i*inc) for i in range(number)])
#voltaggiVeri = numpy.array([])
#ampere = numpy.array([])
#errVolt = numpy.array([0.0 for i in range(number)])
#errAmpere = numpy.array([0.0 for i in range(number)])
#count = numpy.array([])
#for i in range(number):
# for j in range(len(V2)):
# if(volt[i]<=V2[j]<=volt[i+1]):
# voltaggiVeri = numpy.append(voltaggiVeri, V2[
# errVolt[i] = errV2[j]
# errAmpere[i] = errI[j]
# ampere[i] += I[j]
# count[i] += 1
#nonnulli = len(numpy.nonzero(count))
#aNonNulli = numpy.array([0.0 for i in range(nonnulli)])
#for i in range(nonnulli):
# index = (numpy.nonzero(ampere))[i]
# print(index)
# aNonNulli[i] = ampere[index]
#V2 = volt
#I = ampere
#errI = errAmpere
#errV2 = errVolt
print(V2, I, errV2, errI)
pylab.title("Curva corrente tensione")
pylab.xlabel("V (V)")
pylab.ylabel("I (A)")
pylab.grid(color = "gray")
pylab.grid(color = "gray")
pylab.errorbar(V2, I, errI, errV2, "o", color="black")
initial = numpy.array([0.0515, 6.75e-09])
error = errI+errV2/100 # do NOT take squared errors: that would mix units of measurement
popt, pcov = curve_fit(fit_function, V2, I, initial, error)
a, b = popt
print(a, b)
print(pcov)
div = 1000
bucket = numpy.array([0.0 for i in range(div)])
funzione = numpy.array([0.0 for i in range(div)])
inc = (V2.max()-V2.min())/div
for i in range(len(bucket)):
bucket[i]=float(i)*inc + V2.min()
funzione[i] = fit_function(bucket[i], a, b)
pylab.plot(bucket, funzione, color = "red")
# compute the chi-square
chisq = (((I - fit_function(V2, a, b))/error)**2).sum()
ndof = len(V2) - 2
p=1.0-scipy.stats.chi2.cdf(chisq, len(V2)-3)
print("Carica Chisquare/ndof = %f/%d" % (chisq, ndof))
print("p = ", p)
pylab.show()
#number = 150
#minV = 0.30
#maxV = 0.70
#inc = (maxV -minV)/number
#volt = numpy.array([(minV + i*inc) for i in range(number)])
#ampere = numpy.array([0.0 for i in range(number)])
#count = numpy.array([0 for i in range(number)])
#for i in range(number):
# for j in range(len(V2)):
# if(V2[j] == volt[i]):
# ampere[j] += I[i]
# count[j] += 1
#ampere = ampere/count
#V2 = volt
#I = ampere
| [i] = 1.0e-11*i
| conditional_block |
analisi.py | import numpy
import math
import pylab
from scipy.optimize import curve_fit
import math
import scipy.stats
import lab
def fi | , a, b):
return b*(numpy.exp(x/a)-1)
FileName='/home/federico/Documenti/Laboratorio2/Diodo/dati_arduino/dati.txt'
N1, N2 = pylab.loadtxt(FileName, unpack="True")
errN2 = numpy.array([1.0 for i in range(len(N2))])
errN1 = numpy.array([1.0 for i in range(len(N1))])
Rd = 3280.0
errRd = 30.0
eta = 4.89/1000
erreta = 0.02/1000
V1 = eta*N1
V2 = eta*N2
I = (V1-V2)/Rd
# Insert errors for the V values
errV2 = (erreta/eta + errN2/N2)*V2
errV1 = (erreta/eta + errN1/N1)*V1
errI = (errRd/Rd)*I
#for i in range(len(I)):
# errI[i] = 50e-06
for i in range(len(I)):
if(I[i]==0.0): I[i] = 1.0e-11*i
for i in range(len(V2)):
if(V2[i]==0.0): V2[i] = 1.0e-11*i
# to finish: I'd like to replace each column of points with a single point, but I haven't felt like it
#number = 150
#minV = 0.30
#maxV = 0.70
#inc = (maxV - minV)/number
#volt = numpy.array([(minV + i*inc) for i in range(number)])
#voltaggiVeri = numpy.array([])
#ampere = numpy.array([])
#errVolt = numpy.array([0.0 for i in range(number)])
#errAmpere = numpy.array([0.0 for i in range(number)])
#count = numpy.array([])
#for i in range(number):
# for j in range(len(V2)):
# if(volt[i]<=V2[j]<=volt[i+1]):
# voltaggiVeri = numpy.append(voltaggiVeri, V2[
# errVolt[i] = errV2[j]
# errAmpere[i] = errI[j]
# ampere[i] += I[j]
# count[i] += 1
#nonnulli = len(numpy.nonzero(count))
#aNonNulli = numpy.array([0.0 for i in range(nonnulli)])
#for i in range(nonnulli):
# index = (numpy.nonzero(ampere))[i]
# print(index)
# aNonNulli[i] = ampere[index]
#V2 = volt
#I = ampere
#errI = errAmpere
#errV2 = errVolt
print(V2, I, errV2, errI)
pylab.title("Curva corrente tensione")
pylab.xlabel("V (V)")
pylab.ylabel("I (A)")
pylab.grid(color = "gray")
pylab.grid(color = "gray")
pylab.errorbar(V2, I, errI, errV2, "o", color="black")
initial = numpy.array([0.0515, 6.75e-09])
error = errI+errV2/100 # do NOT take squared errors: that would mix units of measurement
popt, pcov = curve_fit(fit_function, V2, I, initial, error)
a, b = popt
print(a, b)
print(pcov)
div = 1000
bucket = numpy.array([0.0 for i in range(div)])
funzione = numpy.array([0.0 for i in range(div)])
inc = (V2.max()-V2.min())/div
for i in range(len(bucket)):
bucket[i]=float(i)*inc + V2.min()
funzione[i] = fit_function(bucket[i], a, b)
pylab.plot(bucket, funzione, color = "red")
# compute the chi-square
chisq = (((I - fit_function(V2, a, b))/error)**2).sum()
ndof = len(V2) - 2
p=1.0-scipy.stats.chi2.cdf(chisq, len(V2)-3)
print("Carica Chisquare/ndof = %f/%d" % (chisq, ndof))
print("p = ", p)
pylab.show()
#number = 150
#minV = 0.30
#maxV = 0.70
#inc = (maxV -minV)/number
#volt = numpy.array([(minV + i*inc) for i in range(number)])
#ampere = numpy.array([0.0 for i in range(number)])
#count = numpy.array([0 for i in range(number)])
#for i in range(number):
# for j in range(len(V2)):
# if(V2[j] == volt[i]):
# ampere[j] += I[i]
# count[j] += 1
#ampere = ampere/count
#V2 = volt
#I = ampere
| t_function(x | identifier_name |