pre_NAMD.py
# pre_NAMD.py
# Creates the files used for NAMD based on the .pdb file downloaded from the PDB bank
#
# Usage:
#     python pre_NAMD.py $PDBID
#
# $PDBID: the 4-character identification code of the .pdb file
#
# Input:
# $PDBID.pdb: .pdb file downloaded from the PDB bank
#
# Output:
# $PDBID_p.pdb: .pdb file with water molecules removed
# $PDBID_p_h.pdb: .pdb file with water removed and hydrogen atoms added
# $PDBID_p_h.psf: .psf file of $PDBID_p_h.pdb
# $PDBID_p_h.log: Log file of adding hydrogen atoms
# $PDBID_wb.pdb: .pdb file of the water box model
# $PDBID_wb.psf: .psf file of $PDBID_wb.pdb
# $PDBID_wb.log: Log file of the water box model generation
# $PDBID_wb_i.pdb: .pdb file of the ionized water box model (for NAMD)
# $PDBID_wb_i.psf: .psf file of $PDBID_wb_i.pdb (for NAMD)
# $PDBID.log: Log file of the whole process (output of VMD)
# $PDBID_center.txt: File containing the grid and center information of
#                    the ionized water box model
#
# Author: Xiaofei Zhang
# Date: June 20 2016
from __future__ import print_function
import sys
import os


def print_error(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


# main
if len(sys.argv) != 2:
    print_error("Usage: python pre_NAMD.py $PDBID")
    sys.exit(-1)

mypath = os.path.realpath(__file__)
tclpath = os.path.split(mypath)[0] + os.path.sep + 'tcl' + os.path.sep
pdbid = sys.argv[1]
logfile = pdbid + '.log'
# Set this to the right path of the VMD executable.
vmd = "/Volumes/VMD-1.9.2/VMD 1.9.2.app/Contents/vmd/vmd_MACOSXX86"

print("Input: " + pdbid + ".pdb")

# Remove water
print("Remove water...")
cmdline = '"' + vmd + '"' + ' -dispdev text -eofexit < ' + tclpath + 'remove_water.tcl' + ' -args ' + pdbid + ' > ' + logfile
os.system(cmdline)

# Create .psf
print("Create PSF file...")
cmdline = '"' + vmd + '"' + ' -dispdev text -eofexit < ' + tclpath + 'create_psf.tcl' + ' -args ' + pdbid + ' >> ' + logfile
os.system(cmdline)

# Build water box
print("Build water box...")
cmdline = '"' + vmd + '"' + ' -dispdev text -eofexit < ' + tclpath + 'build_water_box.tcl' + ' -args ' + pdbid + ' >> ' + logfile
os.system(cmdline)

# Add ions
print("Add ions...")
cmdline = '"' + vmd + '"' + ' -dispdev text -eofexit < ' + tclpath + 'add_ion.tcl' + ' -args ' + pdbid + ' >> ' + logfile
os.system(cmdline)

# Calculate grid and center
print("Calculate center coordinates...")
cmdline = '"' + vmd + '"' + ' -dispdev text -eofexit < ' + tclpath + 'get_center.tcl' + ' -args ' + pdbid + ' >> ' + logfile
os.system(cmdline)

print("Finish!")
# end main
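
# --- Illustrative sketch, not part of the original script ---
# The os.system() calls above splice paths into a shell string by hand,
# which breaks on quoting and discards VMD's exit status. Below is a
# minimal subprocess-based alternative; the helper name run_vmd_script
# is an assumption for illustration only.
import subprocess

def run_vmd_script(vmd, script_path, pdbid, logfile, append=True):
    """Feed a Tcl script to VMD on stdin, logging output to logfile."""
    with open(logfile, 'a' if append else 'w') as log, open(script_path) as src:
        # -eofexit makes VMD exit once stdin is exhausted; -args forwards
        # the PDB id to the Tcl script, mirroring the command lines above.
        return subprocess.call([vmd, '-dispdev', 'text', '-eofexit',
                                '-args', pdbid],
                               stdin=src, stdout=log, stderr=subprocess.STDOUT)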
lib.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate has two arenas implemented: `TypedArena`, which is a simpler
//! arena but can only hold objects of a single type, and `Arena`, which is a
//! more complex, slower arena that can hold objects of any type.
#![crate_name = "arena"]
#![experimental]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![feature(unsafe_destructor)]
#![allow(missing_docs)]
extern crate alloc;
use std::cell::{Cell, RefCell};
use std::cmp;
use std::intrinsics::{TyDesc, get_tydesc};
use std::intrinsics;
use std::mem;
use std::num::{Int, UnsignedInt};
use std::ptr;
use std::rc::Rc;
use std::rt::heap::{allocate, deallocate};
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone, PartialEq)]
struct Chunk {
data: Rc<RefCell<Vec<u8>>>,
fill: Cell<uint>,
is_copy: Cell<bool>,
}
impl Chunk {
fn capacity(&self) -> uint {
self.data.borrow().capacity()
}
unsafe fn as_ptr(&self) -> *const u8 {
self.data.borrow().as_ptr()
}
}
/// A slower reflection-based arena that can allocate objects of any type.
///
/// This arena uses `Vec<u8>` as a backing store to allocate objects from. For
/// each allocated object, the arena stores a pointer to the type descriptor
/// followed by the object (potentially with alignment padding after each
/// element). When the arena is destroyed, it iterates through all of its
/// chunks, and uses the tydesc information to trace through the objects,
/// calling the destructors on them. One subtle point that needs to be
/// addressed is how to handle panics while running the user provided
/// initializer function. It is important to not run the destructor on
/// uninitialized objects, but how to detect them is somewhat subtle. Since
/// `alloc()` can be invoked recursively, it is not sufficient to simply exclude
/// the most recent object. To solve this without requiring extra space, we
/// use the low order bit of the tydesc pointer to encode whether the object
/// it describes has been fully initialized.
///
/// As an optimization, objects with destructors are stored in different chunks
/// than objects without destructors. This reduces overhead when initializing
/// plain-old-data (`Copy` types) and means we don't need to waste time running
/// their destructors.
pub struct Arena {
// The head is separated out from the list as an unbenchmarked
// microoptimization, to avoid needing to case on the list to access the
// head.
head: RefCell<Chunk>,
copy_head: RefCell<Chunk>,
chunks: RefCell<Vec<Chunk>>,
}
impl Arena {
/// Allocates a new Arena with 32 bytes preallocated.
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
/// Allocates a new Arena with `initial_size` bytes preallocated.
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: RefCell::new(chunk(initial_size, false)),
copy_head: RefCell::new(chunk(initial_size, true)),
chunks: RefCell::new(Vec::new()),
}
}
}
fn chunk(size: uint, is_copy: bool) -> Chunk {
Chunk {
data: Rc::new(RefCell::new(Vec::with_capacity(size))),
fill: Cell::new(0u),
is_copy: Cell::new(is_copy),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&*self.head.borrow());
for chunk in self.chunks.borrow().iter() {
if !chunk.is_copy.get() {
destroy_chunk(chunk);
}
}
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(align - 1)).unwrap() & !(align - 1)
}
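// Illustrative check (not in the original source): `round_up` relies on
// `align` being a power of two, so that `!(align - 1)` masks off exactly
// the low bits.
#[test]
fn test_round_up_examples() {
    assert_eq!(round_up(5, 4), 8);
    assert_eq!(round_up(8, 4), 8);
    assert_eq!(round_up(0, 8), 0);
}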
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = chunk.as_ptr();
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *const uint = mem::transmute(buf.offset(idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*const TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(buf.offset(start as int) as *const i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::align_of::<*const TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a panic occurs
// during an initializer.
#[inline]
fn bitpack_tydesc_ptr(p: *const TyDesc, is_done: bool) -> uint {
p as uint | (is_done as uint)
}
#[inline]
fn un_bitpack_tydesc_ptr(p: uint) -> (*const TyDesc, bool) {
((p & !1) as *const TyDesc, p & 1 == 1)
}
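// Illustrative check (not in the original source): because tydesc pointers
// are at least word-aligned, the low bit is always zero, so packing the
// `is_done` flag there round-trips losslessly.
#[test]
fn test_bitpack_roundtrip() {
    let tydesc = unsafe { get_tydesc::<uint>() };
    let packed = bitpack_tydesc_ptr(tydesc, true);
    let (unpacked, is_done) = un_bitpack_tydesc_ptr(packed);
    assert_eq!(unpacked as uint, tydesc as uint);
    assert!(is_done);
}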
impl Arena {
fn chunk_size(&self) -> uint {
self.copy_head.borrow().capacity()
}
// Functions for the POD part of the arena
fn alloc_copy_grow(&self, n_bytes: uint, align: uint) -> *const u8 {
// Allocate a new chunk.
let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
self.chunks.borrow_mut().push(self.copy_head.borrow().clone());
*self.copy_head.borrow_mut() =
chunk((new_min_chunk_size + 1u).next_power_of_two(), true);
return self.alloc_copy_inner(n_bytes, align);
}
#[inline]
fn alloc_copy_inner(&self, n_bytes: uint, align: uint) -> *const u8 {
let start = round_up(self.copy_head.borrow().fill.get(), align);
let end = start + n_bytes;
if end > self.chunk_size() {
return self.alloc_copy_grow(n_bytes, align);
}
let copy_head = self.copy_head.borrow();
copy_head.fill.set(end);
unsafe {
copy_head.as_ptr().offset(start as int)
}
}
#[inline]
fn alloc_copy<T>(&self, op: || -> T) -> &mut T {
unsafe {
let ptr = self.alloc_copy_inner(mem::size_of::<T>(),
mem::min_align_of::<T>());
let ptr = ptr as *mut T;
ptr::write(&mut (*ptr), op());
return &mut *ptr;
}
}
// Functions for the non-POD part of the arena
fn alloc_noncopy_grow(&self, n_bytes: uint,
align: uint) -> (*const u8, *const u8) {
// Allocate a new chunk.
let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
self.chunks.borrow_mut().push(self.head.borrow().clone());
*self.head.borrow_mut() =
chunk((new_min_chunk_size + 1u).next_power_of_two(), false);
return self.alloc_noncopy_inner(n_bytes, align);
}
#[inline]
fn alloc_noncopy_inner(&self, n_bytes: uint,
align: uint) -> (*const u8, *const u8) {
// Be careful to not maintain any `head` borrows active, because
// `alloc_noncopy_grow` borrows it mutably.
let (start, end, tydesc_start, head_capacity) = {
let head = self.head.borrow();
let fill = head.fill.get();
let tydesc_start = fill;
let after_tydesc = fill + mem::size_of::<*const TyDesc>();
let start = round_up(after_tydesc, align);
let end = start + n_bytes;
(start, end, tydesc_start, head.capacity())
};
if end > head_capacity {
return self.alloc_noncopy_grow(n_bytes, align);
}
let head = self.head.borrow();
head.fill.set(round_up(end, mem::align_of::<*const TyDesc>()));
unsafe {
let buf = head.as_ptr();
return (buf.offset(tydesc_start as int), buf.offset(start as int));
}
}
#[inline]
fn alloc_noncopy<T>(&self, op: || -> T) -> &mut T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_noncopy_inner(mem::size_of::<T>(),
mem::min_align_of::<T>());
let ty_ptr = ty_ptr as *mut uint;
let ptr = ptr as *mut T;
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = mem::transmute(tydesc);
// Actually initialize it
ptr::write(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return &mut *ptr;
}
}
/// Allocates a new item in the arena, using `op` to initialize the value,
/// and returns a reference to it.
#[inline]
pub fn alloc<T>(&self, op: || -> T) -> &mut T {
unsafe {
if intrinsics::needs_drop::<T>() {
self.alloc_noncopy(op)
} else {
self.alloc_copy(op)
}
}
}
}
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| Rc::new(i));
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
fn test_arena_alloc_nested() {
struct Inner { value: uint }
struct Outer<'a> { inner: &'a Inner }
let arena = Arena::new();
let result = arena.alloc(|| Outer {
inner: arena.alloc(|| Inner { value: 10 })
});
assert_eq!(result.inner.value, 10);
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { Rc::new(i) });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, panic while allocating
arena.alloc::<Rc<int>>(|| {
panic!();
});
}
/// A faster arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
ptr: Cell<*const T>,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
end: Cell<*const T>,
/// A pointer to the first arena segment.
first: RefCell<*mut TypedArenaChunk<T>>,
}
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
next: *mut TypedArenaChunk<T>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
fn calculate_size<T>(capacity: uint) -> uint {
let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(capacity).unwrap();
size = size.checked_add(elems_size).unwrap();
size
}
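// Layout sketch (illustrative assumption, not in the original source): a
// chunk is the header, padding up to T's alignment, then `capacity`
// elements of T. On a typical 64-bit target the header (a pointer plus a
// uint) is 16 bytes and already 8-byte aligned, so
// calculate_size::<u64>(8) would be 16 + 64 = 80.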
impl<T> TypedArenaChunk<T> {
#[inline]
unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: uint)
-> *mut TypedArenaChunk<T> {
let size = calculate_size::<T>(capacity);
let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>())
as *mut TypedArenaChunk<T>;
if chunk.is_null() { alloc::oom() }
(*chunk).next = next;
(*chunk).capacity = capacity;
chunk
}
/// Destroys this arena chunk and every chunk after it. If `T` has drop
/// glue, the destructor is run on each of the `len` initialized objects.
#[inline]
unsafe fn destroy(&mut self, len: uint) {
// Destroy all the allocated objects.
if intrinsics::needs_drop::<T>() {
let mut start = self.start();
for _ in range(0, len) {
ptr::read(start as *const T); // run the destructor on the pointer
start = start.offset(mem::size_of::<T>() as int)
}
}
// Destroy the next chunk.
let next = self.next;
let size = calculate_size::<T>(self.capacity);
deallocate(self as *mut TypedArenaChunk<T> as *mut u8, size,
mem::min_align_of::<TypedArenaChunk<T>>());
if next.is_not_null() {
let capacity = (*next).capacity;
(*next).destroy(capacity);
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self) -> *const u8 {
let this: *const TypedArenaChunk<T> = self;
unsafe {
mem::transmute(round_up(this.offset(1) as uint,
mem::min_align_of::<T>()))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self) -> *const u8 {
unsafe {
let size = mem::size_of::<T>().checked_mul(self.capacity).unwrap();
self.start().offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new `TypedArena` with preallocated space for eight objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new `TypedArena` with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
unsafe {
let chunk = TypedArenaChunk::<T>::new(ptr::null_mut(), capacity);
TypedArena {
ptr: Cell::new((*chunk).start() as *const T),
end: Cell::new((*chunk).end() as *const T),
first: RefCell::new(chunk),
}
}
}
/// Allocates an object in the `TypedArena`, returning a reference to it.
#[inline]
pub fn alloc(&self, object: T) -> &mut T {
if self.ptr == self.end {
    self.grow()
}
let ptr: &mut T = unsafe {
let ptr: &mut T = mem::transmute(self.ptr);
ptr::write(ptr, object);
self.ptr.set(self.ptr.get().offset(1));
ptr
};
ptr
}
/// Grows the arena.
#[inline(never)]
fn grow(&self) {
unsafe {
let chunk = *self.first.borrow_mut();
let new_capacity = (*chunk).capacity.checked_mul(2).unwrap();
let chunk = TypedArenaChunk::<T>::new(chunk, new_capacity);
self.ptr.set((*chunk).start() as *const T);
self.end.set((*chunk).end() as *const T);
*self.first.borrow_mut() = chunk
}
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
unsafe {
// Determine how much was filled.
let start = self.first.borrow().as_ref().unwrap().start() as uint;
let end = self.ptr.get() as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
(**self.first.borrow_mut()).destroy(diff)
}
}
}
#[cfg(test)]
mod tests {
extern crate test;
use self::test::Bencher;
use super::{Arena, TypedArena};
#[allow(dead_code)]
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_copy() {
let arena = TypedArena::new();
for _ in range(0u, 100000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_copy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
})
})
}
#[bench]
pub fn bench_copy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Point {
x: 1,
y: 2,
z: 3,
}
})
}
#[bench]
pub fn bench_copy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
})
})
}
#[allow(dead_code)]
struct Noncopy {
string: String,
array: Vec<int>,
}
#[test]
pub fn test_noncopy() {
let arena = TypedArena::new();
for _ in range(0u, 100000) {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
});
}
}
#[bench]
pub fn bench_noncopy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
#[bench]
pub fn bench_noncopy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
}
})
}
#[bench]
pub fn bench_noncopy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
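// Combined usage sketch (illustrative, not in the original source): both
// arenas hand out `&mut` references that live as long as the arena itself.
#[test]
pub fn test_usage_sketch() {
    let arena = Arena::new();
    let a = arena.alloc(|| 5u);
    assert_eq!(*a, 5);

    let typed: TypedArena<uint> = TypedArena::new();
    let t = typed.alloc(7u);
    assert_eq!(*t, 7);
}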
}
return &mut *ptr;
}
}
/// Allocates a new item in the arena, using `op` to initialize the value,
/// and returns a reference to it.
#[inline]
pub fn alloc<T>(&self, op: || -> T) -> &mut T {
unsafe {
if intrinsics::needs_drop::<T>() {
self.alloc_noncopy(op)
} else {
self.alloc_copy(op)
}
}
}
}
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| Rc::new(i));
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
fn test_arena_alloc_nested() {
struct Inner { value: uint }
struct Outer<'a> { inner: &'a Inner }
let arena = Arena::new();
let result = arena.alloc(|| Outer {
inner: arena.alloc(|| Inner { value: 10 })
});
assert_eq!(result.inner.value, 10);
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { Rc::new(i) });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, panic while allocating
arena.alloc::<Rc<int>>(|| {
panic!();
});
}
/// A faster arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
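///
/// A minimal usage sketch (hypothetical values):
///
/// ```
/// let arena: TypedArena<int> = TypedArena::new();
/// let x = arena.alloc(10i);
/// assert_eq!(*x, 10);
/// ```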
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
ptr: Cell<*const T>,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
end: Cell<*const T>,
/// A pointer to the first arena segment.
first: RefCell<*mut TypedArenaChunk<T>>,
}
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
next: *mut TypedArenaChunk<T>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
fn calculate_size<T>(capacity: uint) -> uint {
let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(capacity).unwrap();
size = size.checked_add(elems_size).unwrap();
size
}
impl<T> TypedArenaChunk<T> {
#[inline]
unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: uint)
-> *mut TypedArenaChunk<T> {
let size = calculate_size::<T>(capacity);
let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>())
as *mut TypedArenaChunk<T>;
if chunk.is_null() { alloc::oom() }
(*chunk).next = next;
(*chunk).capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint) {
// Destroy all the allocated objects.
if intrinsics::needs_drop::<T>() {
let mut start = self.start();
for _ in range(0, len) {
ptr::read(start as *const T); // run the destructor on the pointer
start = start.offset(mem::size_of::<T>() as int)
}
}
// Destroy the next chunk.
let next = self.next;
let size = calculate_size::<T>(self.capacity);
deallocate(self as *mut TypedArenaChunk<T> as *mut u8, size,
mem::min_align_of::<TypedArenaChunk<T>>());
if next.is_not_null() {
let capacity = (*next).capacity;
(*next).destroy(capacity);
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self) -> *const u8 {
let this: *const TypedArenaChunk<T> = self;
unsafe {
mem::transmute(round_up(this.offset(1) as uint,
mem::min_align_of::<T>()))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self) -> *const u8 {
unsafe {
let size = mem::size_of::<T>().checked_mul(self.capacity).unwrap();
self.start().offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new `TypedArena` with preallocated space for eight objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new `TypedArena` with preallocated space for the given number of
/// objects.
#[inline]
    pub fn with_capacity(capacity: uint) -> TypedArena<T> {
        unsafe {
            let chunk = TypedArenaChunk::<T>::new(ptr::null_mut(), capacity);
            TypedArena {
                ptr: Cell::new((*chunk).start() as *const T),
                end: Cell::new((*chunk).end() as *const T),
                first: RefCell::new(chunk),
            }
        }
    }
/// Allocates an object in the `TypedArena`, returning a reference to it.
#[inline]
pub fn alloc(&self, object: T) -> &mut T {
if self.ptr == self.end {
self.grow()
}
let ptr: &mut T = unsafe {
let ptr: &mut T = mem::transmute(self.ptr);
ptr::write(ptr, object);
self.ptr.set(self.ptr.get().offset(1));
ptr
};
ptr
}
/// Grows the arena.
#[inline(never)]
fn grow(&self) {
unsafe {
let chunk = *self.first.borrow_mut();
let new_capacity = (*chunk).capacity.checked_mul(2).unwrap();
let chunk = TypedArenaChunk::<T>::new(chunk, new_capacity);
self.ptr.set((*chunk).start() as *const T);
self.end.set((*chunk).end() as *const T);
*self.first.borrow_mut() = chunk
}
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
unsafe {
// Determine how much was filled.
let start = self.first.borrow().as_ref().unwrap().start() as uint;
let end = self.ptr.get() as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
(**self.first.borrow_mut()).destroy(diff)
}
}
}
#[cfg(test)]
mod tests {
extern crate test;
use self::test::Bencher;
use super::{Arena, TypedArena};
#[allow(dead_code)]
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_copy() {
let arena = TypedArena::new();
for _ in range(0u, 100000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_copy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
})
})
}
#[bench]
pub fn bench_copy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Point {
x: 1,
y: 2,
z: 3,
}
})
}
#[bench]
pub fn bench_copy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
})
})
}
#[allow(dead_code)]
struct Noncopy {
string: String,
array: Vec<int>,
}
#[test]
pub fn test_noncopy() {
let arena = TypedArena::new();
for _ in range(0u, 100000) {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
});
}
}
#[bench]
pub fn bench_noncopy(b: &mut Bencher) {
let arena = TypedArena::new();
b.iter(|| {
arena.alloc(Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
#[bench]
pub fn bench_noncopy_nonarena(b: &mut Bencher) {
b.iter(|| {
box Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
}
})
}
#[bench]
pub fn bench_noncopy_old_arena(b: &mut Bencher) {
let arena = Arena::new();
b.iter(|| {
arena.alloc(|| Noncopy {
string: "hello world".to_string(),
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
}
column_item.tsx
import React from 'react'
export default (props: any) => {
const column = props.column
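  // Note: the UI strings below are Chinese: 免费 "free", 限时 "limited-time",
  // 原价 "original price", 立即订阅 "subscribe now", 人已学习 "learners so far".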
return (
<div className="_20Cq3Rn7_0">
<div className="_2sej44xY_0">
<a data-seo="" href="//time.geekbang.org/course/intro/237">
<img src={column.column_cover} alt="" className="_1miPDP4s_0" />
<span className="_1wLiyUbR_0"></span>
</a>
</div>
<div className="_3M3E-ESU_0">
<div className="_3gQBs_6X_0">
<div className="_3G50nw0p_0">
<h2>{column.column_title}</h2>{' '}
<p>
{column.column_unit} <em>|</em> {column.sub_count}人已学习
</p>
</div>
<div className="_33lENDr7_0">
{column.author_name} {column.author_intro}
</div>
</div>
<div className="_14n6BJoa_0">
<ul>
{column.articles.map((article: any, index: number) => {
return (
<li key={article.id}>
                  <a
                    href=""
                    className={article.is_video_preview ? '_10vvBdC9_0' : ''}
                  >
                    {article.is_video_preview ? (
                      <span className="_ffA7FdL_0">免费</span>
                    ) : (
                      ''
                    )}
                    {'0' + index + ' | ' + article.article_title}
                  </a>
                </li>
              )
            })}
          </ul>
        </div>
        <div className="_2zRFFX7P_0">
          <p className="_14cxbu2p_0">
            <span className="_1BSc9YvC_0">
              限时 ¥{column.column_price / 100}
            </span>
            {column.column_price_market ? (
              <s className="_1EwQIhcU_0">
                原价 ¥{column.column_price_market / 100}
              </s>
            ) : (
              ''
            )}
          </p>
          <div className="_1NLR_mQs_0">
            <button className="_272_Yrle_0">立即订阅</button>
          </div>
        </div>
      </div>
    </div>
  )
}
cpu_timing.rs
// This file is part of zinc64.
// Copyright (c) 2016-2019 Sebastian Jastrzebski. All rights reserved.
// Licensed under the GPLv3. See LICENSE file in the project root for full license text.
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use zinc64_core::{Addressable, Cpu, IoPort, IrqLine, Pin, Ram, TickFn};
use zinc64_emu::cpu::Cpu6510;
struct MockMemory {
ram: Ram,
}
impl MockMemory {
pub fn new(ram: Ram) -> Self {
MockMemory { ram }
}
}
impl Addressable for MockMemory {
fn read(&self, address: u16) -> u8 {
self.ram.read(address)
}
fn write(&mut self, address: u16, value: u8) {
self.ram.write(address, value);
}
}
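// Builds a 6510 CPU wired to a flat 64K mock memory with idle BA/IRQ/NMI
// lines, so instruction timing can be measured in isolation.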
fn setup_cpu() -> Cpu6510 {
let ba_line = Rc::new(RefCell::new(Pin::new_high()));
let cpu_io_port = Rc::new(RefCell::new(IoPort::new(0x00, 0xff)));
let cpu_irq = Rc::new(RefCell::new(IrqLine::new("irq")));
let cpu_nmi = Rc::new(RefCell::new(IrqLine::new("nmi")));
let mem = Rc::new(RefCell::new(MockMemory::new(Ram::new(0x10000))));
Cpu6510::new(mem, cpu_io_port, ba_line, cpu_irq, cpu_nmi)
}
// Based on 65xx Processor Data from http://www.romhacking.net/documents/318/
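// Entries with value 0 mark opcodes (mostly undocumented/unstable ones) that
// the timing test below skips.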
const OPCODE_TIMING: [u8; 256] = [
7, // 00 BRK #$ab
6, // 01 ORA ($ab,X)
0, // 02 HLT*
0, // 03 ASO* ($ab,X)
0, // 04 SKB* $ab
3, // 05 ORA $ab
5, // 06 ASL $ab
0, // 07 ASO* $ab
3, // 08 PHP
2, // 09 ORA #$ab
2, // 0A ASL A
0, // 0B ANC* #$ab
0, // 0C SKW* $abcd
4, // 0D ORA $abcd
6, // 0E ASL $abcd
0, // 0F ASO* $abcd
2, // 10 BPL nearlabel
5, // 11 ORA ($ab),Y
0, // 12 HLT*
0, // 13 ASO* ($ab),Y
0, // 14 SKB* $ab,X
4, // 15 ORA $ab,X
6, // 16 ASL $ab,X
0, // 17 ASO* $ab,X
2, // 18 CLC
4, // 19 ORA $abcd,Y
0, // 1A NOP*
0, // 1B ASO* $abcd,Y
0, // 1C SKW* $abcd,X
4, // 1D ORA $abcd,X
7, // 1E ASL $abcd,X
0, // 1F ASO* $abcd,X
6, // 20 JSR $abcd
6, // 21 AND ($ab,X)
0, // 22 HLT*
0, // 23 RLA* ($ab,X)
3, // 24 BIT $ab
3, // 25 AND $ab
5, // 26 ROL $ab
0, // 27 RLA* $ab
4, // 28 PLP
2, // 29 AND #$ab
2, // 2A ROL A
0, // 2B ANC* #$ab
4, // 2C BIT $abcd
4, // 2D AND $abcd
6, // 2E ROL $abcd
0, // 2F RLA* $abcd
2, // 30 BMI nearlabel
5, // 31 AND ($ab),Y
0, // 32 HLT*
0, // 33 RLA* ($ab),Y
0, // 34 SKB* $ab,X
4, // 35 AND $ab,X
6, // 36 ROL $ab,X
0, // 37 RLA* $ab,X
2, // 38 SEC
4, // 39 AND $abcd,Y
0, // 3A NOP*
0, // 3B RLA* $abcd,Y
0, // 3C SKW* $abcd,X
4, // 3D AND $abcd,X
7, // 3E ROL $abcd,X
0, // 3F RLA* $abcd,X
6, // 40 RTI
6, // 41 EOR ($ab,X)
0, // 42 HLT*
8, // 43 LSE* ($ab,X)
0, // 44 SKB* $ab
3, // 45 EOR $ab
5, // 46 LSR $ab
5, // 47 LSE* $ab
3, // 48 PHA
2, // 49 EOR #$ab
2, // 4A LSR A
2, // 4B ALR* #$ab
3, // 4C JMP $abcd
4, // 4D EOR $abcd
6, // 4E LSR $abcd
6, // 4F LSE* $abcd
2, // 50 BVC nearlabel
5, // 51 EOR ($ab),Y
0, // 52 HLT*
8, // 53 LSE* ($ab),Y
0, // 54 SKB* $ab,X
4, // 55 EOR $ab,X
6, // 56 LSR $ab,X
6, // 57 LSE* $ab,X
2, // 58 CLI
4, // 59 EOR $abcd,Y
0, // 5A NOP*
7, // 5B LSE* $abcd,Y
0, // 5C SKW* $abcd,X
4, // 5D EOR $abcd,X
7, // 5E LSR $abcd,X
7, // 5F LSE* $abcd,X
6, // 60 RTS
6, // 61 ADC ($ab,X)
0, // 62 HLT*
0, // 63 RRA* ($ab,X)
0, // 64 SKB* $ab
3, // 65 ADC $ab
5, // 66 ROR $ab
0, // 67 RRA* $ab
4, // 68 PLA
2, // 69 ADC #$ab
2, // 6A ROR A
0, // 6B ARR* #$ab
5, // 6C JMP ($abcd)
4, // 6D ADC $abcd
6, // 6E ROR $abcd
0, // 6F RRA* $abcd
2, // 70 BVS nearlabel
5, // 71 ADC ($ab),Y
0, // 72 HLT*
0, // 73 RRA* ($ab),Y
0, // 74 SKB* $ab,X
4, // 75 ADC $ab,X
6, // 76 ROR $ab,X
0, // 77 RRA* $ab,X
2, // 78 SEI
4, // 79 ADC $abcd,Y
0, // 7A NOP*
0, // 7B RRA* $abcd,Y
0, // 7C SKW* $abcd,X
4, // 7D ADC $abcd,X
7, // 7E ROR $abcd,X
0, // 7F RRA* $abcd,X
0, // 80 SKB* #$ab
6, // 81 STA ($ab,X)
0, // 82 SKB* #$ab
0, // 83 SAX* ($ab,X)
3, // 84 STY $ab
3, // 85 STA $ab
3, // 86 STX $ab
0, // 87 SAX* $ab
2, // 88 DEY
0, // 89 SKB* #$ab
2, // 8A TXA
2, // 8B ANE* #$ab
4, // 8C STY $abcd
4, // 8D STA $abcd
4, // 8E STX $abcd
0, // 8F SAX* $abcd
2, // 90 BCC nearlabel
6, // 91 STA ($ab),Y
0, // 92 HLT*
0, // 93 SHA* ($ab),Y
4, // 94 STY $ab,X
4, // 95 STA $ab,X
4, // 96 STX $ab,Y
0, // 97 SAX* $ab,Y
2, // 98 TYA
5, // 99 STA $abcd,Y
2, // 9A TXS
0, // 9B SHS* $abcd,Y
0, // 9C SHY* $abcd,X
5, // 9D STA $abcd,X
0, // 9E SHX* $abcd,Y
0, // 9F SHA* $abcd,Y
2, // A0 LDY #$ab
6, // A1 LDA ($ab,X)
2, // A2 LDX #$ab
6, // A3 LAX* ($ab,X)
3, // A4 LDY $ab
3, // A5 LDA $ab
3, // A6 LDX $ab
3, // A7 LAX* $ab
2, // A8 TAY
2, // A9 LDA #$ab
2, // AA TAX
2, // AB ANX* #$ab
4, // AC LDY $abcd
4, // AD LDA $abcd
4, // AE LDX $abcd
4, // AF LAX* $abcd
2, // B0 BCS nearlabel
5, // B1 LDA ($ab),Y
0, // B2 HLT*
5, // B3 LAX* ($ab),Y
4, // B4 LDY $ab,X
4, // B5 LDA $ab,X
4, // B6 LDX $ab,Y
4, // B7 LAX* $ab,Y
2, // B8 CLV
4, // B9 LDA $abcd,Y
2, // BA TSX
0, // BB LAS* $abcd,Y
4, // BC LDY $abcd,X
4, // BD LDA $abcd,X
4, // BE LDX $abcd,Y
4, // BF LAX* $abcd,Y
2, // C0 CPY #$ab
6, // C1 CMP ($ab,X)
0, // C2 SKB* #$ab
0, // C3 DCM* ($ab,X)
3, // C4 CPY $ab
3, // C5 CMP $ab
5, // C6 DEC $ab
0, // C7 DCM* $ab
2, // C8 INY
2, // C9 CMP #$ab
2, // CA DEX
2, // CB SBX* #$ab
4, // CC CPY $abcd
4, // CD CMP $abcd
6, // CE DEC $abcd
0, // CF DCM* $abcd
2, // D0 BNE nearlabel
5, // D1 CMP ($ab),Y
0, // D2 HLT*
0, // D3 DCM* ($ab),Y
0, // D4 SKB* $ab,X
4, // D5 CMP $ab,X
6, // D6 DEC $ab,X
0, // D7 DCM* $ab,X
2, // D8 CLD
4, // D9 CMP $abcd,Y
0, // DA NOP*
0, // DB DCM* $abcd,Y
0, // DC SKW* $abcd,X
4, // DD CMP $abcd,X
7, // DE DEC $abcd,X
0, // DF DCM* $abcd,X
2, // E0 CPX #$ab
6, // E1 SBC ($ab,X)
0, // E2 SKB* #$ab
0, // E3 INS* ($ab,X)
3, // E4 CPX $ab
3, // E5 SBC $ab
5, // E6 INC $ab
0, // E7 INS* $ab
2, // E8 INX
2, // E9 SBC #$ab
2, // EA NOP
0, // EB SBC* #$ab
4, // EC CPX $abcd
4, // ED SBC $abcd
6, // EE INC $abcd
0, // EF INS* $abcd
2, // F0 BEQ nearlabel
5, // F1 SBC ($ab),Y
0, // F2 HLT*
0, // F3 INS* ($ab),Y
0, // F4 SKB* $ab,X
4, // F5 SBC $ab,X
6, // F6 INC $ab,X
0, // F7 INS* $ab,X
2, // F8 SED
4, // F9 SBC $abcd,Y
0, // FA NOP*
0, // FB INS* $abcd,Y
0, // FC SKW* $abcd,X
4, // FD SBC $abcd,X
7, // FE INC $abcd,X
0, // FF INS* $abcd,X
];
#[test]
fn opcode_timing() {
let mut cpu = setup_cpu();
for opcode in 0..256 {
let cycles = OPCODE_TIMING[opcode];
        if cycles > 0 {
            let clock = Rc::new(Cell::new(0u8));
            let clock_clone = clock.clone();
            let tick_fn: TickFn = Rc::new(move || {
                clock_clone.set(clock_clone.get().wrapping_add(1));
            });
            cpu.write(0x1000, opcode as u8);
            cpu.write(0x1001, 0x00);
            cpu.write(0x1002, 0x10);
            cpu.set_pc(0x1000);
            cpu.step(&tick_fn);
            assert_eq!(
                cycles,
                clock.get(),
                "opcode {:02x} timing failed",
                opcode as u8
            );
        }
    }
}
models.py
# Django
from django.contrib.auth.models import User
from common.utils import get_sentinel_user
from toggleproperties.models import ToggleProperty
try:
# Django < 1.10
from django.contrib.contenttypes.generic import GenericForeignKey
except ImportError:
# Django >= 1.10
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import models
class NestedComment(models.Model):
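    """A threaded comment that can attach to any model via a generic FK."""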
content_type = models.ForeignKey(
ContentType,
on_delete=models.CASCADE
)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey(
'content_type',
'object_id',
)
author = models.ForeignKey(
User,
related_name="comments",
on_delete=models.SET(get_sentinel_user),
editable=False,
)
text = models.TextField()
created = models.DateTimeField(
auto_now_add=True,
editable=False,
)
updated = models.DateTimeField(
auto_now=True,
editable=False,
)
parent = models.ForeignKey(
'self',
null=True,
blank=True,
related_name='children',
on_delete=models.SET_NULL,
)
deleted = models.BooleanField(
default=False,
)
pending_moderation = models.NullBooleanField()
moderator = models.ForeignKey(
User,
related_name="moderated_comments",
null=True,
default=None,
on_delete=models.SET_NULL
)
    @property
    def depth(self):
        value = 1
        if self.parent:
            return value + self.parent.depth
        return value

    @property
    def likes(self):
        return ToggleProperty.objects.filter(
            object_id=self.pk,
            content_type=ContentType.objects.get_for_model(NestedComment),
            property_type='like'
        ).values_list('user__pk', flat=True)

    def __str__(self):
        return "Comment %d" % self.pk

    def get_absolute_url(self):
        object_url = self.content_type.get_object_for_this_type(id=self.object_id).get_absolute_url()
        return '%s#c%d' % (object_url, self.id)

    def delete(self, *args, **kwargs):
        if not self.deleted:
            self.deleted = True
            self.save()

    def clean(self, *args, **kwargs):
        obj = self.content_type.get_object_for_this_type(pk=self.object_id)
        if hasattr(obj, 'allow_comments') and obj.allow_comments is False:
            raise ValidationError('Comments are closed')

    class Meta:
        app_label = 'nested_comments'
Logo.tsx
import React from 'react'
import SvgIcon, { Props as SvgIconProps } from './SvgIcon'
export const logoAspect = 90 / 103.9

export default class Logo extends React.Component<SvgIconProps> {
  render() {
    return (
      <SvgIcon viewBox='0 0 90 103.9' {...this.props}>
        <path opacity='.6' fill='#009741' d='M45 34.6l15 8.7V26L30 8.7 0 26v34.6l30 17.3 30-17.3-15 8.7-15-8.7V43.3z'/>
        <path opacity='.6' fill='#dedc0a' d='M30 60.6V43.3l15-8.7L30 26 0 43.3v34.6l30 17.4 30-17.4V60.6l-15 8.7z'/>
        <path opacity='.6' fill='#f39208' d='M45 34.6l15 8.7v17.3l-15 8.7-15-8.7V43.3l15-8.7L15 52v34.6l30 17.3 30-17.3V52z'/>
        <path opacity='.6' fill='#e72174' d='M60 26l-15 8.6 15 8.7v17.3l-15 8.7-15-8.7v17.3l30 17.4 30-17.4V43.3z'/>
        <path opacity='.6' fill='#253887' d='M60 8.7L30 26v17.3l15-8.7 15 8.7v17.3l-15 8.7-15-8.7 30 17.3 30-17.3V26z'/>
        <path opacity='.6' fill='#3ba9e0' d='M45 0L15 17.3V52l15 8.6V43.3l15-8.7 15 8.7v17.3L75 52V17.3z'/>
      </SvgIcon>
    )
  }
}
info.py
# -*- coding: utf-8 -*-
##############################################################################
# 2011 E2OpenPlugins #
# #
# This file is open source software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
##############################################################################
from Plugins.Extensions.OpenWebif.__init__ import _
from Components.About import about
from Components.config import config
from Components.NimManager import nimmanager
from Components.Harddisk import harddiskmanager
from Components.Network import iNetwork
from Components.Language import language
from RecordTimer import parseEvent
from Screens.Standby import inStandby
from timer import TimerEntry
from Tools.Directories import fileExists, pathExists
from time import time, localtime, strftime
from enigma import eDVBVolumecontrol, eServiceCenter, eServiceReference, eEnv
from twisted.web import version
from socket import has_ipv6, AF_INET6, AF_INET, inet_ntop, inet_pton, getaddrinfo
try:
from boxbranding import getBoxType, getMachineBuild, getMachineBrand, getMachineName, getImageDistro, getImageVersion, getImageBuild, getOEVersion, getDriverDate
from enigma import getEnigmaVersionString
except:
from owibranding import getBoxType, getMachineBuild, getMachineBrand, getMachineName, getImageDistro, getImageVersion, getImageBuild, getOEVersion, getDriverDate
def getEnigmaVersionString():
return about.getEnigmaVersionString()
import NavigationInstance
import os
import sys
import time
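# NB: "import time" shadows the "time" function bound by the earlier
# "from time import time"; from here on, "time" refers to the module.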
import string
OPENWEBIFVER = "OWIF 1.0.2"
STATICBOXINFO = None
def getOpenWebifVer():
return OPENWEBIFVER
def getFriendlyImageDistro():
dist = getImageDistro().replace("openatv","OpenATV").replace("openhdf","OpenHDF")
return dist
def getIPMethod(iface):
# iNetwork.getAdapterAttribute is crap and not portable
ipmethod = _("SLAAC")
if fileExists('/etc/network/interfaces'):
ifaces = '/etc/network/interfaces'
for line in file(ifaces).readlines():
if not line.startswith('#'):
if line.startswith('iface') and "inet6" in line and iface in line:
if "static" in line:
ipmethod = _("static")
if "dhcp" in line:
ipmethod = _("DHCP")
if "manual" in line:
ipmethod = _("manual/disabled")
if "6to4" in line:
ipmethod = "6to4"
return ipmethod
def getIPv4Method(iface):
# iNetwork.getAdapterAttribute is crap and not portable
ipv4method = _("static")
if fileExists('/etc/network/interfaces'):
ifaces = '/etc/network/interfaces'
for line in file(ifaces).readlines():
if not line.startswith('#'):
if line.startswith('iface') and "inet " in line and iface in line:
if "static" in line:
ipv4method = _("static")
if "dhcp" in line:
ipv4method = _("DHCP")
if "manual" in line:
ipv4method = _("manual/disabled")
return ipv4method
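# Queries ethtool for the link speed and normalizes the unit label
# (e.g. "1000Mb/s" -> "1 GBit/s").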
def getLinkSpeed(iface):
speed = _("unknown")
try:
speed = os.popen('ethtool ' + iface + ' | grep Speed: | awk \'{ print $2 }\'').read().strip()
except:
pass
speed = str(speed)
speed = speed.replace("Mb/s"," MBit/s")
speed = speed.replace("10000 MBit/s","10 GBit/s")
speed = speed.replace("1000 MBit/s","1 GBit/s")
return speed
def getNICChipSet(iface):
nic = _("unknown")
try:
nic = os.popen('ethtool -i ' + iface + ' | grep driver: | awk \'{ print $2 }\'').read().strip()
except:
pass
nic = str(nic)
return nic
def getFriendlyNICChipSet(iface):
friendlynic = getNICChipSet(iface)
friendlynic = friendlynic.replace("bcmgenet", "Broadcom Generic Gigabit Ethernet")
return friendlynic
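# Canonicalizes an IPv6 address (optionally with prefix length), dropping a
# redundant /128 suffix.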
def normalize_ipv6(orig):
net = []
if '/' in orig:
net = orig.split('/')
if net[1] == "128":
del net[1]
else:
net.append(orig)
addr = net[0]
addr = inet_ntop(AF_INET6, inet_pton(AF_INET6, addr))
if len(net) == 2:
addr += "/" + net[1]
return (addr)
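# Reads /proc/net/if_inet6 for the given interface, skipping link-local
# (fe80::) entries; returns a display string plus the first global address.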
def getAdapterIPv6(ifname):
addr = _("IPv4-only kernel")
firstpublic = None
if fileExists('/proc/net/if_inet6'):
addr = _("IPv4-only Python/Twisted")
if has_ipv6 and version.major >= 12:
proc = '/proc/net/if_inet6'
tempaddrs = []
for line in file(proc).readlines():
if line.startswith('fe80'):
continue
tmpaddr = ""
tmp = line.split()
if ifname == tmp[5]:
tmpaddr = ":".join([ tmp[0][i:i+4] for i in range(0,len(tmp[0]),4) ])
if firstpublic is None and (tmpaddr.startswith('2') or tmpaddr.startswith('3')):
firstpublic = normalize_ipv6(tmpaddr)
if tmp[2].lower() != "ff":
tmpaddr = "%s/%s" % (tmpaddr, int(tmp[2].lower(), 16))
tmpaddr = normalize_ipv6(tmpaddr)
tempaddrs.append(tmpaddr)
if len(tempaddrs) > 1:
tempaddrs.sort()
addr = ', '.join(tempaddrs)
elif len(tempaddrs) == 1:
addr = tempaddrs[0]
elif len(tempaddrs) == 0:
addr = _("none/IPv4-only network")
return {'addr':addr, 'firstpublic':firstpublic }
def formatIp(ip):
if ip is None or len(ip) != 4:
return "0.0.0.0"
return "%d.%d.%d.%d" % (ip[0], ip[1], ip[2], ip[3])
def getBasePath():
path = os.path.dirname(sys.modules[__name__].__file__)
chunks = path.split("/")
chunks.pop()
chunks.pop()
return "/".join(chunks)
def getPublicPath(file = ""):
return getBasePath() + "/public/" + file
def getViewsPath(file = ""):
return getBasePath() + "/controllers/views/" + file
def getPiconPath():
if pathExists("/media/usb/picon/"):
return "/media/usb/picon/"
elif pathExists("/media/cf/picon/"):
return "/media/cf/picon/"
elif pathExists("/media/hdd/picon/"):
return "/media/hdd/picon/"
elif pathExists("/usr/share/enigma2/picon/"):
return "/usr/share/enigma2/picon/"
elif pathExists("/picon/"):
return "/picon/"
else:
return ""
def getInfo(session = None):
	# TODO: get webif version somewhere!
info = {}
info['brand'] = getMachineBrand()
info['model'] = getMachineName()
info['boxtype'] = getBoxType()
info['machinebuild'] = getMachineBuild()
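	# Derive a human-readable chipset: older SH4/SIGMA receivers need
	# model-specific guesses before /proc/stb/info/chipset is consulted.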
chipset = "unknown"
if fileExists("/etc/.box"):
f = open("/etc/.box",'r')
model = f.readline().strip().lower()
f.close()
if model.startswith("ufs") or model.startswith("ufc"):
if model in ("ufs910", "ufs922", "ufc960"):
chipset = "SH4 @266MHz"
else:
chipset = "SH4 @450MHz"
elif model in ("topf", "tf7700hdpvr"):
chipset = "SH4 @266MHz"
elif model.startswith("azbox"):
f = open("/proc/stb/info/model",'r')
model = f.readline().strip().lower()
f.close()
if model == "me":
chipset = "SIGMA 8655"
elif model == "minime":
chipset = "SIGMA 8653"
else:
chipset = "SIGMA 8634"
elif model.startswith("spark"):
if model == "spark7162":
chipset = "SH4 @540MHz"
else:
chipset = "SH4 @450MHz"
elif fileExists("/proc/stb/info/azmodel"):
f = open("/proc/stb/info/model",'r')
model = f.readline().strip().lower()
f.close()
if model == "me":
chipset = "SIGMA 8655"
elif model == "minime":
chipset = "SIGMA 8653"
else:
chipset = "SIGMA 8634"
elif fileExists("/proc/stb/info/model"):
f = open("/proc/stb/info/model",'r')
model = f.readline().strip().lower()
f.close()
if model == "tf7700hdpvr":
chipset = "SH4 @266MHz"
elif model == "nbox":
chipset = "STi7100 @266MHz"
elif model == "arivalink200":
chipset = "STi7109 @266MHz"
elif model in ("adb2850", "adb2849", "dsi87"):
chipset = "STi7111 @450MHz"
elif model in ("sagemcom88", "esi88"):
chipset = "STi7105 @450MHz"
elif model.startswith("spark"):
if model == "spark7162":
chipset = "STi7162 @540MHz"
else:
chipset = "STi7111 @450MHz"
if fileExists("/proc/stb/info/chipset"):
f = open("/proc/stb/info/chipset",'r')
chipset = f.readline().strip()
f.close()
info['chipset'] = chipset
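	# MemFree alone understates available RAM, so buffers and cache are
	# counted as free as well.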
memFree = 0
for line in open("/proc/meminfo",'r'):
parts = line.split(':')
key = parts[0].strip()
if key == "MemTotal":
info['mem1'] = parts[1].strip().replace("kB", _("kB"))
elif key in ("MemFree", "Buffers", "Cached"):
memFree += int(parts[1].strip().split(' ',1)[0])
info['mem2'] = "%s %s" % (memFree,_("kB"))
info['mem3'] = _("%s free / %s total") % (info['mem2'],info['mem1'])
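	# /proc/uptime holds seconds since boot; format it as "Nd HH:MM".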
try:
f = open("/proc/uptime", "rb")
uptime = int(float(f.readline().split(' ', 2)[0].strip()))
f.close()
uptimetext = ''
if uptime > 86400:
d = uptime/86400
uptime = uptime % 86400
uptimetext += '%dd ' % d
uptimetext += "%d:%.2d" % (uptime/3600, (uptime%3600)/60)
except:
uptimetext = "?"
info['uptime'] = uptimetext
info["webifver"] = getOpenWebifVer()
info['imagedistro'] = getImageDistro()
info['friendlyimagedistro'] = getFriendlyImageDistro()
info['oever'] = getOEVersion()
info['imagever'] = getImageVersion() + '.' + getImageBuild()
info['enigmaver'] = getEnigmaVersionString()
info['driverdate'] = getDriverDate()
info['kernelver'] = about.getKernelVersionString()
try:
from Tools.StbHardware import getFPVersion
except ImportError:
from Tools.DreamboxHardware import getFPVersion
try:
info['fp_version'] = getFPVersion()
except:
info['fp_version'] = None
friendlychipsetdescription = _("Chipset")
friendlychipsettext = info['chipset'].replace("bcm","Broadcom ")
if friendlychipsettext in ("7335", "7356", "7362", "73625", "7424", "7425", "7429"):
friendlychipsettext = "Broadcom " + friendlychipsettext
if not (info['fp_version'] is None or info['fp_version'] == 0):
friendlychipsetdescription = friendlychipsetdescription + " (" + _("Frontprocessor Version") + ")"
friendlychipsettext = friendlychipsettext + " (" + str(info['fp_version']) + ")"
info['friendlychipsetdescription'] = friendlychipsetdescription
info['friendlychipsettext'] = friendlychipsettext
info['tuners'] = []
for i in range(0, nimmanager.getSlotCount()):
info['tuners'].append({
"name": nimmanager.getNim(i).getSlotName(),
"type": nimmanager.getNimName(i) + " (" + nimmanager.getNim(i).getFriendlyType() + ")",
"rec": "",
"live": ""
})
info['ifaces'] = []
ifaces = iNetwork.getConfiguredAdapters()
	for iface in ifaces:
		info['ifaces'].append({
			"name": iNetwork.getAdapterName(iface),
			"friendlynic": getFriendlyNICChipSet(iface),
			"linkspeed": getLinkSpeed(iface),
			"mac": iNetwork.getAdapterAttribute(iface, "mac"),
			"dhcp": iNetwork.getAdapterAttribute(iface, "dhcp"),
			"ipv4method": getIPv4Method(iface),
			"ip": formatIp(iNetwork.getAdapterAttribute(iface, "ip")),
			"mask": formatIp(iNetwork.getAdapterAttribute(iface, "netmask")),
			"v4prefix": sum([bin(int(x)).count('1') for x in formatIp(iNetwork.getAdapterAttribute(iface, "netmask")).split('.')]),
			"gw": formatIp(iNetwork.getAdapterAttribute(iface, "gateway")),
			"ipv6": getAdapterIPv6(iface)['addr'],
			"ipmethod": getIPMethod(iface),
			"firstpublic": getAdapterIPv6(iface)['firstpublic']
		})
info['hdd'] = []
for hdd in harddiskmanager.hdd:
dev = hdd.findMount()
if dev:
stat = os.statvfs(dev)
free = int((stat.f_bfree/1024) * (stat.f_bsize/1024))
else:
free = -1
if free <= 1024:
free = "%i %s" % (free,_("MB"))
else:
free = free / 1024.
free = "%.1f %s" % (free,_("GB"))
size = hdd.diskSize() * 1000000 / 1048576.
if size > 1048576:
size = "%.1f %s" % ((size / 1048576.),_("TB"))
elif size > 1024:
size = "%.1f %s" % ((size / 1024.),_("GB"))
else:
size = "%d %s" % (size,_("MB"))
iecsize = hdd.diskSize()
# Harddisks > 1000 decimal Gigabytes are labelled in TB
if iecsize > 1000000:
iecsize = (iecsize + 50000) // float(100000) / 10
# Omit decimal fraction if it is 0
if (iecsize % 1 > 0):
iecsize = "%.1f %s" % (iecsize,_("TB"))
else:
iecsize = "%d %s" % (iecsize,_("TB"))
# Round harddisk sizes beyond ~300GB to full tens: 320, 500, 640, 750GB
elif iecsize > 300000:
iecsize = "%d %s" % (((iecsize + 5000) // 10000 * 10),_("GB"))
# ... be more precise for media < ~300GB (Sticks, SSDs, CF, MMC, ...): 1, 2, 4, 8, 16 ... 256GB
elif iecsize > 1000:
iecsize = "%d %s" % (((iecsize + 500) // 1000),_("GB"))
else:
iecsize = "%d %s" % (iecsize,_("MB"))
info['hdd'].append({
"model": hdd.model(),
"capacity": size,
"labelled_capacity": iecsize,
"free": free,
"mount": dev,
"friendlycapacity": _("%s free / %s total") % (free,size+' ("'+iecsize+'")')
})
info['shares'] = []
if fileExists('/etc/auto.network'):
autofs = '/etc/auto.network'
method = "autofs"
for line in file(autofs).readlines():
if not line.startswith('#'):
# Replace escaped spaces that can appear inside credentials with underscores
# Not elegant but we wouldn't want to expose credentials on the OWIF anyways
tmpline = line.replace("\ ","_")
tmp = tmpline.split()
			if len(tmp) != 3:
continue
name = tmp[0].strip()
type = "unknown"
if "cifs" in tmp[1]:
# Linux still defaults to SMBv1
type = "SMBv1.0"
settings = tmp[1].split(",")
for setting in settings:
if setting.startswith("vers="):
type = setting.replace("vers=", "SMBv")
elif "nfs" in tmp[1]:
type = "NFS"
# Default is r/w
mode = _("r/w")
settings = tmp[1].split(",")
for setting in settings:
if setting == "ro":
mode = _("r/o")
uri = tmp[2]
parts = []
parts = tmp[2].split(':')
if parts[0] is "":
server = uri.split('/')[2]
uri = uri.strip()[1:]
else:
server = parts[0]
ipaddress = None
if server:
# Will fail on literal IPs
try:
# Try IPv6 first, as will Linux
if has_ipv6:
tmpaddress = None
tmpaddress = getaddrinfo(server, 0, AF_INET6)
if tmpaddress:
ipaddress = "[" + list(tmpaddress)[0][4][0] + "]"
# Use IPv4 if IPv6 fails or is not present
if ipaddress is None:
tmpaddress = None
tmpaddress = getaddrinfo(server, 0, AF_INET)
if tmpaddress:
ipaddress = list(tmpaddress)[0][4][0]
except:
pass
friendlyaddress = server
if ipaddress is not None and not ipaddress == server:
friendlyaddress = server + " ("+ ipaddress + ")"
info['shares'].append({
"name": name,
"method": method,
"type": type,
"mode": mode,
"path": uri,
"host": server,
"ipaddress": ipaddress,
"friendlyaddress": friendlyaddress
})
# TODO: fstab
info['transcoding'] = False
if (info['model'] in ("Solo4K", "Solo²", "Duo²", "Solo SE", "Quad", "Quad Plus") or info['machinebuild'] in ('inihdp', 'hd2400', 'et10000', 'xpeedlx3', 'ew7356', 'dags3', 'dags4')):
if os.path.exists(eEnv.resolve('${libdir}/enigma2/python/Plugins/SystemPlugins/TransCodingSetup/plugin.pyo')) or os.path.exists(eEnv.resolve('${libdir}/enigma2/python/Plugins/SystemPlugins/TranscodingSetup/plugin.pyo')) or os.path.exists(eEnv.resolve('${libdir}/enigma2/python/Plugins/SystemPlugins/MultiTransCodingSetup/plugin.pyo')):
info['transcoding'] = True
info['kinopoisk'] = False
lang = ['ru', 'uk', 'lv', 'lt', 'et']
for l in lang:
if l in language.getLanguage():
info['kinopoisk'] = True
info['EX'] = ''
if session:
try:
recs = NavigationInstance.instance.getRecordings()
if recs:
# only one stream and only TV
from Plugins.Extensions.OpenWebif.controllers.stream import streamList
s_name = ''
s_cip = ''
if len(streamList)==1:
from Screens.ChannelSelection import service_types_tv
from enigma import eEPGCache
epgcache = eEPGCache.getInstance()
serviceHandler = eServiceCenter.getInstance()
services = serviceHandler.list(eServiceReference('%s ORDER BY name'%(service_types_tv)))
channels = services and services.getContent("SN", True)
s = streamList[0]
srefs = s.ref.toString()
for channel in channels:
if srefs == channel[0]:
s_name = channel[1] + ' (' + s.clientIP + ')'
break
sname = ''
timers = []
for timer in NavigationInstance.instance.RecordTimer.timer_list:
if timer.isRunning() and not timer.justplay:
timers.append(timer.service_ref.getServiceName().replace('\xc2\x86', '').replace('\xc2\x87', ''))
# only one recording
if len(timers) == 1:
sname = timers[0]
if sname == '' and s_name != '':
sname = s_name
for rec in recs:
feinfo = rec.frontendInfo()
frontendData = feinfo and feinfo.getAll(True)
cur_info = feinfo.getTransponderData(True)
nr = frontendData['tuner_number']
info['tuners'][nr]['rec'] = getOrbitalText(cur_info) + ' / ' + sname
service = session.nav.getCurrentService()
if service is not None:
sname = service.info().getName()
feinfo = service.frontendInfo()
frontendData = feinfo and feinfo.getAll(True)
cur_info = feinfo.getTransponderData(True)
if cur_info:
nr = frontendData['tuner_number']
info['tuners'][nr]['live'] = getOrbitalText(cur_info) + ' / ' + sname
		except Exception as error:
info['EX'] = error
global STATICBOXINFO
STATICBOXINFO = info
return info
def getOrbitalText(cur_info):
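	# DVB-S orbital positions are encoded in tenths of a degree east (0-3600);
	# values above 1800 are shown as western positions.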
if cur_info:
tunerType = cur_info.get('tuner_type')
if tunerType == "DVB-S":
pos = int(cur_info.get('orbital_position'))
direction = 'E'
if pos > 1800:
pos = 3600 - pos
direction = 'W'
return "%d.%d° %s" % (pos/10, pos%10, direction)
return tunerType
return ''
def getFrontendStatus(session):
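	# The driver reports SNR/AGC/BER as 16-bit raw values; scale them to 0-100.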
inf = {}
inf['tunertype'] = ""
inf['tunernumber'] = ""
inf['snr'] = ""
inf['snr_db'] = ""
inf['agc'] = ""
inf['ber'] = ""
service = session.nav.getCurrentService()
if service is None:
return inf
feinfo = service.frontendInfo()
frontendData = feinfo and feinfo.getAll(True)
if frontendData is not None:
inf['tunertype'] = frontendData.get("tuner_type", "UNKNOWN")
inf['tunernumber'] = frontendData.get("tuner_number")
frontendStatus = feinfo and feinfo.getFrontendStatus()
if frontendStatus is not None:
percent = frontendStatus.get("tuner_signal_quality")
if percent is not None:
inf['snr'] = int(percent * 100 / 65536)
inf['snr_db'] = inf['snr']
percent = frontendStatus.get("tuner_signal_quality_db")
if percent is not None:
inf['snr_db'] = "%3.02f" % (percent / 100.0)
percent = frontendStatus.get("tuner_signal_power")
if percent is not None:
inf['agc'] = int(percent * 100 / 65536)
percent = frontendStatus.get("tuner_bit_error_rate")
if percent is not None:
inf['ber'] = int(percent * 100 / 65536)
return inf
def getCurrentTime():
t = time.localtime()
return {
"status": True,
"time": "%2d:%02d:%02d" % (t.tm_hour, t.tm_min, t.tm_sec)
}
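# The two helpers below lazily populate STATICBOXINFO on first use.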
def getTranscodingSupport():
global STATICBOXINFO
if STATICBOXINFO is None:
getInfo()
return STATICBOXINFO['transcoding']
def getLanguage():
global STATICBOXINFO
if STATICBOXINFO is None:
getInfo()
return STATICBOXINFO['kinopoisk']
def getStatusInfo(self):
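	# Snapshot of the current player state: volume/mute, transcoding support,
	# the running service and event, standby flag and active recordings.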
statusinfo = {}
# Get Current Volume and Mute Status
vcontrol = eDVBVolumecontrol.getInstance()
statusinfo['volume'] = vcontrol.getVolume()
statusinfo['muted'] = vcontrol.isMuted()
statusinfo['transcoding'] = getTranscodingSupport()
# Get currently running Service
event = None
serviceref = self.session.nav.getCurrentlyPlayingServiceReference()
if serviceref is not None:
serviceHandler = eServiceCenter.getInstance()
serviceHandlerInfo = serviceHandler.info(serviceref)
service = self.session.nav.getCurrentService()
serviceinfo = service and service.info()
event = serviceinfo and serviceinfo.getEvent(0)
else:
event = None
statusinfo['currservice_filename'] = ""
if event is not None:
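		# parseEvent() pads begin/end with the configured recording margins;
		# strip them again here so the real event times are displayed.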
curEvent = parseEvent(event)
statusinfo['currservice_name'] = curEvent[2].replace('\xc2\x86', '').replace('\xc2\x87', '')
statusinfo['currservice_serviceref'] = serviceref.toString()
statusinfo['currservice_begin'] = strftime("%H:%M", (localtime(int(curEvent[0])+(config.recording.margin_before.value*60))))
statusinfo['currservice_end'] = strftime("%H:%M", (localtime(int(curEvent[1])-(config.recording.margin_after.value*60))))
statusinfo['currservice_description'] = curEvent[3]
if len(curEvent[3].decode('utf-8')) > 220:
statusinfo['currservice_description'] = curEvent[3].decode('utf-8')[0:220].encode('utf-8') + "..."
statusinfo['currservice_station'] = serviceHandlerInfo.getName(serviceref).replace('\xc2\x86', '').replace('\xc2\x87', '')
if statusinfo['currservice_serviceref'].startswith('1:0:0'):
statusinfo['currservice_filename'] = '/' + '/'.join(serviceref.toString().split("/")[1:])
full_desc = statusinfo['currservice_name'] + '\n'
full_desc += statusinfo['currservice_begin'] + " - " + statusinfo['currservice_end'] + '\n\n'
full_desc += event.getExtendedDescription().replace('\xc2\x86', '').replace('\xc2\x87', '').replace('\xc2\x8a', '\n')
statusinfo['currservice_fulldescription'] = full_desc
else:
statusinfo['currservice_name'] = "N/A"
statusinfo['currservice_begin'] = ""
statusinfo['currservice_end'] = ""
statusinfo['currservice_description'] = ""
statusinfo['currservice_fulldescription'] = "N/A"
if serviceref:
statusinfo['currservice_serviceref'] = serviceref.toString()
if serviceHandlerInfo:
statusinfo['currservice_station'] = serviceHandlerInfo.getName(serviceref).replace('\xc2\x86', '').replace('\xc2\x87', '')
elif serviceref.toString().find("http") != -1:
statusinfo['currservice_station'] = serviceref.toString().replace('%3a', ':')[serviceref.toString().find("http"):]
else:
statusinfo['currservice_station'] = "N/A"
# Get Standby State
from Screens.Standby import inStandby
	if inStandby is None:
statusinfo['inStandby'] = "false"
else:
statusinfo['inStandby'] = "true"
# Get recording state
recs = NavigationInstance.instance.getRecordings()
if recs:
statusinfo['isRecording'] = "true"
statusinfo['Recording_list'] = "\n"
for timer in NavigationInstance.instance.RecordTimer.timer_list:
if timer.state == TimerEntry.StateRunning:
if not timer.justplay:
statusinfo['Recording_list'] += timer.service_ref.getServiceName().replace('\xc2\x86', '').replace('\xc2\x87', '') + ": " + timer.name + "\n"
else:
statusinfo['isRecording'] = "false"
return statusinfo
def getAlternativeChannels(service):
alternativeServices = eServiceCenter.getInstance().list(eServiceReference(service))
return alternativeServices and alternativeServices.getContent("S", True)
def GetWithAlternative(service,onlyFirst = True):
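	# A "1:134:" reference is an alternatives group; resolve it to its member
	# services, otherwise fall through to the plain reference.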
if service.startswith('1:134:'):
channels = getAlternativeChannels(service)
if channels:
if onlyFirst:
return channels[0]
else:
return channels
if onlyFirst:
return service
else:
return None
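# Usage sketch (comments only, not executed on import):
#
#	info = getInfo(session)		# full box snapshot, cached in STATICBOXINFO
#	sref = GetWithAlternative(ref)	# first concrete member for "1:134:" groups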
# -*- coding: utf-8 -*-
##############################################################################
# 2011 E2OpenPlugins #
# #
# This file is open source software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
##############################################################################
from Plugins.Extensions.OpenWebif.__init__ import _
from Components.About import about
from Components.config import config
from Components.NimManager import nimmanager
from Components.Harddisk import harddiskmanager
from Components.Network import iNetwork
from Components.Language import language
from RecordTimer import parseEvent
from Screens.Standby import inStandby
from timer import TimerEntry
from Tools.Directories import fileExists, pathExists
from time import time, localtime, strftime
from enigma import eDVBVolumecontrol, eServiceCenter, eServiceReference, eEnv
from twisted.web import version
from socket import has_ipv6, AF_INET6, AF_INET, inet_ntop, inet_pton, getaddrinfo
try:
from boxbranding import getBoxType, getMachineBuild, getMachineBrand, getMachineName, getImageDistro, getImageVersion, getImageBuild, getOEVersion, getDriverDate
from enigma import getEnigmaVersionString
except:
from owibranding import getBoxType, getMachineBuild, getMachineBrand, getMachineName, getImageDistro, getImageVersion, getImageBuild, getOEVersion, getDriverDate
def getEnigmaVersionString():
return about.getEnigmaVersionString()
import NavigationInstance
import os
import sys
import time
import string
OPENWEBIFVER = "OWIF 1.0.2"
STATICBOXINFO = None
def getOpenWebifVer():
return OPENWEBIFVER
def getFriendlyImageDistro():
dist = getImageDistro().replace("openatv","OpenATV").replace("openhdf","OpenHDF")
return dist
def getIPMethod(iface):
# iNetwork.getAdapterAttribute is crap and not portable
ipmethod = _("SLAAC")
if fileExists('/etc/network/interfaces'):
ifaces = '/etc/network/interfaces'
for line in file(ifaces).readlines():
if not line.startswith('#'):
if line.startswith('iface') and "inet6" in line and iface in line:
if "static" in line:
ipmethod = _("static")
if "dhcp" in line:
ipmethod = _("DHCP")
if "manual" in line:
ipmethod = _("manual/disabled")
if "6to4" in line:
ipmethod = "6to4"
return ipmethod
def getIPv4Method(iface):
# iNetwork.getAdapterAttribute is crap and not portable
ipv4method = _("static")
if fileExists('/etc/network/interfaces'):
ifaces = '/etc/network/interfaces'
for line in file(ifaces).readlines():
if not line.startswith('#'):
if line.startswith('iface') and "inet " in line and iface in line:
if "static" in line:
ipv4method = _("static")
if "dhcp" in line:
ipv4method = _("DHCP")
if "manual" in line:
ipv4method = _("manual/disabled")
return ipv4method
def getLinkSpeed(iface):
speed = _("unknown")
try:
speed = os.popen('ethtool ' + iface + ' | grep Speed: | awk \'{ print $2 }\'').read().strip()
except:
pass
speed = str(speed)
speed = speed.replace("Mb/s"," MBit/s")
speed = speed.replace("10000 MBit/s","10 GBit/s")
speed = speed.replace("1000 MBit/s","1 GBit/s")
return speed
def getNICChipSet(iface):
nic = _("unknown")
try:
nic = os.popen('ethtool -i ' + iface + ' | grep driver: | awk \'{ print $2 }\'').read().strip()
except:
pass
nic = str(nic)
return nic
def getFriendlyNICChipSet(iface):
friendlynic = getNICChipSet(iface)
friendlynic = friendlynic.replace("bcmgenet", "Broadcom Generic Gigabit Ethernet")
return friendlynic
def normalize_ipv6(orig):
net = []
if '/' in orig:
net = orig.split('/')
if net[1] == "128":
del net[1]
else:
net.append(orig)
addr = net[0]
addr = inet_ntop(AF_INET6, inet_pton(AF_INET6, addr))
if len(net) == 2:
addr += "/" + net[1]
return (addr)
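# Illustrative sketch (added; not in the original module). normalize_ipv6
# canonicalizes via inet_pton/inet_ntop and keeps any prefix except /128,
# assuming standard socket semantics:
#   normalize_ipv6('2001:0db8:0000:0000:0000:0000:0000:0001/128')  # '2001:db8::1'
#   normalize_ipv6('2001:0db8::0001/64')                           # '2001:db8::1/64'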
def getAdapterIPv6(ifname):
addr = _("IPv4-only kernel")
firstpublic = None
if fileExists('/proc/net/if_inet6'):
addr = _("IPv4-only Python/Twisted")
if has_ipv6 and version.major >= 12:
proc = '/proc/net/if_inet6'
tempaddrs = []
for line in file(proc).readlines():
if line.startswith('fe80'):
continue
tmpaddr = ""
tmp = line.split()
if ifname == tmp[5]:
tmpaddr = ":".join([ tmp[0][i:i+4] for i in range(0,len(tmp[0]),4) ])
if firstpublic is None and (tmpaddr.startswith('2') or tmpaddr.startswith('3')):
firstpublic = normalize_ipv6(tmpaddr)
if tmp[2].lower() != "ff":
tmpaddr = "%s/%s" % (tmpaddr, int(tmp[2].lower(), 16))
tmpaddr = normalize_ipv6(tmpaddr)
tempaddrs.append(tmpaddr)
if len(tempaddrs) > 1:
tempaddrs.sort()
addr = ', '.join(tempaddrs)
elif len(tempaddrs) == 1:
addr = tempaddrs[0]
elif len(tempaddrs) == 0:
addr = _("none/IPv4-only network")
return {'addr':addr, 'firstpublic':firstpublic }
def formatIp(ip):
if ip is None or len(ip) != 4:
return "0.0.0.0"
return "%d.%d.%d.%d" % (ip[0], ip[1], ip[2], ip[3])
def getBasePath():
path = os.path.dirname(sys.modules[__name__].__file__)
chunks = path.split("/")
chunks.pop()
chunks.pop()
return "/".join(chunks)
def getPublicPath(file = ""):
return getBasePath() + "/public/" + file
def getViewsPath(file = ""):
return getBasePath() + "/controllers/views/" + file
def getPiconPath():
if pathExists("/media/usb/picon/"):
return "/media/usb/picon/"
elif pathExists("/media/cf/picon/"):
return "/media/cf/picon/"
elif pathExists("/media/hdd/picon/"):
return "/media/hdd/picon/"
elif pathExists("/usr/share/enigma2/picon/"):
return "/usr/share/enigma2/picon/"
elif pathExists("/picon/"):
return "/picon/"
else:
return ""
def getInfo(session = None):
	# TODO: get webif version somewhere!
info = {}
info['brand'] = getMachineBrand()
info['model'] = getMachineName()
info['boxtype'] = getBoxType()
info['machinebuild'] = getMachineBuild()
chipset = "unknown"
if fileExists("/etc/.box"):
f = open("/etc/.box",'r')
model = f.readline().strip().lower()
f.close()
if model.startswith("ufs") or model.startswith("ufc"):
if model in ("ufs910", "ufs922", "ufc960"):
chipset = "SH4 @266MHz"
else:
chipset = "SH4 @450MHz"
elif model in ("topf", "tf7700hdpvr"):
chipset = "SH4 @266MHz"
elif model.startswith("azbox"):
f = open("/proc/stb/info/model",'r')
model = f.readline().strip().lower()
f.close()
if model == "me":
chipset = "SIGMA 8655"
elif model == "minime":
chipset = "SIGMA 8653"
else:
chipset = "SIGMA 8634"
elif model.startswith("spark"):
if model == "spark7162":
chipset = "SH4 @540MHz"
else:
chipset = "SH4 @450MHz"
elif fileExists("/proc/stb/info/azmodel"):
f = open("/proc/stb/info/model",'r')
model = f.readline().strip().lower()
f.close()
if model == "me":
chipset = "SIGMA 8655"
elif model == "minime":
chipset = "SIGMA 8653"
else:
chipset = "SIGMA 8634"
elif fileExists("/proc/stb/info/model"):
f = open("/proc/stb/info/model",'r')
model = f.readline().strip().lower()
f.close()
if model == "tf7700hdpvr":
chipset = "SH4 @266MHz"
elif model == "nbox":
chipset = "STi7100 @266MHz"
elif model == "arivalink200":
chipset = "STi7109 @266MHz"
elif model in ("adb2850", "adb2849", "dsi87"):
chipset = "STi7111 @450MHz"
elif model in ("sagemcom88", "esi88"):
chipset = "STi7105 @450MHz"
elif model.startswith("spark"):
if model == "spark7162":
chipset = "STi7162 @540MHz"
else:
chipset = "STi7111 @450MHz"
if fileExists("/proc/stb/info/chipset"):
f = open("/proc/stb/info/chipset",'r')
chipset = f.readline().strip()
f.close()
info['chipset'] = chipset
memFree = 0
for line in open("/proc/meminfo",'r'):
parts = line.split(':')
key = parts[0].strip()
if key == "MemTotal":
info['mem1'] = parts[1].strip().replace("kB", _("kB"))
elif key in ("MemFree", "Buffers", "Cached"):
memFree += int(parts[1].strip().split(' ',1)[0])
info['mem2'] = "%s %s" % (memFree,_("kB"))
info['mem3'] = _("%s free / %s total") % (info['mem2'],info['mem1'])
try:
f = open("/proc/uptime", "rb")
uptime = int(float(f.readline().split(' ', 2)[0].strip()))
f.close()
uptimetext = ''
if uptime > 86400:
d = uptime/86400
uptime = uptime % 86400
uptimetext += '%dd ' % d
uptimetext += "%d:%.2d" % (uptime/3600, (uptime%3600)/60)
except:
uptimetext = "?"
info['uptime'] = uptimetext
info["webifver"] = getOpenWebifVer()
info['imagedistro'] = getImageDistro()
info['friendlyimagedistro'] = getFriendlyImageDistro()
info['oever'] = getOEVersion()
info['imagever'] = getImageVersion() + '.' + getImageBuild()
info['enigmaver'] = getEnigmaVersionString()
info['driverdate'] = getDriverDate()
info['kernelver'] = about.getKernelVersionString()
try:
from Tools.StbHardware import getFPVersion
except ImportError:
from Tools.DreamboxHardware import getFPVersion
try:
info['fp_version'] = getFPVersion()
except:
info['fp_version'] = None
friendlychipsetdescription = _("Chipset")
friendlychipsettext = info['chipset'].replace("bcm","Broadcom ")
if friendlychipsettext in ("7335", "7356", "7362", "73625", "7424", "7425", "7429"):
friendlychipsettext = "Broadcom " + friendlychipsettext
if not (info['fp_version'] is None or info['fp_version'] == 0):
friendlychipsetdescription = friendlychipsetdescription + " (" + _("Frontprocessor Version") + ")"
friendlychipsettext = friendlychipsettext + " (" + str(info['fp_version']) + ")"
info['friendlychipsetdescription'] = friendlychipsetdescription
info['friendlychipsettext'] = friendlychipsettext
info['tuners'] = []
for i in range(0, nimmanager.getSlotCount()):
info['tuners'].append({
"name": nimmanager.getNim(i).getSlotName(),
"type": nimmanager.getNimName(i) + " (" + nimmanager.getNim(i).getFriendlyType() + ")",
"rec": "",
"live": ""
})
info['ifaces'] = []
ifaces = iNetwork.getConfiguredAdapters()
for iface in ifaces:
info['ifaces'].append({
"name": iNetwork.getAdapterName(iface),
"friendlynic": getFriendlyNICChipSet(iface),
"linkspeed": getLinkSpeed(iface),
"mac": iNetwork.getAdapterAttribute(iface, "mac"),
"dhcp": iNetwork.getAdapterAttribute(iface, "dhcp"),
"ipv4method": getIPv4Method(iface),
"ip": formatIp(iNetwork.getAdapterAttribute(iface, "ip")),
"mask": formatIp(iNetwork.getAdapterAttribute(iface, "netmask")),
"v4prefix": sum([bin(int(x)).count('1') for x in formatIp(iNetwork.getAdapterAttribute(iface, "netmask")).split('.')]),
"gw": formatIp(iNetwork.getAdapterAttribute(iface, "gateway")),
"ipv6": getAdapterIPv6(iface)['addr'],
"ipmethod": getIPMethod(iface),
"firstpublic": getAdapterIPv6(iface)['firstpublic']
})
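	# Added note: "v4prefix" above derives the CIDR length by popcounting the
	# netmask octets, e.g.
	#   sum(bin(int(x)).count('1') for x in "255.255.252.0".split('.'))  # == 22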
info['hdd'] = []
for hdd in harddiskmanager.hdd:
dev = hdd.findMount()
if dev:
stat = os.statvfs(dev)
free = int((stat.f_bfree/1024) * (stat.f_bsize/1024))
else:
free = -1
if free <= 1024:
free = "%i %s" % (free,_("MB"))
else:
free = free / 1024.
free = "%.1f %s" % (free,_("GB"))
size = hdd.diskSize() * 1000000 / 1048576.
if size > 1048576:
size = "%.1f %s" % ((size / 1048576.),_("TB"))
elif size > 1024:
size = "%.1f %s" % ((size / 1024.),_("GB"))
else:
size = "%d %s" % (size,_("MB"))
iecsize = hdd.diskSize()
# Harddisks > 1000 decimal Gigabytes are labelled in TB
if iecsize > 1000000:
iecsize = (iecsize + 50000) // float(100000) / 10
# Omit decimal fraction if it is 0
if (iecsize % 1 > 0):
iecsize = "%.1f %s" % (iecsize,_("TB"))
else:
iecsize = "%d %s" % (iecsize,_("TB"))
# Round harddisk sizes beyond ~300GB to full tens: 320, 500, 640, 750GB
elif iecsize > 300000:
iecsize = "%d %s" % (((iecsize + 5000) // 10000 * 10),_("GB"))
# ... be more precise for media < ~300GB (Sticks, SSDs, CF, MMC, ...): 1, 2, 4, 8, 16 ... 256GB
elif iecsize > 1000:
iecsize = "%d %s" % (((iecsize + 500) // 1000),_("GB"))
else:
iecsize = "%d %s" % (iecsize,_("MB"))
info['hdd'].append({
"model": hdd.model(),
"capacity": size,
"labelled_capacity": iecsize,
"free": free,
"mount": dev,
"friendlycapacity": _("%s free / %s total") % (free,size+' ("'+iecsize+'")')
})
info['shares'] = []
if fileExists('/etc/auto.network'):
autofs = '/etc/auto.network'
method = "autofs"
for line in file(autofs).readlines():
if not line.startswith('#'):
# Replace escaped spaces that can appear inside credentials with underscores
# Not elegant but we wouldn't want to expose credentials on the OWIF anyways
tmpline = line.replace("\ ","_")
tmp = tmpline.split()
if not len(tmp) == 3:
continue
name = tmp[0].strip()
type = "unknown"
if "cifs" in tmp[1]:
# Linux still defaults to SMBv1
type = "SMBv1.0"
settings = tmp[1].split(",")
for setting in settings:
if setting.startswith("vers="):
type = setting.replace("vers=", "SMBv")
elif "nfs" in tmp[1]:
type = "NFS"
# Default is r/w
mode = _("r/w")
settings = tmp[1].split(",")
for setting in settings:
if setting == "ro":
mode = _("r/o")
uri = tmp[2]
				parts = tmp[2].split(':')
				if parts[0] == "":
server = uri.split('/')[2]
uri = uri.strip()[1:]
else:
server = parts[0]
ipaddress = None
if server:
# Will fail on literal IPs
try:
# Try IPv6 first, as will Linux
if has_ipv6:
tmpaddress = None
tmpaddress = getaddrinfo(server, 0, AF_INET6)
if tmpaddress:
ipaddress = "[" + list(tmpaddress)[0][4][0] + "]"
# Use IPv4 if IPv6 fails or is not present
if ipaddress is None:
tmpaddress = None
tmpaddress = getaddrinfo(server, 0, AF_INET)
if tmpaddress:
ipaddress = list(tmpaddress)[0][4][0]
except:
pass
friendlyaddress = server
if ipaddress is not None and not ipaddress == server:
friendlyaddress = server + " ("+ ipaddress + ")"
info['shares'].append({
"name": name,
"method": method,
"type": type,
"mode": mode,
"path": uri,
"host": server,
"ipaddress": ipaddress,
"friendlyaddress": friendlyaddress
})
# TODO: fstab
info['transcoding'] = False
if (info['model'] in ("Solo4K", "Solo²", "Duo²", "Solo SE", "Quad", "Quad Plus") or info['machinebuild'] in ('inihdp', 'hd2400', 'et10000', 'xpeedlx3', 'ew7356', 'dags3', 'dags4')):
if os.path.exists(eEnv.resolve('${libdir}/enigma2/python/Plugins/SystemPlugins/TransCodingSetup/plugin.pyo')) or os.path.exists(eEnv.resolve('${libdir}/enigma2/python/Plugins/SystemPlugins/TranscodingSetup/plugin.pyo')) or os.path.exists(eEnv.resolve('${libdir}/enigma2/python/Plugins/SystemPlugins/MultiTransCodingSetup/plugin.pyo')):
info['transcoding'] = True
info['kinopoisk'] = False
lang = ['ru', 'uk', 'lv', 'lt', 'et']
for l in lang:
if l in language.getLanguage():
info['kinopoisk'] = True
info['EX'] = ''
if session:
try:
recs = NavigationInstance.instance.getRecordings()
if recs:
# only one stream and only TV
from Plugins.Extensions.OpenWebif.controllers.stream import streamList
s_name = ''
s_cip = ''
if len(streamList)==1:
from Screens.ChannelSelection import service_types_tv
from enigma import eEPGCache
epgcache = eEPGCache.getInstance()
serviceHandler = eServiceCenter.getInstance()
services = serviceHandler.list(eServiceReference('%s ORDER BY name'%(service_types_tv)))
channels = services and services.getContent("SN", True)
s = streamList[0]
srefs = s.ref.toString()
for channel in channels:
if srefs == channel[0]:
s_name = channel[1] + ' (' + s.clientIP + ')'
break
sname = ''
timers = []
for timer in NavigationInstance.instance.RecordTimer.timer_list:
if timer.isRunning() and not timer.justplay:
timers.append(timer.service_ref.getServiceName().replace('\xc2\x86', '').replace('\xc2\x87', ''))
# only one recording
if len(timers) == 1:
sname = timers[0]
if sname == '' and s_name != '':
sname = s_name
for rec in recs:
feinfo = rec.frontendInfo()
frontendData = feinfo and feinfo.getAll(True)
cur_info = feinfo.getTransponderData(True)
nr = frontendData['tuner_number']
info['tuners'][nr]['rec'] = getOrbitalText(cur_info) + ' / ' + sname
service = session.nav.getCurrentService()
if service is not None:
sname = service.info().getName()
feinfo = service.frontendInfo()
frontendData = feinfo and feinfo.getAll(True)
cur_info = feinfo.getTransponderData(True)
if cur_info:
nr = frontendData['tuner_number']
info['tuners'][nr]['live'] = getOrbitalText(cur_info) + ' / ' + sname
except Exception, error:
info['EX'] = error
global STATICBOXINFO
STATICBOXINFO = info
return info
def getOrbitalText(cur_info):
if cur_info:
tunerType = cur_info.get('tuner_type')
if tunerType == "DVB-S":
pos = int(cur_info.get('orbital_position'))
direction = 'E'
if pos > 1800:
pos = 3600 - pos
direction = 'W'
return "%d.%d° %s" % (pos/10, pos%10, direction)
return tunerType
return ''
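# Worked example (added for clarity): DVB-S orbital_position is tenths of a
# degree east, wrapping past 180.0E to west, so 3560 renders as "4.0° W" and
# 192 as "19.2° E".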
def get | ssion):
inf = {}
inf['tunertype'] = ""
inf['tunernumber'] = ""
inf['snr'] = ""
inf['snr_db'] = ""
inf['agc'] = ""
inf['ber'] = ""
service = session.nav.getCurrentService()
if service is None:
return inf
feinfo = service.frontendInfo()
frontendData = feinfo and feinfo.getAll(True)
if frontendData is not None:
inf['tunertype'] = frontendData.get("tuner_type", "UNKNOWN")
inf['tunernumber'] = frontendData.get("tuner_number")
frontendStatus = feinfo and feinfo.getFrontendStatus()
if frontendStatus is not None:
percent = frontendStatus.get("tuner_signal_quality")
if percent is not None:
inf['snr'] = int(percent * 100 / 65536)
inf['snr_db'] = inf['snr']
percent = frontendStatus.get("tuner_signal_quality_db")
if percent is not None:
inf['snr_db'] = "%3.02f" % (percent / 100.0)
percent = frontendStatus.get("tuner_signal_power")
if percent is not None:
inf['agc'] = int(percent * 100 / 65536)
percent = frontendStatus.get("tuner_bit_error_rate")
if percent is not None:
inf['ber'] = int(percent * 100 / 65536)
return inf
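# Added note: the driver reports 16-bit values, so the percentage fields are
# scaled as value * 100 / 65536; e.g. tuner_signal_quality == 49152 yields
# snr == 75.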
def getCurrentTime():
t = time.localtime()
return {
"status": True,
"time": "%2d:%02d:%02d" % (t.tm_hour, t.tm_min, t.tm_sec)
}
def getTranscodingSupport():
global STATICBOXINFO
if STATICBOXINFO is None:
getInfo()
return STATICBOXINFO['transcoding']
def getLanguage():
global STATICBOXINFO
if STATICBOXINFO is None:
getInfo()
return STATICBOXINFO['kinopoisk']
def getStatusInfo(self):
statusinfo = {}
# Get Current Volume and Mute Status
vcontrol = eDVBVolumecontrol.getInstance()
statusinfo['volume'] = vcontrol.getVolume()
statusinfo['muted'] = vcontrol.isMuted()
statusinfo['transcoding'] = getTranscodingSupport()
# Get currently running Service
event = None
serviceref = self.session.nav.getCurrentlyPlayingServiceReference()
if serviceref is not None:
serviceHandler = eServiceCenter.getInstance()
serviceHandlerInfo = serviceHandler.info(serviceref)
service = self.session.nav.getCurrentService()
serviceinfo = service and service.info()
event = serviceinfo and serviceinfo.getEvent(0)
else:
event = None
statusinfo['currservice_filename'] = ""
if event is not None:
curEvent = parseEvent(event)
statusinfo['currservice_name'] = curEvent[2].replace('\xc2\x86', '').replace('\xc2\x87', '')
statusinfo['currservice_serviceref'] = serviceref.toString()
statusinfo['currservice_begin'] = strftime("%H:%M", (localtime(int(curEvent[0])+(config.recording.margin_before.value*60))))
statusinfo['currservice_end'] = strftime("%H:%M", (localtime(int(curEvent[1])-(config.recording.margin_after.value*60))))
statusinfo['currservice_description'] = curEvent[3]
if len(curEvent[3].decode('utf-8')) > 220:
statusinfo['currservice_description'] = curEvent[3].decode('utf-8')[0:220].encode('utf-8') + "..."
statusinfo['currservice_station'] = serviceHandlerInfo.getName(serviceref).replace('\xc2\x86', '').replace('\xc2\x87', '')
if statusinfo['currservice_serviceref'].startswith('1:0:0'):
statusinfo['currservice_filename'] = '/' + '/'.join(serviceref.toString().split("/")[1:])
full_desc = statusinfo['currservice_name'] + '\n'
full_desc += statusinfo['currservice_begin'] + " - " + statusinfo['currservice_end'] + '\n\n'
full_desc += event.getExtendedDescription().replace('\xc2\x86', '').replace('\xc2\x87', '').replace('\xc2\x8a', '\n')
statusinfo['currservice_fulldescription'] = full_desc
else:
statusinfo['currservice_name'] = "N/A"
statusinfo['currservice_begin'] = ""
statusinfo['currservice_end'] = ""
statusinfo['currservice_description'] = ""
statusinfo['currservice_fulldescription'] = "N/A"
if serviceref:
statusinfo['currservice_serviceref'] = serviceref.toString()
if serviceHandlerInfo:
statusinfo['currservice_station'] = serviceHandlerInfo.getName(serviceref).replace('\xc2\x86', '').replace('\xc2\x87', '')
elif serviceref.toString().find("http") != -1:
statusinfo['currservice_station'] = serviceref.toString().replace('%3a', ':')[serviceref.toString().find("http"):]
else:
statusinfo['currservice_station'] = "N/A"
# Get Standby State
from Screens.Standby import inStandby
if inStandby == None:
statusinfo['inStandby'] = "false"
else:
statusinfo['inStandby'] = "true"
# Get recording state
recs = NavigationInstance.instance.getRecordings()
if recs:
statusinfo['isRecording'] = "true"
statusinfo['Recording_list'] = "\n"
for timer in NavigationInstance.instance.RecordTimer.timer_list:
if timer.state == TimerEntry.StateRunning:
if not timer.justplay:
statusinfo['Recording_list'] += timer.service_ref.getServiceName().replace('\xc2\x86', '').replace('\xc2\x87', '') + ": " + timer.name + "\n"
else:
statusinfo['isRecording'] = "false"
return statusinfo
def getAlternativeChannels(service):
alternativeServices = eServiceCenter.getInstance().list(eServiceReference(service))
return alternativeServices and alternativeServices.getContent("S", True)
def GetWithAlternative(service,onlyFirst = True):
if service.startswith('1:134:'):
channels = getAlternativeChannels(service)
if channels:
if onlyFirst:
return channels[0]
else:
return channels
if onlyFirst:
return service
else:
return None
| FrontendStatus(se | identifier_name |
step_twisted2.py |
from buildbot.status import tests
from buildbot.process.step import SUCCESS, FAILURE, BuildStep
from buildbot.process.step_twisted import RunUnitTests
from zope.interface import implements
from twisted.python import log, failure
from twisted.spread import jelly
from twisted.pb.tokens import BananaError
from twisted.web.html import PRE
from twisted.web.error import NoResource
class Null: pass
ResultTypes = Null()
ResultTypeNames = ["SKIP",
"EXPECTED_FAILURE", "FAILURE", "ERROR",
"UNEXPECTED_SUCCESS", "SUCCESS"]
try:
from twisted.trial import reporter # introduced in Twisted-1.0.5
# extract the individual result types
for name in ResultTypeNames:
|
except ImportError:
from twisted.trial import unittest # Twisted-1.0.4 has them here
for name in ResultTypeNames:
setattr(ResultTypes, name, getattr(unittest, name))
log._keepErrors = 0
from twisted.trial import remote # for trial/jelly parsing
import StringIO
class OneJellyTest(tests.OneTest):
def html(self, request):
tpl = "<HTML><BODY>\n\n%s\n\n</body></html>\n"
pptpl = "<HTML><BODY>\n\n<pre>%s</pre>\n\n</body></html>\n"
t = request.postpath[0] # one of 'short', 'long' #, or 'html'
if isinstance(self.results, failure.Failure):
# it would be nice to remove unittest functions from the
# traceback like unittest.format_exception() does.
if t == 'short':
s = StringIO.StringIO()
self.results.printTraceback(s)
return pptpl % PRE(s.getvalue())
elif t == 'long':
s = StringIO.StringIO()
self.results.printDetailedTraceback(s)
return pptpl % PRE(s.getvalue())
#elif t == 'html':
# return tpl % formatFailure(self.results)
# ACK! source lines aren't stored in the Failure, rather,
# formatFailure pulls them (by filename) from the local
# disk. Feh. Even printTraceback() won't work. Double feh.
return NoResource("No such mode '%s'" % t)
if self.results == None:
return tpl % "No results to show: test probably passed."
# maybe results are plain text?
return pptpl % PRE(self.results)
class TwistedJellyTestResults(tests.TestResults):
oneTestClass = OneJellyTest
def describeOneTest(self, testname):
return "%s: %s\n" % (testname, self.tests[testname][0])
class RunUnitTestsJelly(RunUnitTests):
"""I run the unit tests with the --jelly option, which generates
machine-parseable results as the tests are run.
"""
trialMode = "--jelly"
implements(remote.IRemoteReporter)
ourtypes = { ResultTypes.SKIP: tests.SKIP,
ResultTypes.EXPECTED_FAILURE: tests.EXPECTED_FAILURE,
ResultTypes.FAILURE: tests.FAILURE,
ResultTypes.ERROR: tests.ERROR,
ResultTypes.UNEXPECTED_SUCCESS: tests.UNEXPECTED_SUCCESS,
ResultTypes.SUCCESS: tests.SUCCESS,
}
def __getstate__(self):
#d = RunUnitTests.__getstate__(self)
d = self.__dict__.copy()
# Banana subclasses are Ephemeral
if d.has_key("decoder"):
del d['decoder']
return d
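	# Added note: __getstate__ drops the decoder because Banana subclasses are
	# Ephemeral (not picklable); start() recreates it when the step runs again.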
def start(self):
self.decoder = remote.DecodeReport(self)
# don't accept anything unpleasant from the (untrusted) build slave
# The jellied stream may have Failures, but everything inside should
# be a string
security = jelly.SecurityOptions()
security.allowBasicTypes()
security.allowInstancesOf(failure.Failure)
self.decoder.taster = security
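		# Added note: the taster whitelists basic types plus Failure instances,
		# so a hostile slave cannot make the master instantiate arbitrary
		# classes through the jelly stream.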
self.results = TwistedJellyTestResults()
RunUnitTests.start(self)
def logProgress(self, progress):
# XXX: track number of tests
BuildStep.logProgress(self, progress)
def addStdout(self, data):
if not self.decoder:
return
try:
self.decoder.dataReceived(data)
except BananaError:
self.decoder = None
log.msg("trial --jelly output unparseable, traceback follows")
log.deferr()
def remote_start(self, expectedTests, times=None):
print "remote_start", expectedTests
def remote_reportImportError(self, name, aFailure, times=None):
pass
def remote_reportStart(self, testClass, method, times=None):
print "reportStart", testClass, method
def remote_reportResults(self, testClass, method, resultType, results,
times=None):
print "reportResults", testClass, method, resultType
which = testClass + "." + method
self.results.addTest(which,
self.ourtypes.get(resultType, tests.UNKNOWN),
results)
def finished(self, rc):
# give self.results to our Build object
self.build.testsFinished(self.results)
total = self.results.countTests()
count = self.results.countFailures()
result = SUCCESS
if total == None:
result = (FAILURE, ['tests%s' % self.rtext(' (%s)')])
if count:
result = (FAILURE, ["%d tes%s%s" % (count,
(count == 1 and 't' or 'ts'),
self.rtext(' (%s)'))])
return self.stepComplete(result)
def finishStatus(self, result):
total = self.results.countTests()
count = self.results.countFailures()
color = "green"
text = []
if count == 0:
text.extend(["%d %s" % \
(total,
total == 1 and "test" or "tests"),
"passed"])
else:
text.append("tests")
text.append("%d %s" % \
(count,
count == 1 and "failure" or "failures"))
color = "red"
self.updateCurrentActivity(color=color, text=text)
self.addFileToCurrentActivity("tests", self.results)
#self.finishStatusSummary()
self.finishCurrentActivity()
| setattr(ResultTypes, name, getattr(reporter, name)) | conditional_block |
step_twisted2.py |
from buildbot.status import tests
from buildbot.process.step import SUCCESS, FAILURE, BuildStep
from buildbot.process.step_twisted import RunUnitTests
from zope.interface import implements
from twisted.python import log, failure
from twisted.spread import jelly
from twisted.pb.tokens import BananaError
from twisted.web.html import PRE
from twisted.web.error import NoResource
class Null: pass
ResultTypes = Null()
ResultTypeNames = ["SKIP",
"EXPECTED_FAILURE", "FAILURE", "ERROR",
"UNEXPECTED_SUCCESS", "SUCCESS"]
try:
from twisted.trial import reporter # introduced in Twisted-1.0.5
# extract the individual result types
for name in ResultTypeNames:
setattr(ResultTypes, name, getattr(reporter, name))
except ImportError:
from twisted.trial import unittest # Twisted-1.0.4 has them here
for name in ResultTypeNames:
setattr(ResultTypes, name, getattr(unittest, name))
log._keepErrors = 0
from twisted.trial import remote # for trial/jelly parsing
import StringIO
class OneJellyTest(tests.OneTest):
def html(self, request):
tpl = "<HTML><BODY>\n\n%s\n\n</body></html>\n"
pptpl = "<HTML><BODY>\n\n<pre>%s</pre>\n\n</body></html>\n"
t = request.postpath[0] # one of 'short', 'long' #, or 'html'
if isinstance(self.results, failure.Failure):
# it would be nice to remove unittest functions from the
# traceback like unittest.format_exception() does.
if t == 'short':
s = StringIO.StringIO()
self.results.printTraceback(s)
return pptpl % PRE(s.getvalue())
elif t == 'long':
s = StringIO.StringIO()
self.results.printDetailedTraceback(s)
return pptpl % PRE(s.getvalue())
#elif t == 'html':
# return tpl % formatFailure(self.results)
# ACK! source lines aren't stored in the Failure, rather,
# formatFailure pulls them (by filename) from the local
# disk. Feh. Even printTraceback() won't work. Double feh.
return NoResource("No such mode '%s'" % t)
if self.results == None:
return tpl % "No results to show: test probably passed."
# maybe results are plain text?
return pptpl % PRE(self.results)
class TwistedJellyTestResults(tests.TestResults):
oneTestClass = OneJellyTest
def describeOneTest(self, testname):
return "%s: %s\n" % (testname, self.tests[testname][0])
class RunUnitTestsJelly(RunUnitTests):
"""I run the unit tests with the --jelly option, which generates
machine-parseable results as the tests are run.
"""
trialMode = "--jelly"
implements(remote.IRemoteReporter)
ourtypes = { ResultTypes.SKIP: tests.SKIP,
ResultTypes.EXPECTED_FAILURE: tests.EXPECTED_FAILURE,
ResultTypes.FAILURE: tests.FAILURE,
ResultTypes.ERROR: tests.ERROR,
ResultTypes.UNEXPECTED_SUCCESS: tests.UNEXPECTED_SUCCESS,
ResultTypes.SUCCESS: tests.SUCCESS,
}
def __getstate__(self):
#d = RunUnitTests.__getstate__(self)
d = self.__dict__.copy()
# Banana subclasses are Ephemeral
if d.has_key("decoder"):
del d['decoder']
return d
def start(self):
self.decoder = remote.DecodeReport(self)
# don't accept anything unpleasant from the (untrusted) build slave
# The jellied stream may have Failures, but everything inside should
# be a string
security = jelly.SecurityOptions()
security.allowBasicTypes()
security.allowInstancesOf(failure.Failure)
self.decoder.taster = security
self.results = TwistedJellyTestResults()
RunUnitTests.start(self)
def logProgress(self, progress):
# XXX: track number of tests
|
def addStdout(self, data):
if not self.decoder:
return
try:
self.decoder.dataReceived(data)
except BananaError:
self.decoder = None
log.msg("trial --jelly output unparseable, traceback follows")
log.deferr()
def remote_start(self, expectedTests, times=None):
print "remote_start", expectedTests
def remote_reportImportError(self, name, aFailure, times=None):
pass
def remote_reportStart(self, testClass, method, times=None):
print "reportStart", testClass, method
def remote_reportResults(self, testClass, method, resultType, results,
times=None):
print "reportResults", testClass, method, resultType
which = testClass + "." + method
self.results.addTest(which,
self.ourtypes.get(resultType, tests.UNKNOWN),
results)
def finished(self, rc):
# give self.results to our Build object
self.build.testsFinished(self.results)
total = self.results.countTests()
count = self.results.countFailures()
result = SUCCESS
if total == None:
result = (FAILURE, ['tests%s' % self.rtext(' (%s)')])
if count:
result = (FAILURE, ["%d tes%s%s" % (count,
(count == 1 and 't' or 'ts'),
self.rtext(' (%s)'))])
return self.stepComplete(result)
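	# Contract sketch (added; hedged): stepComplete() is assumed to accept
	# either SUCCESS or a (FAILURE, [text, ...]) tuple, so e.g. count == 3
	# produces (FAILURE, ["3 tests" + whatever rtext() interpolates]).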
def finishStatus(self, result):
total = self.results.countTests()
count = self.results.countFailures()
color = "green"
text = []
if count == 0:
text.extend(["%d %s" % \
(total,
total == 1 and "test" or "tests"),
"passed"])
else:
text.append("tests")
text.append("%d %s" % \
(count,
count == 1 and "failure" or "failures"))
color = "red"
self.updateCurrentActivity(color=color, text=text)
self.addFileToCurrentActivity("tests", self.results)
#self.finishStatusSummary()
self.finishCurrentActivity()
| BuildStep.logProgress(self, progress) | identifier_body |
step_twisted2.py |
from buildbot.status import tests
from buildbot.process.step import SUCCESS, FAILURE, BuildStep
from buildbot.process.step_twisted import RunUnitTests
from zope.interface import implements
from twisted.python import log, failure
from twisted.spread import jelly
from twisted.pb.tokens import BananaError
from twisted.web.html import PRE
from twisted.web.error import NoResource
class Null: pass
ResultTypes = Null()
ResultTypeNames = ["SKIP",
"EXPECTED_FAILURE", "FAILURE", "ERROR",
"UNEXPECTED_SUCCESS", "SUCCESS"]
try:
from twisted.trial import reporter # introduced in Twisted-1.0.5
# extract the individual result types
for name in ResultTypeNames:
setattr(ResultTypes, name, getattr(reporter, name))
except ImportError:
from twisted.trial import unittest # Twisted-1.0.4 has them here
for name in ResultTypeNames:
setattr(ResultTypes, name, getattr(unittest, name))
log._keepErrors = 0
from twisted.trial import remote # for trial/jelly parsing
import StringIO
class OneJellyTest(tests.OneTest):
def html(self, request):
tpl = "<HTML><BODY>\n\n%s\n\n</body></html>\n"
pptpl = "<HTML><BODY>\n\n<pre>%s</pre>\n\n</body></html>\n"
t = request.postpath[0] # one of 'short', 'long' #, or 'html'
if isinstance(self.results, failure.Failure):
# it would be nice to remove unittest functions from the
# traceback like unittest.format_exception() does.
if t == 'short':
s = StringIO.StringIO()
self.results.printTraceback(s)
return pptpl % PRE(s.getvalue())
elif t == 'long':
s = StringIO.StringIO()
self.results.printDetailedTraceback(s)
return pptpl % PRE(s.getvalue())
#elif t == 'html':
# return tpl % formatFailure(self.results)
# ACK! source lines aren't stored in the Failure, rather,
# formatFailure pulls them (by filename) from the local
# disk. Feh. Even printTraceback() won't work. Double feh.
return NoResource("No such mode '%s'" % t)
if self.results == None:
return tpl % "No results to show: test probably passed."
# maybe results are plain text?
return pptpl % PRE(self.results)
class TwistedJellyTestResults(tests.TestResults):
oneTestClass = OneJellyTest
def describeOneTest(self, testname):
return "%s: %s\n" % (testname, self.tests[testname][0])
class RunUnitTestsJelly(RunUnitTests):
"""I run the unit tests with the --jelly option, which generates
machine-parseable results as the tests are run.
"""
trialMode = "--jelly"
implements(remote.IRemoteReporter)
ourtypes = { ResultTypes.SKIP: tests.SKIP,
ResultTypes.EXPECTED_FAILURE: tests.EXPECTED_FAILURE,
ResultTypes.FAILURE: tests.FAILURE,
ResultTypes.ERROR: tests.ERROR,
ResultTypes.UNEXPECTED_SUCCESS: tests.UNEXPECTED_SUCCESS,
ResultTypes.SUCCESS: tests.SUCCESS,
}
def __getstate__(self):
#d = RunUnitTests.__getstate__(self)
d = self.__dict__.copy()
# Banana subclasses are Ephemeral
if d.has_key("decoder"):
del d['decoder']
return d
def start(self):
self.decoder = remote.DecodeReport(self)
# don't accept anything unpleasant from the (untrusted) build slave
# The jellied stream may have Failures, but everything inside should
# be a string
security = jelly.SecurityOptions()
security.allowBasicTypes()
security.allowInstancesOf(failure.Failure)
self.decoder.taster = security
self.results = TwistedJellyTestResults()
RunUnitTests.start(self)
def | (self, progress):
# XXX: track number of tests
BuildStep.logProgress(self, progress)
def addStdout(self, data):
if not self.decoder:
return
try:
self.decoder.dataReceived(data)
except BananaError:
self.decoder = None
log.msg("trial --jelly output unparseable, traceback follows")
log.deferr()
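	# Added note: because self.decoder is set to None above, every later
	# addStdout() call returns early, so one corrupt jelly stream cannot keep
	# raising.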
def remote_start(self, expectedTests, times=None):
print "remote_start", expectedTests
def remote_reportImportError(self, name, aFailure, times=None):
pass
def remote_reportStart(self, testClass, method, times=None):
print "reportStart", testClass, method
def remote_reportResults(self, testClass, method, resultType, results,
times=None):
print "reportResults", testClass, method, resultType
which = testClass + "." + method
self.results.addTest(which,
self.ourtypes.get(resultType, tests.UNKNOWN),
results)
def finished(self, rc):
# give self.results to our Build object
self.build.testsFinished(self.results)
total = self.results.countTests()
count = self.results.countFailures()
result = SUCCESS
if total == None:
result = (FAILURE, ['tests%s' % self.rtext(' (%s)')])
if count:
result = (FAILURE, ["%d tes%s%s" % (count,
(count == 1 and 't' or 'ts'),
self.rtext(' (%s)'))])
return self.stepComplete(result)
def finishStatus(self, result):
total = self.results.countTests()
count = self.results.countFailures()
color = "green"
text = []
if count == 0:
text.extend(["%d %s" % \
(total,
total == 1 and "test" or "tests"),
"passed"])
else:
text.append("tests")
text.append("%d %s" % \
(count,
count == 1 and "failure" or "failures"))
color = "red"
self.updateCurrentActivity(color=color, text=text)
self.addFileToCurrentActivity("tests", self.results)
#self.finishStatusSummary()
self.finishCurrentActivity()
| logProgress | identifier_name |
step_twisted2.py | from buildbot.status import tests
from buildbot.process.step import SUCCESS, FAILURE, BuildStep
from buildbot.process.step_twisted import RunUnitTests
from zope.interface import implements
from twisted.python import log, failure
from twisted.spread import jelly
from twisted.pb.tokens import BananaError
from twisted.web.html import PRE
from twisted.web.error import NoResource
class Null: pass
ResultTypes = Null()
ResultTypeNames = ["SKIP",
"EXPECTED_FAILURE", "FAILURE", "ERROR",
"UNEXPECTED_SUCCESS", "SUCCESS"]
try:
from twisted.trial import reporter # introduced in Twisted-1.0.5
# extract the individual result types
for name in ResultTypeNames:
setattr(ResultTypes, name, getattr(reporter, name))
except ImportError:
from twisted.trial import unittest # Twisted-1.0.4 has them here
for name in ResultTypeNames:
setattr(ResultTypes, name, getattr(unittest, name))
log._keepErrors = 0
from twisted.trial import remote # for trial/jelly parsing
import StringIO
class OneJellyTest(tests.OneTest):
def html(self, request):
tpl = "<HTML><BODY>\n\n%s\n\n</body></html>\n"
pptpl = "<HTML><BODY>\n\n<pre>%s</pre>\n\n</body></html>\n"
t = request.postpath[0] # one of 'short', 'long' #, or 'html'
if isinstance(self.results, failure.Failure):
# it would be nice to remove unittest functions from the
# traceback like unittest.format_exception() does.
if t == 'short':
s = StringIO.StringIO()
self.results.printTraceback(s)
return pptpl % PRE(s.getvalue())
elif t == 'long':
s = StringIO.StringIO()
self.results.printDetailedTraceback(s)
return pptpl % PRE(s.getvalue())
#elif t == 'html':
# return tpl % formatFailure(self.results)
# ACK! source lines aren't stored in the Failure, rather,
# formatFailure pulls them (by filename) from the local
# disk. Feh. Even printTraceback() won't work. Double feh.
return NoResource("No such mode '%s'" % t)
if self.results == None:
return tpl % "No results to show: test probably passed."
# maybe results are plain text?
return pptpl % PRE(self.results)
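# Added note: request.postpath[0] picks the rendering mode, i.e. the resource
# is expected to be fetched as .../<testname>/short or /long; the 'html' mode
# stays disabled for the reason given in the comment above.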
class TwistedJellyTestResults(tests.TestResults):
oneTestClass = OneJellyTest
def describeOneTest(self, testname):
return "%s: %s\n" % (testname, self.tests[testname][0])
class RunUnitTestsJelly(RunUnitTests):
"""I run the unit tests with the --jelly option, which generates
machine-parseable results as the tests are run.
"""
trialMode = "--jelly"
implements(remote.IRemoteReporter)
ourtypes = { ResultTypes.SKIP: tests.SKIP,
ResultTypes.EXPECTED_FAILURE: tests.EXPECTED_FAILURE,
ResultTypes.FAILURE: tests.FAILURE,
ResultTypes.ERROR: tests.ERROR,
ResultTypes.UNEXPECTED_SUCCESS: tests.UNEXPECTED_SUCCESS,
ResultTypes.SUCCESS: tests.SUCCESS,
}
def __getstate__(self):
#d = RunUnitTests.__getstate__(self)
d = self.__dict__.copy()
# Banana subclasses are Ephemeral
if d.has_key("decoder"):
del d['decoder']
return d
def start(self):
self.decoder = remote.DecodeReport(self)
# don't accept anything unpleasant from the (untrusted) build slave
# The jellied stream may have Failures, but everything inside should
# be a string
security = jelly.SecurityOptions()
security.allowBasicTypes()
security.allowInstancesOf(failure.Failure)
self.decoder.taster = security
self.results = TwistedJellyTestResults()
RunUnitTests.start(self)
def logProgress(self, progress):
# XXX: track number of tests
BuildStep.logProgress(self, progress)
def addStdout(self, data):
if not self.decoder:
return
try:
self.decoder.dataReceived(data)
except BananaError:
self.decoder = None
log.msg("trial --jelly output unparseable, traceback follows")
log.deferr()
def remote_start(self, expectedTests, times=None):
print "remote_start", expectedTests
def remote_reportImportError(self, name, aFailure, times=None):
pass
def remote_reportStart(self, testClass, method, times=None):
print "reportStart", testClass, method
def remote_reportResults(self, testClass, method, resultType, results,
times=None):
print "reportResults", testClass, method, resultType
which = testClass + "." + method
self.results.addTest(which,
self.ourtypes.get(resultType, tests.UNKNOWN),
results)
def finished(self, rc):
# give self.results to our Build object
self.build.testsFinished(self.results) | if total == None:
result = (FAILURE, ['tests%s' % self.rtext(' (%s)')])
if count:
result = (FAILURE, ["%d tes%s%s" % (count,
(count == 1 and 't' or 'ts'),
self.rtext(' (%s)'))])
return self.stepComplete(result)
def finishStatus(self, result):
total = self.results.countTests()
count = self.results.countFailures()
color = "green"
text = []
if count == 0:
text.extend(["%d %s" % \
(total,
total == 1 and "test" or "tests"),
"passed"])
else:
text.append("tests")
text.append("%d %s" % \
(count,
count == 1 and "failure" or "failures"))
color = "red"
self.updateCurrentActivity(color=color, text=text)
self.addFileToCurrentActivity("tests", self.results)
#self.finishStatusSummary()
self.finishCurrentActivity() | total = self.results.countTests()
count = self.results.countFailures()
result = SUCCESS | random_line_split |
populate_mini_ws.py | from biokbase.workspace.client import Workspace
import requests
import json
import sys
from time import time
from fix_workspace_info import fix_all_workspace_info
from pprint import pprint
kb_port = 9999
mini_ws_url = f"http://localhost:{kb_port}/services/ws"
mini_auth_url = f"http://localhost:{kb_port}/services/auth/testmode"
mini_ws_admin = "wsadmin"
narrative_spec_file = '../../../narrative_object.spec'
old_narrative_spec_file = 'old_narrative_object.spec'
test_narrative_data = 'narrative_test_data.json'
test_user = "kbasetest"
####
# BEFORE YOU RUN THIS:
# 1. Spin up mini_kb with the workspace env pointed to my branch:
# that is, the "-env" line in the ws command points to
# "https://raw.githubusercontent.com/briehl/mini_kb/master/deployment/conf/workspace-minikb.ini"
#
# 2. When this starts up, the workspace will complain. Auth is in testmode, and there's no test user/token set up
# for the Shock configuration. Do the following:
# a. enter the mongo container
# > docker exec -it mini_kb_ci-mongo_1 /bin/bash
# b. start mongo (just "mongo" at the prompt)
# c. Run the following to use gridFS:
# > use workspace
# > db.settings.findAndModify({ query: {backend: "shock"}, update: { $set: {"backend": "gridFS"} } })
# d. Exit that container, and restart the workspace container
# > docker-compose restart ws
#
# With the setup done, this script should do the job of creating accounts, importing the Narrative type,
# loading test data, etc.
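# Equivalent to steps 2b-2c above as a pymongo sketch (added; untested, and
# assumes the mongo container is reachable on localhost:27017):
#   from pymongo import MongoClient
#   MongoClient('localhost', 27017).workspace.settings.find_one_and_update(
#       {'backend': 'shock'}, {'$set': {'backend': 'gridFS'}})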
def create_user(user_id):
"""
Returns a token for that user.
"""
headers = {
"Content-Type": "application/json"
}
r = requests.post(mini_auth_url + '/api/V2/testmodeonly/user', headers=headers, data=json.dumps({'user': user_id, 'display': "User {}".format(user_id)}))
if r.status_code != 200 and r.status_code != 400:
print("Can't create dummy user!")
r.raise_for_status()
r = requests.post(mini_auth_url + '/api/V2/testmodeonly/token', headers=headers, data=json.dumps({'user': user_id, 'type': 'Login'}))
if r.status_code != 200:
print("Can't make dummy token!")
r.raise_for_status()
token = json.loads(r.text)
return token['token']
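# Usage sketch (added): the returned value is a testmode Login token, e.g.
#   admin_token = create_user(mini_ws_admin)
#   admin_ws = Workspace(url=mini_ws_url, token=admin_token)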
def load_narrative_type(ws):
"""
Loads the KBaseNarrative.Narrative type info into mini kb.
ws = Workspace client configured for admin
"""
ws.request_module_ownership("KBaseNarrative")
ws.administer({
'command': 'approveModRequest',
'module': 'KBaseNarrative'
})
with open(old_narrative_spec_file, "r") as f:
old_spec = f.read()
ws.register_typespec({
'spec': old_spec,
'dryrun': 0,
'new_types': [
'Narrative',
'Cell',
'Worksheet',
'Metadata'
]
})
ws.release_module('KBaseNarrative')
for n in ws.get_module_info({'mod': 'KBaseNarrative'})['types'].keys():
if '.Narrative' in n:
old_ver = n.split('-')[-1]
with open(narrative_spec_file, "r") as f:
spec = f.read()
ws.register_typespec({
'spec': spec,
'dryrun': 0,
'new_types': []
})
ws.release_module('KBaseNarrative')
for n in ws.get_module_info({'mod': 'KBaseNarrative'})['types'].keys():
if '.Narrative' in n:
new_ver = n.split('-')[-1]
return {
'old_ver': old_ver,
'new_ver': new_ver
}
def load_narrative_test_data(ws, vers):
"""
	Loads the test data set into the mini kb workspace.
	Returns a list with one entry per created workspace (should be ~7-10);
	each entry is the dict produced by _load_workspace_data:
	{
	    "ws_id": int,
	    "ws_info": workspace info tuple,
	    "nar_info": [object info for each saved Narrative],
	    "perms": {user: permission},
	    "correct_meta": expected workspace metadata (for validation),
	    "loaded_meta": the metadata written at load time
	}
"""
with open(test_narrative_data, 'r') as f:
test_data = json.loads(f.read().strip())
uploaded_data = list()
for ws_data in test_data["old"]:
uploaded_data.append(_load_workspace_data(ws, ws_data, len(uploaded_data), vers['old_ver']))
for ws_data in test_data["new"]:
|
return uploaded_data
def _load_workspace_data(ws, ws_data, idx, narrative_ver):
"""
	Loads up a single workspace with data and returns a dict about it.
	Dict contains:
	ws_id = the workspace id
	ws_info = the workspace info tuple
	nar_info = object info for each Narrative saved into the workspace
	perms = the workspace permissions from the fixture
	correct_meta = the correct workspace metadata (for validation)
	loaded_meta = the metadata actually written at load time
"""
print(ws_data.keys())
narratives = ws_data['narratives']
ws_meta = ws_data['ws_meta']
ws_info = ws.create_workspace({"workspace": "NarrativeWS-{}-{}".format(idx, int(time()*1000))})
ws_id = ws_info[0]
info = {
"ws_id": ws_id,
"ws_info": ws_info,
"nar_info": [],
"perms": ws_data["perms"],
"correct_meta": ws_data["correct_meta"],
"loaded_meta": ws_meta
}
if len(narratives):
for idx, nar in enumerate(narratives):
objects = ws.save_objects({
'id': ws_id,
'objects': [{
'type': 'KBaseNarrative.Narrative-{}'.format(narrative_ver),
'data': nar,
'name': 'Narrative-{}'.format(idx)
}]
})
info['nar_info'].append(objects[0])
if len(ws_meta):
ws.alter_workspace_metadata({
'wsi': {'id': ws_id},
'new': ws_meta
})
perms = ws_data["perms"]
if len(perms) > 1:
admin_perm = perms['wsadmin']
ws.set_permissions({
'id': ws_id,
'new_permission': admin_perm,
'users': ['wsadmin']
})
return info
def main():
admin_token = create_user(mini_ws_admin)
admin_ws = Workspace(url=mini_ws_url, token=admin_token)
versions = load_narrative_type(admin_ws)
versions = {
'old_ver': '1.0',
'new_ver': '2.0'
}
user_token = create_user(test_user)
user_ws = Workspace(url=mini_ws_url, token=user_token)
loaded_info = load_narrative_test_data(user_ws, versions)
pprint(loaded_info)
# fix_all_workspace_info(mini_ws_url, mini_auth_url, admin_token, 100)
# for ws_data in loaded_info:
# ws_id = ws_data['ws_id']
# ws_meta = user_ws.get_workspace_info({'id': ws_id})[8]
# try:
# assert(ws_meta == ws_data['correct_meta'])
# except:
# print("WS: {}".format(ws_id))
# pprint(ws_meta)
# print("doesn't match")
# pprint(ws_data['correct_meta'])
if __name__ == '__main__':
sys.exit(main())
| uploaded_data.append(_load_workspace_data(ws, ws_data, len(uploaded_data), vers['new_ver'])) | conditional_block |
populate_mini_ws.py | from biokbase.workspace.client import Workspace
import requests
import json
import sys
from time import time
from fix_workspace_info import fix_all_workspace_info
from pprint import pprint
kb_port = 9999
mini_ws_url = f"http://localhost:{kb_port}/services/ws"
mini_auth_url = f"http://localhost:{kb_port}/services/auth/testmode"
mini_ws_admin = "wsadmin"
narrative_spec_file = '../../../narrative_object.spec'
old_narrative_spec_file = 'old_narrative_object.spec'
test_narrative_data = 'narrative_test_data.json'
test_user = "kbasetest"
####
# BEFORE YOU RUN THIS:
# 1. Spin up mini_kb with the workspace env pointed to my branch:
# that is, the "-env" line in the ws command points to
# "https://raw.githubusercontent.com/briehl/mini_kb/master/deployment/conf/workspace-minikb.ini"
#
# 2. When this starts up, the workspace will complain. Auth is in testmode, and there's no test user/token set up
# for the Shock configuration. Do the following:
# a. enter the mongo container
# > docker exec -it mini_kb_ci-mongo_1 /bin/bash
# b. start mongo (just "mongo" at the prompt)
# c. Run the following to use gridFS:
# > use workspace
# > db.settings.findAndModify({ query: {backend: "shock"}, update: { $set: {"backend": "gridFS"} } })
# d. Exit that container, and restart the workspace container
# > docker-compose restart ws
#
# With the setup done, this script should do the job of creating accounts, importing the Narrative type,
# loading test data, etc.
def create_user(user_id):
"""
Returns a token for that user.
"""
headers = {
"Content-Type": "application/json"
}
r = requests.post(mini_auth_url + '/api/V2/testmodeonly/user', headers=headers, data=json.dumps({'user': user_id, 'display': "User {}".format(user_id)}))
if r.status_code != 200 and r.status_code != 400:
print("Can't create dummy user!")
r.raise_for_status()
r = requests.post(mini_auth_url + '/api/V2/testmodeonly/token', headers=headers, data=json.dumps({'user': user_id, 'type': 'Login'}))
if r.status_code != 200:
print("Can't make dummy token!")
r.raise_for_status()
token = json.loads(r.text)
return token['token']
def load_narrative_type(ws):
|
def load_narrative_test_data(ws, vers):
"""
	Loads the test data set into the mini kb workspace.
	Returns a list with one entry per created workspace (should be ~7-10);
	each entry is the dict produced by _load_workspace_data:
	{
	    "ws_id": int,
	    "ws_info": workspace info tuple,
	    "nar_info": [object info for each saved Narrative],
	    "perms": {user: permission},
	    "correct_meta": expected workspace metadata (for validation),
	    "loaded_meta": the metadata written at load time
	}
"""
with open(test_narrative_data, 'r') as f:
test_data = json.loads(f.read().strip())
uploaded_data = list()
for ws_data in test_data["old"]:
uploaded_data.append(_load_workspace_data(ws, ws_data, len(uploaded_data), vers['old_ver']))
for ws_data in test_data["new"]:
uploaded_data.append(_load_workspace_data(ws, ws_data, len(uploaded_data), vers['new_ver']))
return uploaded_data
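# Shape sketch (added, inferred from the keys used below): the fixture file
# looks like {"old": [<workspace fixture>, ...], "new": [...]}, where each
# fixture carries "narratives", "ws_meta", "perms" and "correct_meta".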
def _load_workspace_data(ws, ws_data, idx, narrative_ver):
"""
	Loads up a single workspace with data and returns a dict about it.
	Dict contains:
	ws_id = the workspace id
	ws_info = the workspace info tuple
	nar_info = object info for each Narrative saved into the workspace
	perms = the workspace permissions from the fixture
	correct_meta = the correct workspace metadata (for validation)
	loaded_meta = the metadata actually written at load time
"""
print(ws_data.keys())
narratives = ws_data['narratives']
ws_meta = ws_data['ws_meta']
ws_info = ws.create_workspace({"workspace": "NarrativeWS-{}-{}".format(idx, int(time()*1000))})
ws_id = ws_info[0]
info = {
"ws_id": ws_id,
"ws_info": ws_info,
"nar_info": [],
"perms": ws_data["perms"],
"correct_meta": ws_data["correct_meta"],
"loaded_meta": ws_meta
}
if len(narratives):
for idx, nar in enumerate(narratives):
objects = ws.save_objects({
'id': ws_id,
'objects': [{
'type': 'KBaseNarrative.Narrative-{}'.format(narrative_ver),
'data': nar,
'name': 'Narrative-{}'.format(idx)
}]
})
info['nar_info'].append(objects[0])
if len(ws_meta):
ws.alter_workspace_metadata({
'wsi': {'id': ws_id},
'new': ws_meta
})
perms = ws_data["perms"]
if len(perms) > 1:
admin_perm = perms['wsadmin']
ws.set_permissions({
'id': ws_id,
'new_permission': admin_perm,
'users': ['wsadmin']
})
return info
def main():
admin_token = create_user(mini_ws_admin)
admin_ws = Workspace(url=mini_ws_url, token=admin_token)
versions = load_narrative_type(admin_ws)
versions = {
'old_ver': '1.0',
'new_ver': '2.0'
}
user_token = create_user(test_user)
user_ws = Workspace(url=mini_ws_url, token=user_token)
loaded_info = load_narrative_test_data(user_ws, versions)
pprint(loaded_info)
# fix_all_workspace_info(mini_ws_url, mini_auth_url, admin_token, 100)
# for ws_data in loaded_info:
# ws_id = ws_data['ws_id']
# ws_meta = user_ws.get_workspace_info({'id': ws_id})[8]
# try:
# assert(ws_meta == ws_data['correct_meta'])
# except:
# print("WS: {}".format(ws_id))
# pprint(ws_meta)
# print("doesn't match")
# pprint(ws_data['correct_meta'])
if __name__ == '__main__':
sys.exit(main())
| """
Loads the KBaseNarrative.Narrative type info into mini kb.
ws = Workspace client configured for admin
"""
ws.request_module_ownership("KBaseNarrative")
ws.administer({
'command': 'approveModRequest',
'module': 'KBaseNarrative'
})
with open(old_narrative_spec_file, "r") as f:
old_spec = f.read()
ws.register_typespec({
'spec': old_spec,
'dryrun': 0,
'new_types': [
'Narrative',
'Cell',
'Worksheet',
'Metadata'
]
})
ws.release_module('KBaseNarrative')
for n in ws.get_module_info({'mod': 'KBaseNarrative'})['types'].keys():
if '.Narrative' in n:
old_ver = n.split('-')[-1]
with open(narrative_spec_file, "r") as f:
spec = f.read()
ws.register_typespec({
'spec': spec,
'dryrun': 0,
'new_types': []
})
ws.release_module('KBaseNarrative')
for n in ws.get_module_info({'mod': 'KBaseNarrative'})['types'].keys():
if '.Narrative' in n:
new_ver = n.split('-')[-1]
return {
'old_ver': old_ver,
'new_ver': new_ver
} | identifier_body |
populate_mini_ws.py | from biokbase.workspace.client import Workspace
import requests
import json
import sys
from time import time
from fix_workspace_info import fix_all_workspace_info
from pprint import pprint
kb_port = 9999
mini_ws_url = f"http://localhost:{kb_port}/services/ws"
mini_auth_url = f"http://localhost:{kb_port}/services/auth/testmode"
mini_ws_admin = "wsadmin"
narrative_spec_file = '../../../narrative_object.spec'
old_narrative_spec_file = 'old_narrative_object.spec'
test_narrative_data = 'narrative_test_data.json'
test_user = "kbasetest"
####
# BEFORE YOU RUN THIS:
# 1. Spin up mini_kb with the workspace env pointed to my branch:
# that is, the "-env" line in the ws command points to
# "https://raw.githubusercontent.com/briehl/mini_kb/master/deployment/conf/workspace-minikb.ini"
#
# 2. When this starts up, the workspace will complain. Auth is in testmode, and there's no test user/token set up
# for the Shock configuration. Do the following:
# a. enter the mongo container
# > docker exec -it mini_kb_ci-mongo_1 /bin/bash
# b. start mongo (just "mongo" at the prompt)
# c. Run the following to use gridFS:
# > use workspace
# > db.settings.findAndModify({ query: {backend: "shock"}, update: { $set: {"backend": "gridFS"} } })
# d. Exit that container, and restart the workspace container
# > docker-compose restart ws
#
# With the setup done, this script should do the job of creating accounts, importing the Narrative type,
# loading test data, etc.
def create_user(user_id):
"""
Returns a token for that user.
"""
headers = {
"Content-Type": "application/json"
}
r = requests.post(mini_auth_url + '/api/V2/testmodeonly/user', headers=headers, data=json.dumps({'user': user_id, 'display': "User {}".format(user_id)}))
if r.status_code != 200 and r.status_code != 400:
print("Can't create dummy user!")
r.raise_for_status()
r = requests.post(mini_auth_url + '/api/V2/testmodeonly/token', headers=headers, data=json.dumps({'user': user_id, 'type': 'Login'}))
if r.status_code != 200:
print("Can't make dummy token!")
r.raise_for_status()
token = json.loads(r.text)
return token['token']
def | (ws):
"""
Loads the KBaseNarrative.Narrative type info into mini kb.
ws = Workspace client configured for admin
"""
ws.request_module_ownership("KBaseNarrative")
ws.administer({
'command': 'approveModRequest',
'module': 'KBaseNarrative'
})
with open(old_narrative_spec_file, "r") as f:
old_spec = f.read()
ws.register_typespec({
'spec': old_spec,
'dryrun': 0,
'new_types': [
'Narrative',
'Cell',
'Worksheet',
'Metadata'
]
})
ws.release_module('KBaseNarrative')
for n in ws.get_module_info({'mod': 'KBaseNarrative'})['types'].keys():
if '.Narrative' in n:
old_ver = n.split('-')[-1]
with open(narrative_spec_file, "r") as f:
spec = f.read()
ws.register_typespec({
'spec': spec,
'dryrun': 0,
'new_types': []
})
ws.release_module('KBaseNarrative')
for n in ws.get_module_info({'mod': 'KBaseNarrative'})['types'].keys():
if '.Narrative' in n:
new_ver = n.split('-')[-1]
return {
'old_ver': old_ver,
'new_ver': new_ver
}
def load_narrative_test_data(ws, vers):
"""
	Loads the test data set into the mini kb workspace.
	Returns a list with one entry per created workspace (should be ~7-10);
	each entry is the dict produced by _load_workspace_data:
	{
	    "ws_id": int,
	    "ws_info": workspace info tuple,
	    "nar_info": [object info for each saved Narrative],
	    "perms": {user: permission},
	    "correct_meta": expected workspace metadata (for validation),
	    "loaded_meta": the metadata written at load time
	}
"""
with open(test_narrative_data, 'r') as f:
test_data = json.loads(f.read().strip())
uploaded_data = list()
for ws_data in test_data["old"]:
uploaded_data.append(_load_workspace_data(ws, ws_data, len(uploaded_data), vers['old_ver']))
for ws_data in test_data["new"]:
uploaded_data.append(_load_workspace_data(ws, ws_data, len(uploaded_data), vers['new_ver']))
return uploaded_data
def _load_workspace_data(ws, ws_data, idx, narrative_ver):
"""
	Loads up a single workspace with data and returns a dict about it.
	Dict contains:
	ws_id = the workspace id
	ws_info = the workspace info tuple
	nar_info = object info for each Narrative saved into the workspace
	perms = the workspace permissions from the fixture
	correct_meta = the correct workspace metadata (for validation)
	loaded_meta = the metadata actually written at load time
"""
print(ws_data.keys())
narratives = ws_data['narratives']
ws_meta = ws_data['ws_meta']
ws_info = ws.create_workspace({"workspace": "NarrativeWS-{}-{}".format(idx, int(time()*1000))})
ws_id = ws_info[0]
info = {
"ws_id": ws_id,
"ws_info": ws_info,
"nar_info": [],
"perms": ws_data["perms"],
"correct_meta": ws_data["correct_meta"],
"loaded_meta": ws_meta
}
if len(narratives):
for idx, nar in enumerate(narratives):
objects = ws.save_objects({
'id': ws_id,
'objects': [{
'type': 'KBaseNarrative.Narrative-{}'.format(narrative_ver),
'data': nar,
'name': 'Narrative-{}'.format(idx)
}]
})
info['nar_info'].append(objects[0])
if len(ws_meta):
ws.alter_workspace_metadata({
'wsi': {'id': ws_id},
'new': ws_meta
})
perms = ws_data["perms"]
if len(perms) > 1:
admin_perm = perms['wsadmin']
ws.set_permissions({
'id': ws_id,
'new_permission': admin_perm,
'users': ['wsadmin']
})
return info
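# Added note: only the 'wsadmin' grant from "perms" is actually applied above;
# the other entries ride along solely so correct_meta/perms can be validated
# later.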
def main():
admin_token = create_user(mini_ws_admin)
admin_ws = Workspace(url=mini_ws_url, token=admin_token)
versions = load_narrative_type(admin_ws)
versions = {
'old_ver': '1.0',
'new_ver': '2.0'
}
user_token = create_user(test_user)
user_ws = Workspace(url=mini_ws_url, token=user_token)
loaded_info = load_narrative_test_data(user_ws, versions)
pprint(loaded_info)
# fix_all_workspace_info(mini_ws_url, mini_auth_url, admin_token, 100)
# for ws_data in loaded_info:
# ws_id = ws_data['ws_id']
# ws_meta = user_ws.get_workspace_info({'id': ws_id})[8]
# try:
# assert(ws_meta == ws_data['correct_meta'])
# except:
# print("WS: {}".format(ws_id))
# pprint(ws_meta)
# print("doesn't match")
# pprint(ws_data['correct_meta'])
if __name__ == '__main__':
sys.exit(main())
| load_narrative_type | identifier_name |
populate_mini_ws.py | from biokbase.workspace.client import Workspace
import requests
import json
import sys
from time import time
from fix_workspace_info import fix_all_workspace_info
from pprint import pprint
kb_port = 9999
mini_ws_url = f"http://localhost:{kb_port}/services/ws"
mini_auth_url = f"http://localhost:{kb_port}/services/auth/testmode"
mini_ws_admin = "wsadmin"
narrative_spec_file = '../../../narrative_object.spec'
old_narrative_spec_file = 'old_narrative_object.spec'
test_narrative_data = 'narrative_test_data.json'
test_user = "kbasetest"
####
# BEFORE YOU RUN THIS:
# 1. Spin up mini_kb with the workspace env pointed to my branch:
# that is, the "-env" line in the ws command points to
# "https://raw.githubusercontent.com/briehl/mini_kb/master/deployment/conf/workspace-minikb.ini"
#
# 2. When this starts up, the workspace will complain. Auth is in testmode, and there's no test user/token set up
# for the Shock configuration. Do the following:
# a. enter the mongo container
# > docker exec -it mini_kb_ci-mongo_1 /bin/bash
# b. start mongo (just "mongo" at the prompt)
# c. Run the following to use gridFS:
# > use workspace
# > db.settings.findAndModify({ query: {backend: "shock"}, update: { $set: {"backend": "gridFS"} } })
# d. Exit that container, and restart the workspace container
# > docker-compose restart ws
#
# With the setup done, this script should do the job of creating accounts, importing the Narrative type,
# loading test data, etc.
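# The same backend flip can be scripted with pymongo instead of the manual
# mongo-shell steps above (a sketch only -- it assumes mini_kb's mongo is
# exposed on localhost:27017):
#
#   from pymongo import MongoClient
#   client = MongoClient('localhost', 27017)
#   client.workspace.settings.find_one_and_update(
#       {'backend': 'shock'},
#       {'$set': {'backend': 'gridFS'}})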
def create_user(user_id):
"""
Returns a token for that user.
"""
headers = {
"Content-Type": "application/json"
}
r = requests.post(mini_auth_url + '/api/V2/testmodeonly/user', headers=headers, data=json.dumps({'user': user_id, 'display': "User {}".format(user_id)}))
if r.status_code != 200 and r.status_code != 400:
print("Can't create dummy user!")
r.raise_for_status()
r = requests.post(mini_auth_url + '/api/V2/testmodeonly/token', headers=headers, data=json.dumps({'user': user_id, 'type': 'Login'}))
if r.status_code != 200:
print("Can't make dummy token!")
r.raise_for_status()
token = json.loads(r.text)
return token['token']
def load_narrative_type(ws):
"""
Loads the KBaseNarrative.Narrative type info into mini kb.
ws = Workspace client configured for admin
"""
ws.request_module_ownership("KBaseNarrative")
ws.administer({
'command': 'approveModRequest',
'module': 'KBaseNarrative'
}) | 'dryrun': 0,
'new_types': [
'Narrative',
'Cell',
'Worksheet',
'Metadata'
]
})
ws.release_module('KBaseNarrative')
for n in ws.get_module_info({'mod': 'KBaseNarrative'})['types'].keys():
if '.Narrative' in n:
old_ver = n.split('-')[-1]
with open(narrative_spec_file, "r") as f:
spec = f.read()
ws.register_typespec({
'spec': spec,
'dryrun': 0,
'new_types': []
})
ws.release_module('KBaseNarrative')
for n in ws.get_module_info({'mod': 'KBaseNarrative'})['types'].keys():
if '.Narrative' in n:
new_ver = n.split('-')[-1]
return {
'old_ver': old_ver,
'new_ver': new_ver
}
def load_narrative_test_data(ws, vers):
"""
Loads the test data set into mini kb ws.
Returns this structure:
wsid: {
narrative_id: int
correct_ws_meta: {}
correct_ws_perms: {}
}
there's more than 1 wsid (should be ~7-10), but that's it.
"""
with open(test_narrative_data, 'r') as f:
test_data = json.loads(f.read().strip())
uploaded_data = list()
for ws_data in test_data["old"]:
uploaded_data.append(_load_workspace_data(ws, ws_data, len(uploaded_data), vers['old_ver']))
for ws_data in test_data["new"]:
uploaded_data.append(_load_workspace_data(ws, ws_data, len(uploaded_data), vers['new_ver']))
return uploaded_data
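# For reference, a minimal shape for narrative_test_data.json that satisfies
# the readers in this module (illustrative values only -- the real file ships
# with the test suite):
#
#   {
#     "old": [
#       {
#         "narratives": [],           # narrative object data blobs to save
#         "ws_meta": {},              # metadata set via alter_workspace_metadata
#         "perms": {"wsadmin": "a"},  # permissions keyed by username
#         "correct_meta": {}          # expected metadata after migration
#       }
#     ],
#     "new": []
#   }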
def _load_workspace_data(ws, ws_data, idx, narrative_ver):
"""
Loads up a single workspace with data and returns a dict about it.
Dict contains:
id = the workspace id
perms = the workspace permissions
correct_meta = the correct workspace metadata (for validation)
"""
print(ws_data.keys())
narratives = ws_data['narratives']
ws_meta = ws_data['ws_meta']
ws_info = ws.create_workspace({"workspace": "NarrativeWS-{}-{}".format(idx, int(time()*1000))})
ws_id = ws_info[0]
info = {
"ws_id": ws_id,
"ws_info": ws_info,
"nar_info": [],
"perms": ws_data["perms"],
"correct_meta": ws_data["correct_meta"],
"loaded_meta": ws_meta
}
if len(narratives):
for idx, nar in enumerate(narratives):
objects = ws.save_objects({
'id': ws_id,
'objects': [{
'type': 'KBaseNarrative.Narrative-{}'.format(narrative_ver),
'data': nar,
'name': 'Narrative-{}'.format(idx)
}]
})
info['nar_info'].append(objects[0])
if len(ws_meta):
ws.alter_workspace_metadata({
'wsi': {'id': ws_id},
'new': ws_meta
})
perms = ws_data["perms"]
if len(perms) > 1:
admin_perm = perms['wsadmin']
ws.set_permissions({
'id': ws_id,
'new_permission': admin_perm,
'users': ['wsadmin']
})
return info
def main():
admin_token = create_user(mini_ws_admin)
admin_ws = Workspace(url=mini_ws_url, token=admin_token)
versions = load_narrative_type(admin_ws)
versions = {
'old_ver': '1.0',
'new_ver': '2.0'
}
user_token = create_user(test_user)
user_ws = Workspace(url=mini_ws_url, token=user_token)
loaded_info = load_narrative_test_data(user_ws, versions)
pprint(loaded_info)
# fix_all_workspace_info(mini_ws_url, mini_auth_url, admin_token, 100)
# for ws_data in loaded_info:
# ws_id = ws_data['ws_id']
# ws_meta = user_ws.get_workspace_info({'id': ws_id})[8]
# try:
# assert(ws_meta == ws_data['correct_meta'])
# except:
# print("WS: {}".format(ws_id))
# pprint(ws_meta)
# print("doesn't match")
# pprint(ws_data['correct_meta'])
if __name__ == '__main__':
sys.exit(main()) | with open(old_narrative_spec_file, "r") as f:
old_spec = f.read()
ws.register_typespec({
'spec': old_spec, | random_line_split |
game.js |
/* global Cervus */
const material = new Cervus.materials.PhongMaterial({
requires: [
Cervus.components.Render,
Cervus.components.Transform
],
texture: Cervus.core.image_loader('../textures/4.png'),
normal_map: Cervus.core.image_loader('../textures/normal2.jpg')
});
const phong_material = new Cervus.materials.PhongMaterial({
requires: [
Cervus.components.Render,
Cervus.components.Transform
]
});
const game = new Cervus.core.Game({
width: window.innerWidth,
height: window.innerHeight,
// clear_color: 'f0f'
// fps: 1
});
game.camera.get_component(Cervus.components.Move).keyboard_controlled = true;
// game.camera.get_component(Cervus.components.Move).mouse_controlled = true;
// By default all entities face the user.
// Rotate the camera to see the scene.
const camera_transform = game.camera.get_component(Cervus.components.Transform);
camera_transform.position = [0, 2, 5];
camera_transform.rotate_rl(Math.PI);
// game.camera.keyboard_controlled = true;
const plane = new Cervus.shapes.Plane();
const plane_transform = plane.get_component(Cervus.components.Transform);
const plane_render = plane.get_component(Cervus.components.Render);
plane_transform.scale = [100, 1, 100];
plane_render.material = phong_material;
plane_render.color = "#eeeeee";
game.add(plane);
const cube = new Cervus.shapes.Box();
const cube_transform = cube.get_component(Cervus.components.Transform);
const cube_render = cube.get_component(Cervus.components.Render);
cube_render.material = material;
cube_render.color = "#00ff00";
cube_transform.position = [0, 0.5, -1];
const group = new Cervus.core.Entity({
components: [
new Cervus.components.Transform()
]
});
game.add(group);
group.add(cube);
//
game.on('tick', () => {
// group.get_component(Cervus.components.Transform).rotate_rl(16/1000); |
game.light.get_component(Cervus.components.Transform).position = game.camera.get_component(Cervus.components.Transform).position;
}); | random_line_split |
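// A possible variation (sketch): frame-rate-independent rotation for the
// group above. This assumes the 'tick' handler receives an elapsed-time
// delta in milliseconds -- verify Cervus's actual callback signature before
// relying on it.
//
//   game.on('tick', (delta) => {
//     group.get_component(Cervus.components.Transform).rotate_rl(delta / 1000);
//   });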
|
P4COMSTR.py |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test setting the $P4COMSTR variable.
"""
import os.path
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.subdir('Perforce', ['Perforce', 'sub'], 'sub')
sub_Perforce = os.path.join('sub', 'Perforce')
sub_SConscript = os.path.join('sub', 'SConscript')
sub_all = os.path.join('sub', 'all')
sub_ddd_in = os.path.join('sub', 'ddd.in')
sub_ddd_out = os.path.join('sub', 'ddd.out')
sub_eee_in = os.path.join('sub', 'eee.in')
sub_eee_out = os.path.join('sub', 'eee.out')
sub_fff_in = os.path.join('sub', 'fff.in')
sub_fff_out = os.path.join('sub', 'fff.out')
test.write('my-p4.py', """
import shutil
import sys
for f in sys.argv[1:]:
shutil.copy('Perforce/'+f, f)
""")
test.write('SConstruct', """
def cat(env, source, target):
target = str(target[0])
source = map(str, source)
f = open(target, "wb")
for src in source:
f.write(open(src, "rb").read())
f.close()
env = Environment(TOOLS = ['default', 'Perforce'],
BUILDERS={'Cat':Builder(action=cat)},
P4COM='%(_python_)s my-p4.py $TARGET',
P4COMSTR='Checking out $TARGET from our fake Perforce')
env.Cat('aaa.out', 'aaa.in')
env.Cat('bbb.out', 'bbb.in')
env.Cat('ccc.out', 'ccc.in')
env.Cat('all', ['aaa.out', 'bbb.out', 'ccc.out'])
env.SourceCode('.', env.Perforce())
SConscript('sub/SConscript', "env")
""" % locals())
test.write(['Perforce', 'sub', 'SConscript'], """\
Import("env")
env.Cat('ddd.out', 'ddd.in')
env.Cat('eee.out', 'eee.in')
env.Cat('fff.out', 'fff.in')
env.Cat('all', ['ddd.out', 'eee.out', 'fff.out'])
""")
test.write(['Perforce', 'aaa.in'], "Perforce/aaa.in\n")
test.write('bbb.in', "checked-out bbb.in\n")
test.write(['Perforce', 'ccc.in'], "Perforce/ccc.in\n")
test.write(['Perforce', 'sub', 'ddd.in'], "Perforce/sub/ddd.in\n")
test.write(['sub', 'eee.in'], "checked-out sub/eee.in\n")
test.write(['Perforce', 'sub', 'fff.in'], "Perforce/sub/fff.in\n")
test.run(arguments = '.',
stdout = test.wrap_stdout(read_str = """\
Checking out %(sub_SConscript)s from our fake Perforce
""" % locals(),
build_str = """\
Checking out aaa.in from our fake Perforce
cat(["aaa.out"], ["aaa.in"])
cat(["bbb.out"], ["bbb.in"])
Checking out ccc.in from our fake Perforce
cat(["ccc.out"], ["ccc.in"])
cat(["all"], ["aaa.out", "bbb.out", "ccc.out"])
Checking out %(sub_ddd_in)s from our fake Perforce
cat(["%(sub_ddd_out)s"], ["%(sub_ddd_in)s"])
cat(["%(sub_eee_out)s"], ["%(sub_eee_in)s"])
Checking out %(sub_fff_in)s from our fake Perforce
cat(["%(sub_fff_out)s"], ["%(sub_fff_in)s"])
cat(["%(sub_all)s"], ["%(sub_ddd_out)s", "%(sub_eee_out)s", "%(sub_fff_out)s"]) | "Perforce/aaa.in\nchecked-out bbb.in\nPerforce/ccc.in\n")
test.must_match(['sub', 'all'],
"Perforce/sub/ddd.in\nchecked-out sub/eee.in\nPerforce/sub/fff.in\n")
#
test.pass_test() | """ % locals()))
test.must_match('all', | random_line_split |
decodable.rs |
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The compiler code necessary for #[deriving(Decodable)]. See
encodable.rs for more.
*/
use ast::{MetaItem, Item, Expr, MutMutable, Ident};
use codemap::Span;
use ext::base::ExtCtxt;
use ext::build::AstBuilder;
use ext::deriving::generic::*;
pub fn expand_deriving_decodable(cx: &ExtCtxt,
span: Span,
mitem: @MetaItem,
in_items: ~[@Item]) -> ~[@Item] {
let trait_def = TraitDef {
cx: cx, span: span,
path: Path::new_(~["extra", "serialize", "Decodable"], None,
~[~Literal(Path::new_local("__D"))], true),
additional_bounds: ~[],
generics: LifetimeBounds {
lifetimes: ~[],
bounds: ~[("__D", ~[Path::new(~["extra", "serialize", "Decoder"])])],
},
methods: ~[
MethodDef {
name: "decode",
generics: LifetimeBounds::empty(),
explicit_self: None,
args: ~[Ptr(~Literal(Path::new_local("__D")),
Borrowed(None, MutMutable))],
ret_ty: Self,
inline: false,
const_nonmatching: true,
combine_substructure: decodable_substructure,
},
]
};
trait_def.expand(mitem, in_items)
}
fn decodable_substructure(cx: &ExtCtxt, span: Span,
substr: &Substructure) -> @Expr {
let decoder = substr.nonself_args[0];
let recurse = ~[cx.ident_of("extra"),
cx.ident_of("serialize"),
cx.ident_of("Decodable"),
cx.ident_of("decode")];
// throw an underscore in front to suppress unused variable warnings
let blkarg = cx.ident_of("_d");
let blkdecoder = cx.expr_ident(span, blkarg);
let calldecode = cx.expr_call_global(span, recurse, ~[blkdecoder]);
let lambdadecode = cx.lambda_expr_1(span, calldecode, blkarg);
return match *substr.fields {
StaticStruct(_, ref summary) => {
let nfields = match *summary {
Unnamed(ref fields) => fields.len(),
Named(ref fields) => fields.len()
};
let read_struct_field = cx.ident_of("read_struct_field");
let result = decode_static_fields(cx,
span,
substr.type_ident,
summary,
|span, name, field| {
cx.expr_method_call(span, blkdecoder, read_struct_field,
~[cx.expr_str(span, name),
cx.expr_uint(span, field),
lambdadecode])
});
cx.expr_method_call(span, decoder, cx.ident_of("read_struct"),
~[cx.expr_str(span, cx.str_of(substr.type_ident)),
cx.expr_uint(span, nfields),
cx.lambda_expr_1(span, result, blkarg)])
}
StaticEnum(_, ref fields) => {
let variant = cx.ident_of("i");
let mut arms = ~[];
let mut variants = ~[];
let rvariant_arg = cx.ident_of("read_enum_variant_arg");
for (i, f) in fields.iter().enumerate() {
let (name, parts) = match *f { (i, ref p) => (i, p) };
variants.push(cx.expr_str(span, cx.str_of(name)));
let decoded = decode_static_fields(cx,
span,
name,
parts,
|span, _, field| {
cx.expr_method_call(span, blkdecoder, rvariant_arg,
~[cx.expr_uint(span, field),
lambdadecode])
});
arms.push(cx.arm(span,
~[cx.pat_lit(span, cx.expr_uint(span, i))],
decoded));
}
arms.push(cx.arm_unreachable(span));
let result = cx.expr_match(span, cx.expr_ident(span, variant), arms);
let lambda = cx.lambda_expr(span, ~[blkarg, variant], result);
let variant_vec = cx.expr_vec(span, variants);
let result = cx.expr_method_call(span, blkdecoder,
cx.ident_of("read_enum_variant"),
~[variant_vec, lambda]);
cx.expr_method_call(span, decoder, cx.ident_of("read_enum"),
~[cx.expr_str(span, cx.str_of(substr.type_ident)),
cx.lambda_expr_1(span, result, blkarg)])
}
_ => cx.bug("expected StaticEnum or StaticStruct in deriving(Decodable)")
};
}
/// Create a decoder for a single enum variant/struct:
/// - `outer_pat_ident` is the name of this enum variant/struct
/// - `getarg` should retrieve the `uint`-th field with name `@str`.
fn decode_static_fields(cx: &ExtCtxt,
outer_span: Span,
outer_pat_ident: Ident,
fields: &StaticFields,
getarg: |Span, @str, uint| -> @Expr)
-> @Expr {
match *fields {
Unnamed(ref fields) => {
if fields.is_empty() {
cx.expr_ident(outer_span, outer_pat_ident)
} else |
}
Named(ref fields) => {
// use the field's span to get nicer error messages.
let fields = fields.iter().enumerate().map(|(i, &(name, span))| {
cx.field_imm(span, name, getarg(span, cx.str_of(name), i))
}).collect();
cx.expr_struct_ident(outer_span, outer_pat_ident, fields)
}
}
}
| {
let fields = fields.iter().enumerate().map(|(i, &span)| {
getarg(span, format!("_field{}", i).to_managed(), i)
}).collect();
cx.expr_call_ident(outer_span, outer_pat_ident, fields)
} | conditional_block |
decodable.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The compiler code necessary for #[deriving(Decodable)]. See
encodable.rs for more.
*/
use ast::{MetaItem, Item, Expr, MutMutable, Ident};
use codemap::Span;
use ext::base::ExtCtxt;
use ext::build::AstBuilder;
use ext::deriving::generic::*;
pub fn expand_deriving_decodable(cx: &ExtCtxt,
span: Span,
mitem: @MetaItem,
in_items: ~[@Item]) -> ~[@Item] {
let trait_def = TraitDef {
cx: cx, span: span,
path: Path::new_(~["extra", "serialize", "Decodable"], None,
~[~Literal(Path::new_local("__D"))], true),
additional_bounds: ~[],
generics: LifetimeBounds {
lifetimes: ~[],
bounds: ~[("__D", ~[Path::new(~["extra", "serialize", "Decoder"])])],
},
methods: ~[
MethodDef {
name: "decode",
generics: LifetimeBounds::empty(),
explicit_self: None,
args: ~[Ptr(~Literal(Path::new_local("__D")),
Borrowed(None, MutMutable))],
ret_ty: Self,
inline: false,
const_nonmatching: true,
combine_substructure: decodable_substructure,
},
]
};
trait_def.expand(mitem, in_items)
}
fn | (cx: &ExtCtxt, span: Span,
substr: &Substructure) -> @Expr {
let decoder = substr.nonself_args[0];
let recurse = ~[cx.ident_of("extra"),
cx.ident_of("serialize"),
cx.ident_of("Decodable"),
cx.ident_of("decode")];
// throw an underscore in front to suppress unused variable warnings
let blkarg = cx.ident_of("_d");
let blkdecoder = cx.expr_ident(span, blkarg);
let calldecode = cx.expr_call_global(span, recurse, ~[blkdecoder]);
let lambdadecode = cx.lambda_expr_1(span, calldecode, blkarg);
return match *substr.fields {
StaticStruct(_, ref summary) => {
let nfields = match *summary {
Unnamed(ref fields) => fields.len(),
Named(ref fields) => fields.len()
};
let read_struct_field = cx.ident_of("read_struct_field");
let result = decode_static_fields(cx,
span,
substr.type_ident,
summary,
|span, name, field| {
cx.expr_method_call(span, blkdecoder, read_struct_field,
~[cx.expr_str(span, name),
cx.expr_uint(span, field),
lambdadecode])
});
cx.expr_method_call(span, decoder, cx.ident_of("read_struct"),
~[cx.expr_str(span, cx.str_of(substr.type_ident)),
cx.expr_uint(span, nfields),
cx.lambda_expr_1(span, result, blkarg)])
}
StaticEnum(_, ref fields) => {
let variant = cx.ident_of("i");
let mut arms = ~[];
let mut variants = ~[];
let rvariant_arg = cx.ident_of("read_enum_variant_arg");
for (i, f) in fields.iter().enumerate() {
let (name, parts) = match *f { (i, ref p) => (i, p) };
variants.push(cx.expr_str(span, cx.str_of(name)));
let decoded = decode_static_fields(cx,
span,
name,
parts,
|span, _, field| {
cx.expr_method_call(span, blkdecoder, rvariant_arg,
~[cx.expr_uint(span, field),
lambdadecode])
});
arms.push(cx.arm(span,
~[cx.pat_lit(span, cx.expr_uint(span, i))],
decoded));
}
arms.push(cx.arm_unreachable(span));
let result = cx.expr_match(span, cx.expr_ident(span, variant), arms);
let lambda = cx.lambda_expr(span, ~[blkarg, variant], result);
let variant_vec = cx.expr_vec(span, variants);
let result = cx.expr_method_call(span, blkdecoder,
cx.ident_of("read_enum_variant"),
~[variant_vec, lambda]);
cx.expr_method_call(span, decoder, cx.ident_of("read_enum"),
~[cx.expr_str(span, cx.str_of(substr.type_ident)),
cx.lambda_expr_1(span, result, blkarg)])
}
_ => cx.bug("expected StaticEnum or StaticStruct in deriving(Decodable)")
};
}
/// Create a decoder for a single enum variant/struct:
/// - `outer_pat_ident` is the name of this enum variant/struct
/// - `getarg` should retrieve the `uint`-th field with name `@str`.
fn decode_static_fields(cx: &ExtCtxt,
outer_span: Span,
outer_pat_ident: Ident,
fields: &StaticFields,
getarg: |Span, @str, uint| -> @Expr)
-> @Expr {
match *fields {
Unnamed(ref fields) => {
if fields.is_empty() {
cx.expr_ident(outer_span, outer_pat_ident)
} else {
let fields = fields.iter().enumerate().map(|(i, &span)| {
getarg(span, format!("_field{}", i).to_managed(), i)
}).collect();
cx.expr_call_ident(outer_span, outer_pat_ident, fields)
}
}
Named(ref fields) => {
// use the field's span to get nicer error messages.
let fields = fields.iter().enumerate().map(|(i, &(name, span))| {
cx.field_imm(span, name, getarg(span, cx.str_of(name), i))
}).collect();
cx.expr_struct_ident(outer_span, outer_pat_ident, fields)
}
}
}
| decodable_substructure | identifier_name |
decodable.rs |
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The compiler code necessary for #[deriving(Decodable)]. See
encodable.rs for more.
*/
use ast::{MetaItem, Item, Expr, MutMutable, Ident};
use codemap::Span;
use ext::base::ExtCtxt;
use ext::build::AstBuilder;
use ext::deriving::generic::*;
pub fn expand_deriving_decodable(cx: &ExtCtxt,
span: Span,
mitem: @MetaItem,
in_items: ~[@Item]) -> ~[@Item] |
fn decodable_substructure(cx: &ExtCtxt, span: Span,
substr: &Substructure) -> @Expr {
let decoder = substr.nonself_args[0];
let recurse = ~[cx.ident_of("extra"),
cx.ident_of("serialize"),
cx.ident_of("Decodable"),
cx.ident_of("decode")];
// throw an underscore in front to suppress unused variable warnings
let blkarg = cx.ident_of("_d");
let blkdecoder = cx.expr_ident(span, blkarg);
let calldecode = cx.expr_call_global(span, recurse, ~[blkdecoder]);
let lambdadecode = cx.lambda_expr_1(span, calldecode, blkarg);
return match *substr.fields {
StaticStruct(_, ref summary) => {
let nfields = match *summary {
Unnamed(ref fields) => fields.len(),
Named(ref fields) => fields.len()
};
let read_struct_field = cx.ident_of("read_struct_field");
let result = decode_static_fields(cx,
span,
substr.type_ident,
summary,
|span, name, field| {
cx.expr_method_call(span, blkdecoder, read_struct_field,
~[cx.expr_str(span, name),
cx.expr_uint(span, field),
lambdadecode])
});
cx.expr_method_call(span, decoder, cx.ident_of("read_struct"),
~[cx.expr_str(span, cx.str_of(substr.type_ident)),
cx.expr_uint(span, nfields),
cx.lambda_expr_1(span, result, blkarg)])
}
StaticEnum(_, ref fields) => {
let variant = cx.ident_of("i");
let mut arms = ~[];
let mut variants = ~[];
let rvariant_arg = cx.ident_of("read_enum_variant_arg");
for (i, f) in fields.iter().enumerate() {
let (name, parts) = match *f { (i, ref p) => (i, p) };
variants.push(cx.expr_str(span, cx.str_of(name)));
let decoded = decode_static_fields(cx,
span,
name,
parts,
|span, _, field| {
cx.expr_method_call(span, blkdecoder, rvariant_arg,
~[cx.expr_uint(span, field),
lambdadecode])
});
arms.push(cx.arm(span,
~[cx.pat_lit(span, cx.expr_uint(span, i))],
decoded));
}
arms.push(cx.arm_unreachable(span));
let result = cx.expr_match(span, cx.expr_ident(span, variant), arms);
let lambda = cx.lambda_expr(span, ~[blkarg, variant], result);
let variant_vec = cx.expr_vec(span, variants);
let result = cx.expr_method_call(span, blkdecoder,
cx.ident_of("read_enum_variant"),
~[variant_vec, lambda]);
cx.expr_method_call(span, decoder, cx.ident_of("read_enum"),
~[cx.expr_str(span, cx.str_of(substr.type_ident)),
cx.lambda_expr_1(span, result, blkarg)])
}
_ => cx.bug("expected StaticEnum or StaticStruct in deriving(Decodable)")
};
}
/// Create a decoder for a single enum variant/struct:
/// - `outer_pat_ident` is the name of this enum variant/struct
/// - `getarg` should retrieve the `uint`-th field with name `@str`.
fn decode_static_fields(cx: &ExtCtxt,
outer_span: Span,
outer_pat_ident: Ident,
fields: &StaticFields,
getarg: |Span, @str, uint| -> @Expr)
-> @Expr {
match *fields {
Unnamed(ref fields) => {
if fields.is_empty() {
cx.expr_ident(outer_span, outer_pat_ident)
} else {
let fields = fields.iter().enumerate().map(|(i, &span)| {
getarg(span, format!("_field{}", i).to_managed(), i)
}).collect();
cx.expr_call_ident(outer_span, outer_pat_ident, fields)
}
}
Named(ref fields) => {
// use the field's span to get nicer error messages.
let fields = fields.iter().enumerate().map(|(i, &(name, span))| {
cx.field_imm(span, name, getarg(span, cx.str_of(name), i))
}).collect();
cx.expr_struct_ident(outer_span, outer_pat_ident, fields)
}
}
}
| {
let trait_def = TraitDef {
cx: cx, span: span,
path: Path::new_(~["extra", "serialize", "Decodable"], None,
~[~Literal(Path::new_local("__D"))], true),
additional_bounds: ~[],
generics: LifetimeBounds {
lifetimes: ~[],
bounds: ~[("__D", ~[Path::new(~["extra", "serialize", "Decoder"])])],
},
methods: ~[
MethodDef {
name: "decode",
generics: LifetimeBounds::empty(),
explicit_self: None,
args: ~[Ptr(~Literal(Path::new_local("__D")),
Borrowed(None, MutMutable))],
ret_ty: Self,
inline: false,
const_nonmatching: true,
combine_substructure: decodable_substructure,
},
]
};
trait_def.expand(mitem, in_items)
} | identifier_body |
decodable.rs |
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The compiler code necessary for #[deriving(Decodable)]. See
encodable.rs for more.
*/
use ast::{MetaItem, Item, Expr, MutMutable, Ident};
use codemap::Span;
use ext::base::ExtCtxt;
use ext::build::AstBuilder;
use ext::deriving::generic::*;
pub fn expand_deriving_decodable(cx: &ExtCtxt,
span: Span,
mitem: @MetaItem,
in_items: ~[@Item]) -> ~[@Item] {
let trait_def = TraitDef {
cx: cx, span: span,
path: Path::new_(~["extra", "serialize", "Decodable"], None,
~[~Literal(Path::new_local("__D"))], true),
additional_bounds: ~[],
generics: LifetimeBounds {
lifetimes: ~[],
bounds: ~[("__D", ~[Path::new(~["extra", "serialize", "Decoder"])])],
},
methods: ~[
MethodDef {
name: "decode",
generics: LifetimeBounds::empty(),
explicit_self: None,
args: ~[Ptr(~Literal(Path::new_local("__D")),
Borrowed(None, MutMutable))],
ret_ty: Self,
inline: false,
const_nonmatching: true,
combine_substructure: decodable_substructure,
},
]
};
trait_def.expand(mitem, in_items)
}
fn decodable_substructure(cx: &ExtCtxt, span: Span,
substr: &Substructure) -> @Expr {
let decoder = substr.nonself_args[0];
let recurse = ~[cx.ident_of("extra"),
cx.ident_of("serialize"),
cx.ident_of("Decodable"),
cx.ident_of("decode")];
// throw an underscore in front to suppress unused variable warnings
let blkarg = cx.ident_of("_d");
let blkdecoder = cx.expr_ident(span, blkarg);
let calldecode = cx.expr_call_global(span, recurse, ~[blkdecoder]);
let lambdadecode = cx.lambda_expr_1(span, calldecode, blkarg);
return match *substr.fields {
StaticStruct(_, ref summary) => {
let nfields = match *summary {
Unnamed(ref fields) => fields.len(),
Named(ref fields) => fields.len()
};
let read_struct_field = cx.ident_of("read_struct_field");
let result = decode_static_fields(cx,
span,
substr.type_ident,
summary,
|span, name, field| { | cx.expr_method_call(span, decoder, cx.ident_of("read_struct"),
~[cx.expr_str(span, cx.str_of(substr.type_ident)),
cx.expr_uint(span, nfields),
cx.lambda_expr_1(span, result, blkarg)])
}
StaticEnum(_, ref fields) => {
let variant = cx.ident_of("i");
let mut arms = ~[];
let mut variants = ~[];
let rvariant_arg = cx.ident_of("read_enum_variant_arg");
for (i, f) in fields.iter().enumerate() {
let (name, parts) = match *f { (i, ref p) => (i, p) };
variants.push(cx.expr_str(span, cx.str_of(name)));
let decoded = decode_static_fields(cx,
span,
name,
parts,
|span, _, field| {
cx.expr_method_call(span, blkdecoder, rvariant_arg,
~[cx.expr_uint(span, field),
lambdadecode])
});
arms.push(cx.arm(span,
~[cx.pat_lit(span, cx.expr_uint(span, i))],
decoded));
}
arms.push(cx.arm_unreachable(span));
let result = cx.expr_match(span, cx.expr_ident(span, variant), arms);
let lambda = cx.lambda_expr(span, ~[blkarg, variant], result);
let variant_vec = cx.expr_vec(span, variants);
let result = cx.expr_method_call(span, blkdecoder,
cx.ident_of("read_enum_variant"),
~[variant_vec, lambda]);
cx.expr_method_call(span, decoder, cx.ident_of("read_enum"),
~[cx.expr_str(span, cx.str_of(substr.type_ident)),
cx.lambda_expr_1(span, result, blkarg)])
}
_ => cx.bug("expected StaticEnum or StaticStruct in deriving(Decodable)")
};
}
/// Create a decoder for a single enum variant/struct:
/// - `outer_pat_ident` is the name of this enum variant/struct
/// - `getarg` should retrieve the `uint`-th field with name `@str`.
fn decode_static_fields(cx: &ExtCtxt,
outer_span: Span,
outer_pat_ident: Ident,
fields: &StaticFields,
getarg: |Span, @str, uint| -> @Expr)
-> @Expr {
match *fields {
Unnamed(ref fields) => {
if fields.is_empty() {
cx.expr_ident(outer_span, outer_pat_ident)
} else {
let fields = fields.iter().enumerate().map(|(i, &span)| {
getarg(span, format!("_field{}", i).to_managed(), i)
}).collect();
cx.expr_call_ident(outer_span, outer_pat_ident, fields)
}
}
Named(ref fields) => {
// use the field's span to get nicer error messages.
let fields = fields.iter().enumerate().map(|(i, &(name, span))| {
cx.field_imm(span, name, getarg(span, cx.str_of(name), i))
}).collect();
cx.expr_struct_ident(outer_span, outer_pat_ident, fields)
}
}
} | cx.expr_method_call(span, blkdecoder, read_struct_field,
~[cx.expr_str(span, name),
cx.expr_uint(span, field),
lambdadecode])
}); | random_line_split |
simpleturing.js |
var simpleturing = (function() {
function Machine(initial, states) {
this.run = function(initialdata, position, defaultvalue) {
var tape = initialdata.slice();
if (position == null)
position = 0;
var state = initial;
while (true) {
// console.log(tape);
// console.log(state);
// console.log(position);
// console.log("--------");
var value = tape[position];
if (value == null)
value = defaultvalue;
var next = states[state][value]; // transition for the current state/symbol
tape[position] = next[0];
state = next[1];
if (state == 'stop')
return tape;
position += next[2];
}
}
}
return function(initial, states) {
return new Machine(initial, states);
}
}());
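// Usage sketch: a one-state machine that inverts a binary tape and halts on
// the first blank cell (the state names and the ' ' blank symbol are
// illustrative, not part of the library):
//
//   var invert = simpleturing('go', {
//     go: {
//       0: [1, 'go', 1],      // write 1, stay in 'go', move right
//       1: [0, 'go', 1],      // write 0, stay in 'go', move right
//       ' ': [' ', 'stop', 0] // blank cell: halt
//     }
//   });
//   invert.run([1, 0, 1, 1], 0, ' '); // -> [0, 1, 0, 0, ' ']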
if (typeof(window) === 'undefined') | {
module.exports = simpleturing;
} | conditional_block |
|
simpleturing.js |
var simpleturing = (function() {
function | (initial, states) {
this.run = function(initialdata, position, defaultvalue) {
var tape = initialdata.slice();
if (position == null)
position = 0;
var state = initial;
while (true) {
// console.log(tape);
// console.log(state);
// console.log(position);
// console.log("--------");
var value = tape[position];
if (value == null)
value = defaultvalue;
var next = states[state][value]; // transition for the current state/symbol
tape[position] = next[0];
state = next[1];
if (state == 'stop')
return tape;
position += next[2];
}
}
}
return function(initial, states) {
return new Machine(initial, states);
}
}());
if (typeof(window) === 'undefined') {
module.exports = simpleturing;
}
| Machine | identifier_name |
simpleturing.js |
var simpleturing = (function() {
function Machine(initial, states) |
return function(initial, states) {
return new Machine(initial, states);
}
}());
if (typeof(window) === 'undefined') {
module.exports = simpleturing;
}
| {
this.run = function(initialdata, position, defaultvalue) {
var tape = initialdata.slice();
if (position == null)
position = 0;
var state = initial;
while (true) {
// console.log(tape);
// console.log(state);
// console.log(position);
// console.log("--------");
var value = tape[position];
if (value == null)
value = defaultvalue;
var next = states[state][value]; // transition for the current state/symbol
tape[position] = next[0];
state = next[1];
if (state == 'stop')
return tape;
position += next[2];
}
}
} | identifier_body |
simpleturing.js | var simpleturing = (function() {
function Machine(initial, states) {
this.run = function(initialdata, position, defaultvalue) {
var tape = initialdata.slice();
if (position == null)
position = 0;
var state = initial;
while (true) {
// console.log(tape);
// console.log(state);
// console.log(position);
// console.log("--------");
var value = tape[position];
if (value == null)
value = defaultvalue;
next = states[state][value];
tape[position] = next[0];
state = next[1];
if (state == 'stop')
return tape;
position += next[2];
}
}
}
return function(initial, states) {
return new Machine(initial, states);
}
}());
if (typeof(window) === 'undefined') {
module.exports = simpleturing;
| } | random_line_split |
|
create_db.py |
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import sys
import os.path
import re
import operator
import digits
from digits import utils
from digits.utils import subclass, override
from digits.task import Task
# NOTE: Increment this every time the pickled version changes
PICKLE_VERSION = 3
@subclass
class CreateDbTask(Task):
"""Creates a database"""
def __init__(self, input_file, db_name, backend, image_dims, **kwargs):
|
def __getstate__(self):
d = super(CreateDbTask, self).__getstate__()
if 'create_db_log' in d:
# don't save file handle
del d['create_db_log']
if 'labels' in d:
del d['labels']
return d
def __setstate__(self, state):
super(CreateDbTask, self).__setstate__(state)
if self.pickver_task_createdb <= 1:
if self.image_dims[2] == 1:
self.image_channel_order = None
elif self.encode:
self.image_channel_order = 'BGR'
else:
self.image_channel_order = 'RGB'
if self.pickver_task_createdb <= 2:
if hasattr(self, 'encode'):
if self.encode:
self.encoding = 'jpg'
else:
self.encoding = 'none'
delattr(self, 'encode')
else:
self.encoding = 'none'
self.pickver_task_createdb = PICKLE_VERSION
if not hasattr(self, 'backend') or self.backend is None:
self.backend = 'lmdb'
if not hasattr(self, 'compression') or self.compression is None:
self.compression = 'none'
@override
def name(self):
if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
return 'Create DB (train)'
elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
return 'Create DB (val)'
elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
return 'Create DB (test)'
else:
return 'Create DB (%s)' % self.db_name
@override
def before_run(self):
super(CreateDbTask, self).before_run()
self.create_db_log = open(self.path(self.create_db_log_file), 'a')
@override
def html_id(self):
if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
return 'task-create_db-train'
elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
return 'task-create_db-val'
elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
return 'task-create_db-test'
else:
return super(CreateDbTask, self).html_id()
@override
def offer_resources(self, resources):
key = 'create_db_task_pool'
if key not in resources:
return None
for resource in resources[key]:
if resource.remaining() >= 1:
return {key: [(resource.identifier, 1)]}
return None
@override
def task_arguments(self, resources, env):
args = [sys.executable, os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(digits.__file__))),
'tools', 'create_db.py'),
self.path(self.input_file),
self.path(self.db_name),
self.image_dims[1],
self.image_dims[0],
'--backend=%s' % self.backend,
'--channels=%s' % self.image_dims[2],
'--resize_mode=%s' % self.resize_mode,
]
if self.mean_file is not None:
args.append('--mean_file=%s' % self.path(self.mean_file))
# Add a visual mean_file
args.append('--mean_file=%s' % self.path(utils.constants.MEAN_FILE_IMAGE))
if self.image_folder:
args.append('--image_folder=%s' % self.image_folder)
if self.shuffle:
args.append('--shuffle')
if self.encoding and self.encoding != 'none':
args.append('--encoding=%s' % self.encoding)
if self.compression and self.compression != 'none':
args.append('--compression=%s' % self.compression)
if self.backend == 'hdf5':
args.append('--hdf5_dset_limit=%d' % 2**31)
return args
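# Illustrative assembled command (values are hypothetical, not from the
# source): with image_dims=(256, 256, 3), backend='lmdb',
# resize_mode='squash' and shuffle=True, the list above expands to roughly
#
#   python tools/create_db.py train.txt train_db 256 256 \
#       --backend=lmdb --channels=3 --resize_mode=squash --shuffle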
@override
def process_output(self, line):
from digits.webapp import socketio
self.create_db_log.write('%s\n' % line)
self.create_db_log.flush()
timestamp, level, message = self.preprocess_output_digits(line)
if not message:
return False
# progress
match = re.match(r'Processed (\d+)\/(\d+)', message)
if match:
self.progress = float(match.group(1))/int(match.group(2))
self.emit_progress_update()
return True
# distribution
match = re.match(r'Category (\d+) has (\d+)', message)
if match and self.labels_file is not None:
if not hasattr(self, 'distribution') or self.distribution is None:
self.distribution = {}
self.distribution[match.group(1)] = int(match.group(2))
data = self.distribution_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'distribution',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
return True
# result
match = re.match(r'(\d+) images written to database', message)
if match:
self.entries_count = int(match.group(1))
self.logger.debug(message)
return True
if level == 'warning':
self.logger.warning('%s: %s' % (self.name(), message))
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
return True
@override
def after_run(self):
from digits.webapp import socketio
super(CreateDbTask, self).after_run()
self.create_db_log.close()
if self.backend == 'lmdb':
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'exploration-ready',
},
namespace='/jobs',
room=self.job_id,
)
elif self.backend == 'hdf5':
# add more path information to the list of h5 files
lines = None
with open(self.path(self.textfile)) as infile:
lines = infile.readlines()
with open(self.path(self.textfile), 'w') as outfile:
for line in lines:
# XXX this works because the model job will be in an adjacent folder
outfile.write('%s\n' % os.path.join(
'..', self.job_id, self.db_name, line.strip()))
if self.mean_file:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'mean-image',
# XXX Can't use url_for here because we don't have a request context
'data': '/files/' + self.path('mean.jpg', relative=True),
},
namespace='/jobs',
room=self.job_id,
)
def get_labels(self):
"""
Read labels from labels_file and return them in a list
"""
# The labels might be set already
if hasattr(self, '_labels') and self._labels and len(self._labels) > 0:
return self._labels
assert hasattr(self, 'labels_file'), 'labels_file not set'
assert self.labels_file, 'labels_file not set'
assert os.path.exists(self.path(self.labels_file)), 'labels_file does not exist'
labels = []
with open(self.path(self.labels_file)) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
assert len(labels) > 0, 'no labels in labels_file'
self._labels = labels
return self._labels
def distribution_data(self):
"""
Returns distribution data for a C3.js graph
"""
if self.distribution is None:
return None
try:
labels = self.get_labels()
except AssertionError:
return None
if len(self.distribution.keys()) != len(labels):
return None
values = ['Count']
titles = []
for key, value in sorted(
self.distribution.items(),
key=operator.itemgetter(1),
reverse=True):
values.append(value)
titles.append(labels[int(key)])
return {
'data': {
'columns': [values],
'type': 'bar'
},
'axis': {
'x': {
'type': 'category',
'categories': titles,
}
},
}
| """
Arguments:
input_file -- read images and labels from this file
db_name -- save database to this location
backend -- database backend (lmdb/hdf5)
image_dims -- (height, width, channels)
Keyword Arguments:
image_folder -- prepend image paths with this folder
shuffle -- shuffle images before saving
resize_mode -- used in utils.image.resize_image()
encoding -- 'none', 'png' or 'jpg'
compression -- 'none' or 'gzip'
mean_file -- save mean file to this location
labels_file -- used to print category distribution
"""
# Take keyword arguments out of kwargs
self.image_folder = kwargs.pop('image_folder', None)
self.shuffle = kwargs.pop('shuffle', True)
self.resize_mode = kwargs.pop('resize_mode' , None)
self.encoding = kwargs.pop('encoding', None)
self.compression = kwargs.pop('compression', None)
self.mean_file = kwargs.pop('mean_file', None)
self.labels_file = kwargs.pop('labels_file', None)
super(CreateDbTask, self).__init__(**kwargs)
self.pickver_task_createdb = PICKLE_VERSION
self.input_file = input_file
self.db_name = db_name
self.backend = backend
if backend == 'hdf5':
# the list of hdf5 files is stored in a textfile
self.textfile = os.path.join(self.db_name, 'list.txt')
self.image_dims = image_dims
if image_dims[2] == 3:
self.image_channel_order = 'BGR'
else:
self.image_channel_order = None
self.entries_count = None
self.distribution = None
self.create_db_log_file = "create_%s.log" % db_name
| identifier_body
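# A hypothetical instantiation matching the __init__ signature above (the
# remaining keyword arguments flow through to digits.task.Task and are
# omitted here):
#
#   task = CreateDbTask('train.txt', 'train_db', 'lmdb', (256, 256, 3),
#                       resize_mode='squash', encoding='jpg',
#                       shuffle=True)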
create_db.py |
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import sys
import os.path
import re
import operator
import digits
from digits import utils
from digits.utils import subclass, override
from digits.task import Task
# NOTE: Increment this every time the pickled version changes
PICKLE_VERSION = 3
@subclass
class CreateDbTask(Task):
"""Creates a database"""
def __init__(self, input_file, db_name, backend, image_dims, **kwargs):
"""
Arguments:
input_file -- read images and labels from this file
db_name -- save database to this location
backend -- database backend (lmdb/hdf5)
image_dims -- (height, width, channels)
Keyword Arguments:
image_folder -- prepend image paths with this folder
shuffle -- shuffle images before saving
resize_mode -- used in utils.image.resize_image()
encoding -- 'none', 'png' or 'jpg'
compression -- 'none' or 'gzip'
mean_file -- save mean file to this location
labels_file -- used to print category distribution
"""
# Take keyword arguments out of kwargs
self.image_folder = kwargs.pop('image_folder', None)
self.shuffle = kwargs.pop('shuffle', True)
self.resize_mode = kwargs.pop('resize_mode' , None)
self.encoding = kwargs.pop('encoding', None)
self.compression = kwargs.pop('compression', None)
self.mean_file = kwargs.pop('mean_file', None)
self.labels_file = kwargs.pop('labels_file', None)
super(CreateDbTask, self).__init__(**kwargs)
self.pickver_task_createdb = PICKLE_VERSION
self.input_file = input_file
self.db_name = db_name
self.backend = backend
if backend == 'hdf5':
# the list of hdf5 files is stored in a textfile
self.textfile = os.path.join(self.db_name, 'list.txt')
self.image_dims = image_dims
if image_dims[2] == 3:
self.image_channel_order = 'BGR'
else:
self.image_channel_order = None
self.entries_count = None
self.distribution = None
self.create_db_log_file = "create_%s.log" % db_name
def __getstate__(self):
d = super(CreateDbTask, self).__getstate__()
if 'create_db_log' in d:
# don't save file handle
del d['create_db_log']
if 'labels' in d:
del d['labels']
return d
def __setstate__(self, state):
super(CreateDbTask, self).__setstate__(state)
if self.pickver_task_createdb <= 1:
if self.image_dims[2] == 1:
self.image_channel_order = None
elif self.encode:
self.image_channel_order = 'BGR'
else:
self.image_channel_order = 'RGB'
if self.pickver_task_createdb <= 2:
if hasattr(self, 'encode'):
if self.encode:
self.encoding = 'jpg'
else:
self.encoding = 'none'
delattr(self, 'encode')
else:
self.encoding = 'none'
self.pickver_task_createdb = PICKLE_VERSION
if not hasattr(self, 'backend') or self.backend is None:
self.backend = 'lmdb'
if not hasattr(self, 'compression') or self.compression is None:
self.compression = 'none'
@override
def name(self):
if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
return 'Create DB (train)'
elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
return 'Create DB (val)'
elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
return 'Create DB (test)'
else:
return 'Create DB (%s)' % self.db_name |
@override
def html_id(self):
if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
return 'task-create_db-train'
elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
return 'task-create_db-val'
elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
return 'task-create_db-test'
else:
return super(CreateDbTask, self).html_id()
@override
def offer_resources(self, resources):
key = 'create_db_task_pool'
if key not in resources:
return None
for resource in resources[key]:
if resource.remaining() >= 1:
return {key: [(resource.identifier, 1)]}
return None
@override
def task_arguments(self, resources, env):
args = [sys.executable, os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(digits.__file__))),
'tools', 'create_db.py'),
self.path(self.input_file),
self.path(self.db_name),
self.image_dims[1],
self.image_dims[0],
'--backend=%s' % self.backend,
'--channels=%s' % self.image_dims[2],
'--resize_mode=%s' % self.resize_mode,
]
if self.mean_file is not None:
args.append('--mean_file=%s' % self.path(self.mean_file))
# Add a visual mean_file
args.append('--mean_file=%s' % self.path(utils.constants.MEAN_FILE_IMAGE))
if self.image_folder:
args.append('--image_folder=%s' % self.image_folder)
if self.shuffle:
args.append('--shuffle')
if self.encoding and self.encoding != 'none':
args.append('--encoding=%s' % self.encoding)
if self.compression and self.compression != 'none':
args.append('--compression=%s' % self.compression)
if self.backend == 'hdf5':
args.append('--hdf5_dset_limit=%d' % 2**31)
return args
@override
def process_output(self, line):
from digits.webapp import socketio
self.create_db_log.write('%s\n' % line)
self.create_db_log.flush()
timestamp, level, message = self.preprocess_output_digits(line)
if not message:
return False
# progress
match = re.match(r'Processed (\d+)\/(\d+)', message)
if match:
self.progress = float(match.group(1))/int(match.group(2))
self.emit_progress_update()
return True
# distribution
match = re.match(r'Category (\d+) has (\d+)', message)
if match and self.labels_file is not None:
if not hasattr(self, 'distribution') or self.distribution is None:
self.distribution = {}
self.distribution[match.group(1)] = int(match.group(2))
data = self.distribution_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'distribution',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
return True
# result
match = re.match(r'(\d+) images written to database', message)
if match:
self.entries_count = int(match.group(1))
self.logger.debug(message)
return True
if level == 'warning':
self.logger.warning('%s: %s' % (self.name(), message))
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
return True
@override
def after_run(self):
from digits.webapp import socketio
super(CreateDbTask, self).after_run()
self.create_db_log.close()
if self.backend == 'lmdb':
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'exploration-ready',
},
namespace='/jobs',
room=self.job_id,
)
elif self.backend == 'hdf5':
# add more path information to the list of h5 files
lines = None
with open(self.path(self.textfile)) as infile:
lines = infile.readlines()
with open(self.path(self.textfile), 'w') as outfile:
for line in lines:
# XXX this works because the model job will be in an adjacent folder
outfile.write('%s\n' % os.path.join(
'..', self.job_id, self.db_name, line.strip()))
if self.mean_file:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'mean-image',
# XXX Can't use url_for here because we don't have a request context
'data': '/files/' + self.path('mean.jpg', relative=True),
},
namespace='/jobs',
room=self.job_id,
)
def get_labels(self):
"""
Read labels from labels_file and return them in a list
"""
# The labels might be set already
if hasattr(self, '_labels') and self._labels and len(self._labels) > 0:
return self._labels
assert hasattr(self, 'labels_file'), 'labels_file not set'
assert self.labels_file, 'labels_file not set'
assert os.path.exists(self.path(self.labels_file)), 'labels_file does not exist'
labels = []
with open(self.path(self.labels_file)) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
assert len(labels) > 0, 'no labels in labels_file'
self._labels = labels
return self._labels
def distribution_data(self):
"""
Returns distribution data for a C3.js graph
"""
if self.distribution is None:
return None
try:
labels = self.get_labels()
except AssertionError:
return None
if len(self.distribution.keys()) != len(labels):
return None
values = ['Count']
titles = []
for key, value in sorted(
self.distribution.items(),
key=operator.itemgetter(1),
reverse=True):
values.append(value)
titles.append(labels[int(key)])
return {
'data': {
'columns': [values],
'type': 'bar'
},
'axis': {
'x': {
'type': 'category',
'categories': titles,
}
},
} |
@override
def before_run(self):
super(CreateDbTask, self).before_run()
self.create_db_log = open(self.path(self.create_db_log_file), 'a') | random_line_split |
create_db.py |
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import sys
import os.path
import re
import operator
import digits
from digits import utils
from digits.utils import subclass, override
from digits.task import Task
# NOTE: Increment this every time the pickled version changes
PICKLE_VERSION = 3
@subclass
class CreateDbTask(Task):
"""Creates a database"""
def __init__(self, input_file, db_name, backend, image_dims, **kwargs):
"""
Arguments:
input_file -- read images and labels from this file
db_name -- save database to this location
backend -- database backend (lmdb/hdf5)
image_dims -- (height, width, channels)
Keyword Arguments:
image_folder -- prepend image paths with this folder
shuffle -- shuffle images before saving
resize_mode -- used in utils.image.resize_image()
encoding -- 'none', 'png' or 'jpg'
compression -- 'none' or 'gzip'
mean_file -- save mean file to this location
labels_file -- used to print category distribution
"""
# Take keyword arguments out of kwargs
self.image_folder = kwargs.pop('image_folder', None)
self.shuffle = kwargs.pop('shuffle', True)
self.resize_mode = kwargs.pop('resize_mode' , None)
self.encoding = kwargs.pop('encoding', None)
self.compression = kwargs.pop('compression', None)
self.mean_file = kwargs.pop('mean_file', None)
self.labels_file = kwargs.pop('labels_file', None)
super(CreateDbTask, self).__init__(**kwargs)
self.pickver_task_createdb = PICKLE_VERSION
self.input_file = input_file
self.db_name = db_name
self.backend = backend
if backend == 'hdf5':
# the list of hdf5 files is stored in a textfile
self.textfile = os.path.join(self.db_name, 'list.txt')
self.image_dims = image_dims
if image_dims[2] == 3:
self.image_channel_order = 'BGR'
else:
self.image_channel_order = None
self.entries_count = None
self.distribution = None
self.create_db_log_file = "create_%s.log" % db_name
def __getstate__(self):
d = super(CreateDbTask, self).__getstate__()
if 'create_db_log' in d:
# don't save file handle
del d['create_db_log']
if 'labels' in d:
del d['labels']
return d
def __setstate__(self, state):
super(CreateDbTask, self).__setstate__(state)
if self.pickver_task_createdb <= 1:
if self.image_dims[2] == 1:
self.image_channel_order = None
elif self.encode:
self.image_channel_order = 'BGR'
else:
self.image_channel_order = 'RGB'
if self.pickver_task_createdb <= 2:
if hasattr(self, 'encode'):
if self.encode:
self.encoding = 'jpg'
else:
self.encoding = 'none'
delattr(self, 'encode')
else:
self.encoding = 'none'
self.pickver_task_createdb = PICKLE_VERSION
if not hasattr(self, 'backend') or self.backend is None:
self.backend = 'lmdb'
if not hasattr(self, 'compression') or self.compression is None:
self.compression = 'none'
@override
def name(self):
if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
return 'Create DB (train)'
elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
return 'Create DB (val)'
elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
return 'Create DB (test)'
else:
return 'Create DB (%s)' % self.db_name
@override
def before_run(self):
super(CreateDbTask, self).before_run()
self.create_db_log = open(self.path(self.create_db_log_file), 'a')
@override
def html_id(self):
if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
return 'task-create_db-train'
elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
return 'task-create_db-val'
elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
return 'task-create_db-test'
else:
return super(CreateDbTask, self).html_id()
@override
def offer_resources(self, resources):
key = 'create_db_task_pool'
if key not in resources:
return None
for resource in resources[key]:
if resource.remaining() >= 1:
return {key: [(resource.identifier, 1)]}
return None
@override
def | (self, resources, env):
args = [sys.executable, os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(digits.__file__))),
'tools', 'create_db.py'),
self.path(self.input_file),
self.path(self.db_name),
self.image_dims[1],
self.image_dims[0],
'--backend=%s' % self.backend,
'--channels=%s' % self.image_dims[2],
'--resize_mode=%s' % self.resize_mode,
]
if self.mean_file is not None:
args.append('--mean_file=%s' % self.path(self.mean_file))
# Add a visual mean_file
args.append('--mean_file=%s' % self.path(utils.constants.MEAN_FILE_IMAGE))
if self.image_folder:
args.append('--image_folder=%s' % self.image_folder)
if self.shuffle:
args.append('--shuffle')
if self.encoding and self.encoding != 'none':
args.append('--encoding=%s' % self.encoding)
if self.compression and self.compression != 'none':
args.append('--compression=%s' % self.compression)
if self.backend == 'hdf5':
args.append('--hdf5_dset_limit=%d' % 2**31)
return args
@override
def process_output(self, line):
from digits.webapp import socketio
self.create_db_log.write('%s\n' % line)
self.create_db_log.flush()
timestamp, level, message = self.preprocess_output_digits(line)
if not message:
return False
# progress
match = re.match(r'Processed (\d+)\/(\d+)', message)
if match:
self.progress = float(match.group(1))/int(match.group(2))
self.emit_progress_update()
return True
# distribution
match = re.match(r'Category (\d+) has (\d+)', message)
if match and self.labels_file is not None:
if not hasattr(self, 'distribution') or self.distribution is None:
self.distribution = {}
self.distribution[match.group(1)] = int(match.group(2))
data = self.distribution_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'distribution',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
return True
# result
match = re.match(r'(\d+) images written to database', message)
if match:
self.entries_count = int(match.group(1))
self.logger.debug(message)
return True
if level == 'warning':
self.logger.warning('%s: %s' % (self.name(), message))
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
return True
@override
def after_run(self):
from digits.webapp import socketio
super(CreateDbTask, self).after_run()
self.create_db_log.close()
if self.backend == 'lmdb':
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'exploration-ready',
},
namespace='/jobs',
room=self.job_id,
)
elif self.backend == 'hdf5':
# add more path information to the list of h5 files
lines = None
with open(self.path(self.textfile)) as infile:
lines = infile.readlines()
with open(self.path(self.textfile), 'w') as outfile:
for line in lines:
# XXX this works because the model job will be in an adjacent folder
outfile.write('%s\n' % os.path.join(
'..', self.job_id, self.db_name, line.strip()))
if self.mean_file:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'mean-image',
# XXX Can't use url_for here because we don't have a request context
'data': '/files/' + self.path('mean.jpg', relative=True),
},
namespace='/jobs',
room=self.job_id,
)
def get_labels(self):
"""
Read labels from labels_file and return them in a list
"""
# The labels might be set already
if hasattr(self, '_labels') and self._labels and len(self._labels) > 0:
return self._labels
assert hasattr(self, 'labels_file'), 'labels_file not set'
assert self.labels_file, 'labels_file not set'
assert os.path.exists(self.path(self.labels_file)), 'labels_file does not exist'
labels = []
with open(self.path(self.labels_file)) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
assert len(labels) > 0, 'no labels in labels_file'
self._labels = labels
return self._labels
def distribution_data(self):
"""
Returns distribution data for a C3.js graph
"""
if self.distribution is None:
return None
try:
labels = self.get_labels()
except AssertionError:
return None
if len(self.distribution.keys()) != len(labels):
return None
values = ['Count']
titles = []
for key, value in sorted(
self.distribution.items(),
key=operator.itemgetter(1),
reverse=True):
values.append(value)
titles.append(labels[int(key)])
return {
'data': {
'columns': [values],
'type': 'bar'
},
'axis': {
'x': {
'type': 'category',
'categories': titles,
}
},
}
| task_arguments | identifier_name |
create_db.py |
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import sys
import os.path
import re
import operator
import digits
from digits import utils
from digits.utils import subclass, override
from digits.task import Task
# NOTE: Increment this every time the pickled version changes
PICKLE_VERSION = 3
@subclass
class CreateDbTask(Task):
"""Creates a database"""
def __init__(self, input_file, db_name, backend, image_dims, **kwargs):
"""
Arguments:
input_file -- read images and labels from this file
db_name -- save database to this location
backend -- database backend (lmdb/hdf5)
image_dims -- (height, width, channels)
Keyword Arguments:
image_folder -- prepend image paths with this folder
shuffle -- shuffle images before saving
resize_mode -- used in utils.image.resize_image()
encoding -- 'none', 'png' or 'jpg'
compression -- 'none' or 'gzip'
mean_file -- save mean file to this location
labels_file -- used to print category distribution
"""
# Take keyword arguments out of kwargs
self.image_folder = kwargs.pop('image_folder', None)
self.shuffle = kwargs.pop('shuffle', True)
        self.resize_mode = kwargs.pop('resize_mode', None)
self.encoding = kwargs.pop('encoding', None)
self.compression = kwargs.pop('compression', None)
self.mean_file = kwargs.pop('mean_file', None)
self.labels_file = kwargs.pop('labels_file', None)
super(CreateDbTask, self).__init__(**kwargs)
self.pickver_task_createdb = PICKLE_VERSION
self.input_file = input_file
self.db_name = db_name
self.backend = backend
if backend == 'hdf5':
# the list of hdf5 files is stored in a textfile
self.textfile = os.path.join(self.db_name, 'list.txt')
self.image_dims = image_dims
if image_dims[2] == 3:
self.image_channel_order = 'BGR'
else:
self.image_channel_order = None
self.entries_count = None
self.distribution = None
self.create_db_log_file = "create_%s.log" % db_name
def __getstate__(self):
d = super(CreateDbTask, self).__getstate__()
if 'create_db_log' in d:
# don't save file handle
del d['create_db_log']
if 'labels' in d:
del d['labels']
return d
def __setstate__(self, state):
super(CreateDbTask, self).__setstate__(state)
if self.pickver_task_createdb <= 1:
if self.image_dims[2] == 1:
self.image_channel_order = None
elif self.encode:
self.image_channel_order = 'BGR'
else:
self.image_channel_order = 'RGB'
if self.pickver_task_createdb <= 2:
if hasattr(self, 'encode'):
if self.encode:
self.encoding = 'jpg'
else:
self.encoding = 'none'
delattr(self, 'encode')
else:
|
self.pickver_task_createdb = PICKLE_VERSION
if not hasattr(self, 'backend') or self.backend is None:
self.backend = 'lmdb'
if not hasattr(self, 'compression') or self.compression is None:
self.compression = 'none'
@override
def name(self):
if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
return 'Create DB (train)'
elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
return 'Create DB (val)'
elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
return 'Create DB (test)'
else:
return 'Create DB (%s)' % self.db_name
@override
def before_run(self):
super(CreateDbTask, self).before_run()
self.create_db_log = open(self.path(self.create_db_log_file), 'a')
@override
def html_id(self):
if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
return 'task-create_db-train'
elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
return 'task-create_db-val'
elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
return 'task-create_db-test'
else:
return super(CreateDbTask, self).html_id()
@override
def offer_resources(self, resources):
key = 'create_db_task_pool'
if key not in resources:
return None
for resource in resources[key]:
if resource.remaining() >= 1:
return {key: [(resource.identifier, 1)]}
return None
@override
def task_arguments(self, resources, env):
args = [sys.executable, os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(digits.__file__))),
'tools', 'create_db.py'),
self.path(self.input_file),
self.path(self.db_name),
self.image_dims[1],
self.image_dims[0],
'--backend=%s' % self.backend,
'--channels=%s' % self.image_dims[2],
'--resize_mode=%s' % self.resize_mode,
]
if self.mean_file is not None:
args.append('--mean_file=%s' % self.path(self.mean_file))
# Add a visual mean_file
args.append('--mean_file=%s' % self.path(utils.constants.MEAN_FILE_IMAGE))
if self.image_folder:
args.append('--image_folder=%s' % self.image_folder)
if self.shuffle:
args.append('--shuffle')
if self.encoding and self.encoding != 'none':
args.append('--encoding=%s' % self.encoding)
if self.compression and self.compression != 'none':
args.append('--compression=%s' % self.compression)
if self.backend == 'hdf5':
args.append('--hdf5_dset_limit=%d' % 2**31)
return args
@override
def process_output(self, line):
from digits.webapp import socketio
self.create_db_log.write('%s\n' % line)
self.create_db_log.flush()
timestamp, level, message = self.preprocess_output_digits(line)
if not message:
return False
# progress
match = re.match(r'Processed (\d+)\/(\d+)', message)
if match:
self.progress = float(match.group(1))/int(match.group(2))
self.emit_progress_update()
return True
# distribution
match = re.match(r'Category (\d+) has (\d+)', message)
if match and self.labels_file is not None:
if not hasattr(self, 'distribution') or self.distribution is None:
self.distribution = {}
self.distribution[match.group(1)] = int(match.group(2))
data = self.distribution_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'distribution',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
return True
# result
match = re.match(r'(\d+) images written to database', message)
if match:
self.entries_count = int(match.group(1))
self.logger.debug(message)
return True
if level == 'warning':
self.logger.warning('%s: %s' % (self.name(), message))
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
return True
@override
def after_run(self):
from digits.webapp import socketio
super(CreateDbTask, self).after_run()
self.create_db_log.close()
if self.backend == 'lmdb':
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'exploration-ready',
},
namespace='/jobs',
room=self.job_id,
)
elif self.backend == 'hdf5':
# add more path information to the list of h5 files
lines = None
with open(self.path(self.textfile)) as infile:
lines = infile.readlines()
with open(self.path(self.textfile), 'w') as outfile:
for line in lines:
# XXX this works because the model job will be in an adjacent folder
outfile.write('%s\n' % os.path.join(
'..', self.job_id, self.db_name, line.strip()))
if self.mean_file:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'mean-image',
# XXX Can't use url_for here because we don't have a request context
'data': '/files/' + self.path('mean.jpg', relative=True),
},
namespace='/jobs',
room=self.job_id,
)
def get_labels(self):
"""
Read labels from labels_file and return them in a list
"""
# The labels might be set already
if hasattr(self, '_labels') and self._labels and len(self._labels) > 0:
return self._labels
assert hasattr(self, 'labels_file'), 'labels_file not set'
assert self.labels_file, 'labels_file not set'
assert os.path.exists(self.path(self.labels_file)), 'labels_file does not exist'
labels = []
with open(self.path(self.labels_file)) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
assert len(labels) > 0, 'no labels in labels_file'
self._labels = labels
return self._labels
def distribution_data(self):
"""
Returns distribution data for a C3.js graph
"""
if self.distribution is None:
return None
try:
labels = self.get_labels()
except AssertionError:
return None
if len(self.distribution.keys()) != len(labels):
return None
values = ['Count']
titles = []
for key, value in sorted(
self.distribution.items(),
key=operator.itemgetter(1),
reverse=True):
values.append(value)
titles.append(labels[int(key)])
return {
'data': {
'columns': [values],
'type': 'bar'
},
'axis': {
'x': {
'type': 'category',
'categories': titles,
}
},
}
| self.encoding = 'none' | conditional_block |
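# Editor's note: a hedged sketch, not part of the original file, of the kind of command
# line task_arguments() above assembles; every value below is hypothetical.
example_args = [
    'python', 'tools/create_db.py',
    'train.txt',   # input_file with image/label pairs
    'train_db',    # db_name, the output location
    '256', '256',  # image_dims[1] (width), then image_dims[0] (height)
    '--backend=lmdb',
    '--channels=3',
    '--resize_mode=squash',
    '--shuffle',
]
print(' '.join(example_args))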
searchEditorModel.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { URI } from 'vs/base/common/uri';
import { ITextModel } from 'vs/editor/common/model';
import { IModelService } from 'vs/editor/common/services/modelService';
import { IModeService } from 'vs/editor/common/services/modeService';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import { parseSavedSearchEditor } from 'vs/workbench/contrib/searchEditor/browser/searchEditorSerialization';
import { IBackupFileService } from 'vs/workbench/services/backup/common/backup';
import { SearchConfiguration } from './searchEditorInput';
import { assertIsDefined } from 'vs/base/common/types';
export class SearchEditorModel {
private cachedContentsModel: ITextModel | undefined = undefined;
private resolveContents!: (model: ITextModel) => void;
public onModelResolved: Promise<ITextModel>;
private ongoingResolve = Promise.resolve<any>(undefined);
constructor(
private modelUri: URI,
public config: SearchConfiguration,
private existingData: ({ config: Partial<SearchConfiguration>; backingUri?: URI; } &
({ modelUri: URI; text?: never; } |
{ text: string; modelUri?: never; } |
{ backingUri: URI; text?: never; modelUri?: never; })),
@IInstantiationService private readonly instantiationService: IInstantiationService,
@IBackupFileService readonly backupService: IBackupFileService,
@IModelService private readonly modelService: IModelService,
@IModeService private readonly modeService: IModeService) {
this.onModelResolved = new Promise<ITextModel>(resolve => this.resolveContents = resolve);
this.onModelResolved.then(model => this.cachedContentsModel = model);
this.ongoingResolve = backupService.resolve(modelUri)
.then(backup => modelService.getModel(modelUri) ?? (backup ? modelService.createModel(backup.value, modeService.create('search-result'), modelUri) : undefined))
.then(model => { if (model) { this.resolveContents(model); } });
}
async resolve(): Promise<ITextModel> {
await (this.ongoingResolve = this.ongoingResolve.then(() => this.cachedContentsModel || this.createModel()));
return assertIsDefined(this.cachedContentsModel);
}
private async createModel() {
const getContents = async () => {
if (this.existingData.text !== undefined) {
return this.existingData.text;
}
else if (this.existingData.backingUri !== undefined) {
return (await this.instantiationService.invokeFunction(parseSavedSearchEditor, this.existingData.backingUri)).text;
}
else {
return '';
}
};
const contents = await getContents();
const model = this.modelService.getModel(this.modelUri) ?? this.modelService.createModel(contents, this.modeService.create('search-result'), this.modelUri);
this.resolveContents(model);
return model;
} | } | random_line_split |
|
searchEditorModel.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { URI } from 'vs/base/common/uri';
import { ITextModel } from 'vs/editor/common/model';
import { IModelService } from 'vs/editor/common/services/modelService';
import { IModeService } from 'vs/editor/common/services/modeService';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import { parseSavedSearchEditor } from 'vs/workbench/contrib/searchEditor/browser/searchEditorSerialization';
import { IBackupFileService } from 'vs/workbench/services/backup/common/backup';
import { SearchConfiguration } from './searchEditorInput';
import { assertIsDefined } from 'vs/base/common/types';
export class SearchEditorModel {
private cachedContentsModel: ITextModel | undefined = undefined;
private resolveContents!: (model: ITextModel) => void;
public onModelResolved: Promise<ITextModel>;
private ongoingResolve = Promise.resolve<any>(undefined);
constructor(
private modelUri: URI,
public config: SearchConfiguration,
private existingData: ({ config: Partial<SearchConfiguration>; backingUri?: URI; } &
({ modelUri: URI; text?: never; } |
{ text: string; modelUri?: never; } |
{ backingUri: URI; text?: never; modelUri?: never; })),
@IInstantiationService private readonly instantiationService: IInstantiationService,
@IBackupFileService readonly backupService: IBackupFileService,
@IModelService private readonly modelService: IModelService,
@IModeService private readonly modeService: IModeService) {
this.onModelResolved = new Promise<ITextModel>(resolve => this.resolveContents = resolve);
this.onModelResolved.then(model => this.cachedContentsModel = model);
this.ongoingResolve = backupService.resolve(modelUri)
.then(backup => modelService.getModel(modelUri) ?? (backup ? modelService.createModel(backup.value, modeService.create('search-result'), modelUri) : undefined))
.then(model => { if (model) { this.resolveContents(model); } });
}
async resolve(): Promise<ITextModel> |
private async createModel() {
const getContents = async () => {
if (this.existingData.text !== undefined) {
return this.existingData.text;
}
else if (this.existingData.backingUri !== undefined) {
return (await this.instantiationService.invokeFunction(parseSavedSearchEditor, this.existingData.backingUri)).text;
}
else {
return '';
}
};
const contents = await getContents();
const model = this.modelService.getModel(this.modelUri) ?? this.modelService.createModel(contents, this.modeService.create('search-result'), this.modelUri);
this.resolveContents(model);
return model;
}
}
| {
await (this.ongoingResolve = this.ongoingResolve.then(() => this.cachedContentsModel || this.createModel()));
return assertIsDefined(this.cachedContentsModel);
} | identifier_body |
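// Editor's note: a self-contained sketch, not from the original file, of the promise-
// chaining pattern behind ongoingResolve above: each unit of work queues behind the
// previous one, so the backup restore and model creation never race.
let ongoing: Promise<unknown> = Promise.resolve();
function serialized<T>(work: () => Promise<T>): Promise<T> {
    const next = ongoing.then(work);
    ongoing = next.catch(() => undefined); // keep the chain alive even if work fails
    return next;
}
// usage: serialized(() => createModel()) resolves in submission order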
searchEditorModel.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { URI } from 'vs/base/common/uri';
import { ITextModel } from 'vs/editor/common/model';
import { IModelService } from 'vs/editor/common/services/modelService';
import { IModeService } from 'vs/editor/common/services/modeService';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import { parseSavedSearchEditor } from 'vs/workbench/contrib/searchEditor/browser/searchEditorSerialization';
import { IBackupFileService } from 'vs/workbench/services/backup/common/backup';
import { SearchConfiguration } from './searchEditorInput';
import { assertIsDefined } from 'vs/base/common/types';
export class SearchEditorModel {
private cachedContentsModel: ITextModel | undefined = undefined;
private resolveContents!: (model: ITextModel) => void;
public onModelResolved: Promise<ITextModel>;
private ongoingResolve = Promise.resolve<any>(undefined);
constructor(
private modelUri: URI,
public config: SearchConfiguration,
private existingData: ({ config: Partial<SearchConfiguration>; backingUri?: URI; } &
({ modelUri: URI; text?: never; } |
{ text: string; modelUri?: never; } |
{ backingUri: URI; text?: never; modelUri?: never; })),
@IInstantiationService private readonly instantiationService: IInstantiationService,
@IBackupFileService readonly backupService: IBackupFileService,
@IModelService private readonly modelService: IModelService,
@IModeService private readonly modeService: IModeService) {
this.onModelResolved = new Promise<ITextModel>(resolve => this.resolveContents = resolve);
this.onModelResolved.then(model => this.cachedContentsModel = model);
this.ongoingResolve = backupService.resolve(modelUri)
.then(backup => modelService.getModel(modelUri) ?? (backup ? modelService.createModel(backup.value, modeService.create('search-result'), modelUri) : undefined))
.then(model => { if (model) { this.resolveContents(model); } });
}
async | (): Promise<ITextModel> {
await (this.ongoingResolve = this.ongoingResolve.then(() => this.cachedContentsModel || this.createModel()));
return assertIsDefined(this.cachedContentsModel);
}
private async createModel() {
const getContents = async () => {
if (this.existingData.text !== undefined) {
return this.existingData.text;
}
else if (this.existingData.backingUri !== undefined) {
return (await this.instantiationService.invokeFunction(parseSavedSearchEditor, this.existingData.backingUri)).text;
}
else {
return '';
}
};
const contents = await getContents();
const model = this.modelService.getModel(this.modelUri) ?? this.modelService.createModel(contents, this.modeService.create('search-result'), this.modelUri);
this.resolveContents(model);
return model;
}
}
| resolve | identifier_name |
poetry_requirements_caof.py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from typing import Iterable, Mapping
from packaging.utils import canonicalize_name as canonicalize_project_name
from pants.backend.python.macros.caof_utils import (
OVERRIDES_TYPE,
flatten_overrides_to_dependency_field,
)
from pants.backend.python.macros.poetry_requirements import PyProjectToml, parse_pyproject_toml
from pants.backend.python.target_types import normalize_module_mapping
from pants.core.target_types import TargetGeneratorSourcesHelperTarget
class PoetryRequirementsCAOF:
"""Translates dependencies specified in a pyproject.toml Poetry file to a set of
"python_requirements_library" targets.
For example, if pyproject.toml contains the following entries under
poetry.tool.dependencies: `foo = ">1"` and `bar = ">2.4"`,
python_requirement(
name="foo",
requirements=["foo>1"],
)
python_requirement(
name="bar",
requirements=["bar>2.4"],
)
See Poetry documentation for correct specification of pyproject.toml:
https://python-poetry.org/docs/pyproject/
You may also use the parameter `module_mapping` to teach Pants what modules each of your
requirements provide. For any requirement unspecified, Pants will default to the name of the
requirement. This setting is important for Pants to know how to convert your import
statements back into your dependencies. For example:
poetry_requirements(
module_mapping={
"ansicolors": ["colors"],
"setuptools": ["pkg_resources"],
}
)
"""
def __init__(self, parse_context):
self._parse_context = parse_context
def __call__(
self,
*,
source: str = "pyproject.toml",
module_mapping: Mapping[str, Iterable[str]] | None = None,
type_stubs_module_mapping: Mapping[str, Iterable[str]] | None = None,
overrides: OVERRIDES_TYPE = None,
) -> None:
| """
:param module_mapping: a mapping of requirement names to a list of the modules they provide.
For example, `{"ansicolors": ["colors"]}`. Any unspecified requirements will use the
requirement name as the default module, e.g. "Django" will default to
`modules=["django"]`.
"""
req_file_tgt = self._parse_context.create_object(
TargetGeneratorSourcesHelperTarget.alias,
name=source.replace(os.path.sep, "_"),
sources=[source],
)
requirements_dep = f":{req_file_tgt.name}"
normalized_module_mapping = normalize_module_mapping(module_mapping)
normalized_type_stubs_module_mapping = normalize_module_mapping(type_stubs_module_mapping)
dependencies_overrides = flatten_overrides_to_dependency_field(
overrides, macro_name="python_requirements", build_file_dir=self._parse_context.rel_path
)
requirements = parse_pyproject_toml(
PyProjectToml.deprecated_macro_create(self._parse_context, source)
)
for parsed_req in requirements:
normalized_proj_name = canonicalize_project_name(parsed_req.project_name)
self._parse_context.create_object(
"python_requirement",
name=parsed_req.project_name,
requirements=[parsed_req],
modules=normalized_module_mapping.get(normalized_proj_name),
type_stub_modules=normalized_type_stubs_module_mapping.get(normalized_proj_name),
dependencies=[
requirements_dep,
*dependencies_overrides.get(normalized_proj_name, []),
],
) | identifier_body |
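# Editor's note: a small runnable sketch, not from the original file, showing why the
# macro canonicalizes project names before dictionary lookups: pip treats all of these
# spellings as the same distribution.
from packaging.utils import canonicalize_name

for name in ('Foo.Bar', 'foo_bar', 'FOO-BAR'):
    print(name, '->', canonicalize_name(name))  # each prints 'foo-bar'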
|
poetry_requirements_caof.py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from typing import Iterable, Mapping
from packaging.utils import canonicalize_name as canonicalize_project_name
from pants.backend.python.macros.caof_utils import (
OVERRIDES_TYPE,
flatten_overrides_to_dependency_field,
)
from pants.backend.python.macros.poetry_requirements import PyProjectToml, parse_pyproject_toml
from pants.backend.python.target_types import normalize_module_mapping
from pants.core.target_types import TargetGeneratorSourcesHelperTarget
class | :
"""Translates dependencies specified in a pyproject.toml Poetry file to a set of
"python_requirements_library" targets.
For example, if pyproject.toml contains the following entries under
poetry.tool.dependencies: `foo = ">1"` and `bar = ">2.4"`,
python_requirement(
name="foo",
requirements=["foo>1"],
)
python_requirement(
name="bar",
requirements=["bar>2.4"],
)
See Poetry documentation for correct specification of pyproject.toml:
https://python-poetry.org/docs/pyproject/
You may also use the parameter `module_mapping` to teach Pants what modules each of your
requirements provide. For any requirement unspecified, Pants will default to the name of the
requirement. This setting is important for Pants to know how to convert your import
statements back into your dependencies. For example:
poetry_requirements(
module_mapping={
"ansicolors": ["colors"],
"setuptools": ["pkg_resources"],
}
)
"""
def __init__(self, parse_context):
self._parse_context = parse_context
def __call__(
self,
*,
source: str = "pyproject.toml",
module_mapping: Mapping[str, Iterable[str]] | None = None,
type_stubs_module_mapping: Mapping[str, Iterable[str]] | None = None,
overrides: OVERRIDES_TYPE = None,
) -> None:
"""
:param module_mapping: a mapping of requirement names to a list of the modules they provide.
For example, `{"ansicolors": ["colors"]}`. Any unspecified requirements will use the
requirement name as the default module, e.g. "Django" will default to
`modules=["django"]`.
"""
req_file_tgt = self._parse_context.create_object(
TargetGeneratorSourcesHelperTarget.alias,
name=source.replace(os.path.sep, "_"),
sources=[source],
)
requirements_dep = f":{req_file_tgt.name}"
normalized_module_mapping = normalize_module_mapping(module_mapping)
normalized_type_stubs_module_mapping = normalize_module_mapping(type_stubs_module_mapping)
dependencies_overrides = flatten_overrides_to_dependency_field(
overrides, macro_name="python_requirements", build_file_dir=self._parse_context.rel_path
)
requirements = parse_pyproject_toml(
PyProjectToml.deprecated_macro_create(self._parse_context, source)
)
for parsed_req in requirements:
normalized_proj_name = canonicalize_project_name(parsed_req.project_name)
self._parse_context.create_object(
"python_requirement",
name=parsed_req.project_name,
requirements=[parsed_req],
modules=normalized_module_mapping.get(normalized_proj_name),
type_stub_modules=normalized_type_stubs_module_mapping.get(normalized_proj_name),
dependencies=[
requirements_dep,
*dependencies_overrides.get(normalized_proj_name, []),
],
)
| PoetryRequirementsCAOF | identifier_name |
poetry_requirements_caof.py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from typing import Iterable, Mapping
from packaging.utils import canonicalize_name as canonicalize_project_name
from pants.backend.python.macros.caof_utils import (
OVERRIDES_TYPE,
flatten_overrides_to_dependency_field,
)
from pants.backend.python.macros.poetry_requirements import PyProjectToml, parse_pyproject_toml
from pants.backend.python.target_types import normalize_module_mapping
from pants.core.target_types import TargetGeneratorSourcesHelperTarget
class PoetryRequirementsCAOF:
"""Translates dependencies specified in a pyproject.toml Poetry file to a set of
"python_requirements_library" targets.
For example, if pyproject.toml contains the following entries under
poetry.tool.dependencies: `foo = ">1"` and `bar = ">2.4"`,
python_requirement(
name="foo",
requirements=["foo>1"],
)
python_requirement(
name="bar",
requirements=["bar>2.4"],
)
See Poetry documentation for correct specification of pyproject.toml:
https://python-poetry.org/docs/pyproject/
You may also use the parameter `module_mapping` to teach Pants what modules each of your
requirements provide. For any requirement unspecified, Pants will default to the name of the
requirement. This setting is important for Pants to know how to convert your import
statements back into your dependencies. For example:
poetry_requirements(
module_mapping={
"ansicolors": ["colors"],
"setuptools": ["pkg_resources"],
}
)
"""
def __init__(self, parse_context):
self._parse_context = parse_context
def __call__(
self,
*,
source: str = "pyproject.toml",
module_mapping: Mapping[str, Iterable[str]] | None = None,
type_stubs_module_mapping: Mapping[str, Iterable[str]] | None = None,
overrides: OVERRIDES_TYPE = None,
) -> None:
"""
:param module_mapping: a mapping of requirement names to a list of the modules they provide.
For example, `{"ansicolors": ["colors"]}`. Any unspecified requirements will use the
requirement name as the default module, e.g. "Django" will default to
`modules=["django"]`.
"""
req_file_tgt = self._parse_context.create_object(
TargetGeneratorSourcesHelperTarget.alias,
name=source.replace(os.path.sep, "_"),
sources=[source],
)
requirements_dep = f":{req_file_tgt.name}"
normalized_module_mapping = normalize_module_mapping(module_mapping)
normalized_type_stubs_module_mapping = normalize_module_mapping(type_stubs_module_mapping)
dependencies_overrides = flatten_overrides_to_dependency_field(
overrides, macro_name="python_requirements", build_file_dir=self._parse_context.rel_path
)
requirements = parse_pyproject_toml(
PyProjectToml.deprecated_macro_create(self._parse_context, source)
)
for parsed_req in requirements:
| normalized_proj_name = canonicalize_project_name(parsed_req.project_name)
self._parse_context.create_object(
"python_requirement",
name=parsed_req.project_name,
requirements=[parsed_req],
modules=normalized_module_mapping.get(normalized_proj_name),
type_stub_modules=normalized_type_stubs_module_mapping.get(normalized_proj_name),
dependencies=[
requirements_dep,
*dependencies_overrides.get(normalized_proj_name, []),
],
) | conditional_block |
|
poetry_requirements_caof.py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from typing import Iterable, Mapping
from packaging.utils import canonicalize_name as canonicalize_project_name
from pants.backend.python.macros.caof_utils import (
OVERRIDES_TYPE,
flatten_overrides_to_dependency_field,
)
from pants.backend.python.macros.poetry_requirements import PyProjectToml, parse_pyproject_toml
from pants.backend.python.target_types import normalize_module_mapping
from pants.core.target_types import TargetGeneratorSourcesHelperTarget
class PoetryRequirementsCAOF:
"""Translates dependencies specified in a pyproject.toml Poetry file to a set of
"python_requirements_library" targets.
For example, if pyproject.toml contains the following entries under
poetry.tool.dependencies: `foo = ">1"` and `bar = ">2.4"`,
python_requirement(
name="foo",
requirements=["foo>1"],
)
python_requirement(
name="bar",
requirements=["bar>2.4"],
)
| You may also use the parameter `module_mapping` to teach Pants what modules each of your
requirements provide. For any requirement unspecified, Pants will default to the name of the
requirement. This setting is important for Pants to know how to convert your import
statements back into your dependencies. For example:
poetry_requirements(
module_mapping={
"ansicolors": ["colors"],
"setuptools": ["pkg_resources"],
}
)
"""
def __init__(self, parse_context):
self._parse_context = parse_context
def __call__(
self,
*,
source: str = "pyproject.toml",
module_mapping: Mapping[str, Iterable[str]] | None = None,
type_stubs_module_mapping: Mapping[str, Iterable[str]] | None = None,
overrides: OVERRIDES_TYPE = None,
) -> None:
"""
:param module_mapping: a mapping of requirement names to a list of the modules they provide.
For example, `{"ansicolors": ["colors"]}`. Any unspecified requirements will use the
requirement name as the default module, e.g. "Django" will default to
`modules=["django"]`.
"""
req_file_tgt = self._parse_context.create_object(
TargetGeneratorSourcesHelperTarget.alias,
name=source.replace(os.path.sep, "_"),
sources=[source],
)
requirements_dep = f":{req_file_tgt.name}"
normalized_module_mapping = normalize_module_mapping(module_mapping)
normalized_type_stubs_module_mapping = normalize_module_mapping(type_stubs_module_mapping)
dependencies_overrides = flatten_overrides_to_dependency_field(
overrides, macro_name="python_requirements", build_file_dir=self._parse_context.rel_path
)
requirements = parse_pyproject_toml(
PyProjectToml.deprecated_macro_create(self._parse_context, source)
)
for parsed_req in requirements:
normalized_proj_name = canonicalize_project_name(parsed_req.project_name)
self._parse_context.create_object(
"python_requirement",
name=parsed_req.project_name,
requirements=[parsed_req],
modules=normalized_module_mapping.get(normalized_proj_name),
type_stub_modules=normalized_type_stubs_module_mapping.get(normalized_proj_name),
dependencies=[
requirements_dep,
*dependencies_overrides.get(normalized_proj_name, []),
],
) | See Poetry documentation for correct specification of pyproject.toml:
https://python-poetry.org/docs/pyproject/
| random_line_split |
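# Editor's note: a hedged sketch, not from the original file, of a BUILD-file invocation
# of this macro; the target address in `overrides` is hypothetical.
# poetry_requirements(
#     module_mapping={"ansicolors": ["colors"]},
#     overrides={"foo": {"dependencies": ["//3rdparty:bar"]}},
# )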
yt-helper.ts | // YouTube Helper
// require utils.js
// require yt-parser
import {ytparser} from './yt-parser';
import * as _ from 'underscore';
export class yth {
/**
     * @param options see YouTube player options
*/
static getSrc(vid, origin, options) {
origin = origin || (location.protocol + '//' + location.host + location.pathname);
const params = $.extend({
            origin: origin, // origin always first because everything is placed into this parameter
feature: 'player_embedded',
html5: true,
enablejsapi: 1,
controls: 0,
modestbranding: 1, | rel: 0,
autoplay: 0,
disablekb: 1,
playsinline: 1,
widgetid: 2
}, options);
return "https://www.youtube.com/embed/" + vid + "?" + $.param(params).replace(/&/g, '&');
};
/**
     * Converts a list of tracks into a YouTube tracklist
* @param {[Disc.Track]} tracks
*/
static getTracklist(tracks) {
let lines = [];
tracks.forEach(track => {
let timecode = yth.getTimecode(track);
let line = `${timecode} - ${track.title}`;
if (track.performer) line += ` - ${track.performer}`;
lines.push(line);
});
return lines.join("\n");
};
/**
*
* @param tracklist {string}
* @param file {Disc.File}
* @param [options]
     * @param {boolean} options.artistInTitle as in the m3u?
     * @param {boolean} options.artistBeforeTitle true if the artist appears in the song title
     * @param {boolean} options.containsDuration true if the track duration appears in the input text
     * @param {boolean} options.durationBeforeTime true if the duration appears before the track start time
*/
static setTracklist(tracklist, file, options?) {
        // TODO: clear file.tracks
const lines = tracklist.split(/\r?\n/);
const cueTracks = ytparser.parseTracks(lines, options);
file.removeTracks();
cueTracks.forEach(cueTrack => {
const track = file.newTrack();
_.extend(track, cueTrack);
});
};
/**
     * Formats a track's start time as a YouTube timecode
* @param {Disc.Track} track
*/
static getTimecode(track) {
return formatHMSS(track.startSeconds);
};
} | showinfo: 0, | random_line_split |
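// Editor's note: formatHMSS is provided by the required utils.js and is not shown in this
// file; the stand-in below is an assumption for illustration, not the original code.
function formatHMSS(totalSeconds: number): string {
    const h = Math.floor(totalSeconds / 3600);
    const m = Math.floor((totalSeconds % 3600) / 60);
    const s = Math.floor(totalSeconds % 60);
    const mm = h > 0 ? String(m).padStart(2, '0') : String(m);
    return (h > 0 ? `${h}:${mm}:` : `${m}:`) + String(s).padStart(2, '0');
}
console.log(formatHMSS(83));   // "1:23"
console.log(formatHMSS(3750)); // "1:02:30"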
yt-helper.ts | // YouTube Helper
// require utils.js
// require yt-parser
import {ytparser} from './yt-parser';
import * as _ from 'underscore';
export class yth {
/**
     * @param options see YouTube player options
*/
static getSrc(vid, origin, options) {
origin = origin || (location.protocol + '//' + location.host + location.pathname);
const params = $.extend({
            origin: origin, // origin always first because everything is placed into this parameter
feature: 'player_embedded',
html5: true,
enablejsapi: 1,
controls: 0,
modestbranding: 1,
showinfo: 0,
rel: 0,
autoplay: 0,
disablekb: 1,
playsinline: 1,
widgetid: 2
}, options);
return "https://www.youtube.com/embed/" + vid + "?" + $.param(params).replace(/&/g, '&');
};
/**
     * Converts a list of tracks into a YouTube tracklist
* @param {[Disc.Track]} tracks
*/
static getTracklist(tracks) {
let lines = [];
tracks.forEach(track => {
let timecode = yth.getTimecode(track);
let line = `${timecode} - ${track.title}`;
if (track.performer) line += ` - ${track.performer}`;
lines.push(line);
});
return lines.join("\n");
};
/**
*
* @param tracklist {string}
* @param file {Disc.File}
* @param [options]
     * @param {boolean} options.artistInTitle as in the m3u?
     * @param {boolean} options.artistBeforeTitle true if the artist appears in the song title
     * @param {boolean} options.containsDuration true if the track duration appears in the input text
     * @param {boolean} options.durationBeforeTime true if the duration appears before the track start time
*/
static setTracklist(tracklist, file, options?) {
        // TODO: clear file.tracks
const lines = tracklist.split(/\r?\n/);
const cueTracks = ytparser.parseTracks(lines, options);
file.removeTracks();
cueTracks.forEach(cueTrack => {
const track = file.newTrack();
_.extend(track, cueTrack);
});
};
/**
     * Formats a track's start time as a YouTube timecode
* @param {Disc.Track} track
*/
static getTimecode(track) {
| return formatHMSS(track.startSeconds);
};
}
| identifier_body |
|
yt-helper.ts | // YouTube Helper
// require utils.js
// require yt-parser
import {ytparser} from './yt-parser';
import * as _ from 'underscore';
export class | {
/**
     * @param options see YouTube player options
*/
static getSrc(vid, origin, options) {
origin = origin || (location.protocol + '//' + location.host + location.pathname);
const params = $.extend({
            origin: origin, // origin always first because everything is placed into this parameter
feature: 'player_embedded',
html5: true,
enablejsapi: 1,
controls: 0,
modestbranding: 1,
showinfo: 0,
rel: 0,
autoplay: 0,
disablekb: 1,
playsinline: 1,
widgetid: 2
}, options);
return "https://www.youtube.com/embed/" + vid + "?" + $.param(params).replace(/&/g, '&');
};
/**
     * Converts a list of tracks into a YouTube tracklist
* @param {[Disc.Track]} tracks
*/
static getTracklist(tracks) {
let lines = [];
tracks.forEach(track => {
let timecode = yth.getTimecode(track);
let line = `${timecode} - ${track.title}`;
if (track.performer) line += ` - ${track.performer}`;
lines.push(line);
});
return lines.join("\n");
};
/**
*
* @param tracklist {string}
* @param file {Disc.File}
* @param [options]
     * @param {boolean} options.artistInTitle as in the m3u?
     * @param {boolean} options.artistBeforeTitle true if the artist appears in the song title
     * @param {boolean} options.containsDuration true if the track duration appears in the input text
     * @param {boolean} options.durationBeforeTime true if the duration appears before the track start time
*/
static setTracklist(tracklist, file, options?) {
        // TODO: clear file.tracks
const lines = tracklist.split(/\r?\n/);
const cueTracks = ytparser.parseTracks(lines, options);
file.removeTracks();
cueTracks.forEach(cueTrack => {
const track = file.newTrack();
_.extend(track, cueTrack);
});
};
/**
     * Formats a track's start time as a YouTube timecode
* @param {Disc.Track} track
*/
static getTimecode(track) {
return formatHMSS(track.startSeconds);
};
}
| yth | identifier_name |
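// Editor's note: illustrative output, not from the original file, of the line format
// getTracklist produces (timecodes assume formatHMSS renders minutes:seconds):
//   "0:00 - Intro - Some Artist"   <- track with a performer
//   "1:23 - Second Song"           <- track without one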
evaluator.rs | //
// This file is part of zero_sum.
//
// zero_sum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// zero_sum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with zero_sum. If not, see <http://www.gnu.org/licenses/>.
//
// Copyright 2016-2017 Chris Foster
//
use std::fmt::Display;
use std::ops::{Add, Div, Mul, Neg, Sub};
use state::State;
/// An evaluation type.
///
/// This is usually a tuple around a signed numeric type.
///
/// # Example
///
/// There is a [helper macro](../macro.prepare_evaluation_tuple.html) to facilitate the implementation of tuple structs:
///
/// ```rust
/// #[macro_use]
/// extern crate zero_sum;
/// # use zero_sum::analysis::Evaluation;
/// # use std::fmt;
/// use std::i32;
/// use std::ops::{Add, Div, Mul, Neg, Sub};
///
/// #[derive(Clone, Copy, PartialEq, PartialOrd)]
/// struct Eval(i32);
///
/// prepare_evaluation_tuple!(Eval); // impl Add, Div, Mul, Neg, Sub, and Display
///
/// impl Evaluation for Eval {
/// fn null() -> Eval { Eval(0) }
/// fn shift(self, steps: i32) -> Eval { Eval(self.0 + steps) }
/// fn win() -> Eval { Eval(100000) }
/// fn max() -> Eval { Eval(i32::MAX) }
/// fn is_win(&self) -> bool { self.0 > 99000 }
/// }
/// # fn main() { }
/// ```
pub trait Evaluation:
Sized +
Clone +
Copy +
Display +
Add<Output = Self> +
Sub<Output = Self> +
Mul<Output = Self> +
Neg<Output = Self> +
Div<Output = Self> +
PartialEq +
PartialOrd {
/// An empty, or zero evaluation.
fn null() -> Self;
/// Shift the evaluation by the smallest representable amount `steps` times in the positive or negative direction.
fn shift(self, steps: i32) -> Self;
    /// The base value of a win. The evaluator may add to or subtract from it
/// in order to promote it or discourage it in favor of others in the search.
fn win() -> Self;
    /// The base value of a loss. The evaluator may add to or subtract from it
/// in order to promote it or discourage it in favor of others in the search.
fn lose() -> Self { -Self::win() }
/// The maximum value representable. This must be safely negatable.
fn max() -> Self;
/// The minimum value representable.
fn min() -> Self { -Self::max() }
/// Returns `true` if this evaluation contains a win. This is usually a check to
/// see if the value is above a certain threshold.
fn is_win(&self) -> bool;
/// Returns `true` if this evaluation contains a loss.
fn is_lose(&self) -> bool { (-*self).is_win() }
/// Returns `true` if this evaluation is either a win or a loss.
fn is_end(&self) -> bool { self.is_win() || self.is_lose() }
}
/// Evaluates a State.
pub trait Evaluator {
type State: State;
type Evaluation: Evaluation;
/// Returns the evaluation of `state`.
fn evaluate(&self, state: &Self::State) -> Self::Evaluation;
/// Returns the evaluation of `state` after executing `plies`.
///
/// # Panics
/// Will panic if the execution of any ply in `plies` causes an error.
fn evaluate_plies(&self, state: &Self::State, plies: &[<Self::State as State>::Ply]) -> Self::Evaluation |
}
/// Implement arithmetic operators (`Add`, `Sub`, `Mul`, `Neg`, `Div`) and `Display` for a tuple
/// struct in terms of the enclosed type.
///
/// # Example
///
/// ```rust
/// #[macro_use]
/// extern crate zero_sum;
/// # use zero_sum::analysis::Evaluation;
/// # use std::fmt;
/// use std::i32;
/// use std::ops::{Add, Div, Mul, Neg, Sub};
///
/// #[derive(Clone, Copy, PartialEq, PartialOrd)]
/// struct Eval(i32);
///
/// prepare_evaluation_tuple!(Eval); // impl Add, Div, Mul, Neg, Sub, and Display
///
/// impl Evaluation for Eval {
/// fn null() -> Eval { Eval(0) }
/// fn shift(self, steps: i32) -> Eval { Eval(self.0 + steps) }
/// fn win() -> Eval { Eval(100000) }
/// fn max() -> Eval { Eval(i32::MAX) }
/// fn is_win(&self) -> bool { self.0.abs() > 99000 }
/// }
/// # fn main() { }
/// ```
#[macro_export]
macro_rules! prepare_evaluation_tuple {
($type_: ident) => {
impl ::std::ops::Add for $type_ {
type Output = $type_;
fn add(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a + b)
}
}
impl ::std::ops::Sub for $type_ {
type Output = $type_;
fn sub(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a - b)
}
}
impl ::std::ops::Mul for $type_ {
type Output = $type_;
fn mul(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a * b)
}
}
impl ::std::ops::Div for $type_ {
type Output = $type_;
fn div(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a / b)
}
}
impl ::std::ops::Neg for $type_ {
type Output = $type_;
fn neg(self) -> $type_ {
let $type_(a) = self;
$type_(-a)
}
}
impl ::std::fmt::Display for $type_ {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
let $type_(a) = *self;
write!(f, "{}", a)
}
}
}
}
| {
let mut state = state.clone();
if let Err(error) = state.execute_plies(plies) {
panic!("Error calculating evaluation: {}", error);
}
if plies.len() % 2 == 0 {
self.evaluate(&state)
} else {
-self.evaluate(&state)
}
} | identifier_body |
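// Editor's note: a standalone sketch, not part of the crate, of the sign convention in
// evaluate_plies above: evaluations are from the perspective of the player to move, so
// an odd number of executed plies flips the sign.
fn from_root_perspective(leaf_eval: i32, plies_executed: usize) -> i32 {
    if plies_executed % 2 == 0 { leaf_eval } else { -leaf_eval }
}

fn main() {
    assert_eq!(from_root_perspective(50, 0), 50);
    assert_eq!(from_root_perspective(50, 1), -50);
    println!("sign alternation checks pass");
}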
evaluator.rs | //
// This file is part of zero_sum.
//
// zero_sum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// zero_sum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with zero_sum. If not, see <http://www.gnu.org/licenses/>.
//
// Copyright 2016-2017 Chris Foster
//
use std::fmt::Display;
use std::ops::{Add, Div, Mul, Neg, Sub};
use state::State;
/// An evaluation type.
/// | /// This is usually a tuple around a signed numeric type.
///
/// # Example
///
/// There is a [helper macro](../macro.prepare_evaluation_tuple.html) to facilitate the implementation of tuple structs:
///
/// ```rust
/// #[macro_use]
/// extern crate zero_sum;
/// # use zero_sum::analysis::Evaluation;
/// # use std::fmt;
/// use std::i32;
/// use std::ops::{Add, Div, Mul, Neg, Sub};
///
/// #[derive(Clone, Copy, PartialEq, PartialOrd)]
/// struct Eval(i32);
///
/// prepare_evaluation_tuple!(Eval); // impl Add, Div, Mul, Neg, Sub, and Display
///
/// impl Evaluation for Eval {
/// fn null() -> Eval { Eval(0) }
/// fn shift(self, steps: i32) -> Eval { Eval(self.0 + steps) }
/// fn win() -> Eval { Eval(100000) }
/// fn max() -> Eval { Eval(i32::MAX) }
/// fn is_win(&self) -> bool { self.0 > 99000 }
/// }
/// # fn main() { }
/// ```
pub trait Evaluation:
Sized +
Clone +
Copy +
Display +
Add<Output = Self> +
Sub<Output = Self> +
Mul<Output = Self> +
Neg<Output = Self> +
Div<Output = Self> +
PartialEq +
PartialOrd {
/// An empty, or zero evaluation.
fn null() -> Self;
/// Shift the evaluation by the smallest representable amount `steps` times in the positive or negative direction.
fn shift(self, steps: i32) -> Self;
    /// The base value of a win. The evaluator may add to or subtract from it
/// in order to promote it or discourage it in favor of others in the search.
fn win() -> Self;
    /// The base value of a loss. The evaluator may add to or subtract from it
/// in order to promote it or discourage it in favor of others in the search.
fn lose() -> Self { -Self::win() }
/// The maximum value representable. This must be safely negatable.
fn max() -> Self;
/// The minimum value representable.
fn min() -> Self { -Self::max() }
/// Returns `true` if this evaluation contains a win. This is usually a check to
/// see if the value is above a certain threshold.
fn is_win(&self) -> bool;
/// Returns `true` if this evaluation contains a loss.
fn is_lose(&self) -> bool { (-*self).is_win() }
/// Returns `true` if this evaluation is either a win or a loss.
fn is_end(&self) -> bool { self.is_win() || self.is_lose() }
}
/// Evaluates a State.
pub trait Evaluator {
type State: State;
type Evaluation: Evaluation;
/// Returns the evaluation of `state`.
fn evaluate(&self, state: &Self::State) -> Self::Evaluation;
/// Returns the evaluation of `state` after executing `plies`.
///
/// # Panics
/// Will panic if the execution of any ply in `plies` causes an error.
fn evaluate_plies(&self, state: &Self::State, plies: &[<Self::State as State>::Ply]) -> Self::Evaluation {
let mut state = state.clone();
if let Err(error) = state.execute_plies(plies) {
panic!("Error calculating evaluation: {}", error);
}
if plies.len() % 2 == 0 {
self.evaluate(&state)
} else {
-self.evaluate(&state)
}
}
}
/// Implement arithmetic operators (`Add`, `Sub`, `Mul`, `Neg`, `Div`) and `Display` for a tuple
/// struct in terms of the enclosed type.
///
/// # Example
///
/// ```rust
/// #[macro_use]
/// extern crate zero_sum;
/// # use zero_sum::analysis::Evaluation;
/// # use std::fmt;
/// use std::i32;
/// use std::ops::{Add, Div, Mul, Neg, Sub};
///
/// #[derive(Clone, Copy, PartialEq, PartialOrd)]
/// struct Eval(i32);
///
/// prepare_evaluation_tuple!(Eval); // impl Add, Div, Mul, Neg, Sub, and Display
///
/// impl Evaluation for Eval {
/// fn null() -> Eval { Eval(0) }
/// fn shift(self, steps: i32) -> Eval { Eval(self.0 + steps) }
/// fn win() -> Eval { Eval(100000) }
/// fn max() -> Eval { Eval(i32::MAX) }
/// fn is_win(&self) -> bool { self.0.abs() > 99000 }
/// }
/// # fn main() { }
/// ```
#[macro_export]
macro_rules! prepare_evaluation_tuple {
($type_: ident) => {
impl ::std::ops::Add for $type_ {
type Output = $type_;
fn add(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a + b)
}
}
impl ::std::ops::Sub for $type_ {
type Output = $type_;
fn sub(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a - b)
}
}
impl ::std::ops::Mul for $type_ {
type Output = $type_;
fn mul(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a * b)
}
}
impl ::std::ops::Div for $type_ {
type Output = $type_;
fn div(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a / b)
}
}
impl ::std::ops::Neg for $type_ {
type Output = $type_;
fn neg(self) -> $type_ {
let $type_(a) = self;
$type_(-a)
}
}
impl ::std::fmt::Display for $type_ {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
let $type_(a) = *self;
write!(f, "{}", a)
}
}
}
} | random_line_split |
|
evaluator.rs | //
// This file is part of zero_sum.
//
// zero_sum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// zero_sum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with zero_sum. If not, see <http://www.gnu.org/licenses/>.
//
// Copyright 2016-2017 Chris Foster
//
use std::fmt::Display;
use std::ops::{Add, Div, Mul, Neg, Sub};
use state::State;
/// An evaluation type.
///
/// This is usually a tuple around a signed numeric type.
///
/// # Example
///
/// There is a [helper macro](../macro.prepare_evaluation_tuple.html) to facilitate the implementation of tuple structs:
///
/// ```rust
/// #[macro_use]
/// extern crate zero_sum;
/// # use zero_sum::analysis::Evaluation;
/// # use std::fmt;
/// use std::i32;
/// use std::ops::{Add, Div, Mul, Neg, Sub};
///
/// #[derive(Clone, Copy, PartialEq, PartialOrd)]
/// struct Eval(i32);
///
/// prepare_evaluation_tuple!(Eval); // impl Add, Div, Mul, Neg, Sub, and Display
///
/// impl Evaluation for Eval {
/// fn null() -> Eval { Eval(0) }
/// fn shift(self, steps: i32) -> Eval { Eval(self.0 + steps) }
/// fn win() -> Eval { Eval(100000) }
/// fn max() -> Eval { Eval(i32::MAX) }
/// fn is_win(&self) -> bool { self.0 > 99000 }
/// }
/// # fn main() { }
/// ```
pub trait Evaluation:
Sized +
Clone +
Copy +
Display +
Add<Output = Self> +
Sub<Output = Self> +
Mul<Output = Self> +
Neg<Output = Self> +
Div<Output = Self> +
PartialEq +
PartialOrd {
/// An empty, or zero evaluation.
fn null() -> Self;
/// Shift the evaluation by the smallest representable amount `steps` times in the positive or negative direction.
fn shift(self, steps: i32) -> Self;
    /// The base value of a win. The evaluator may add to or subtract from it
/// in order to promote it or discourage it in favor of others in the search.
fn win() -> Self;
    /// The base value of a loss. The evaluator may add to or subtract from it
/// in order to promote it or discourage it in favor of others in the search.
fn | () -> Self { -Self::win() }
/// The maximum value representable. This must be safely negatable.
fn max() -> Self;
/// The minimum value representable.
fn min() -> Self { -Self::max() }
/// Returns `true` if this evaluation contains a win. This is usually a check to
/// see if the value is above a certain threshold.
fn is_win(&self) -> bool;
/// Returns `true` if this evaluation contains a loss.
fn is_lose(&self) -> bool { (-*self).is_win() }
/// Returns `true` if this evaluation is either a win or a loss.
fn is_end(&self) -> bool { self.is_win() || self.is_lose() }
}
/// Evaluates a State.
pub trait Evaluator {
type State: State;
type Evaluation: Evaluation;
/// Returns the evaluation of `state`.
fn evaluate(&self, state: &Self::State) -> Self::Evaluation;
/// Returns the evaluation of `state` after executing `plies`.
///
/// # Panics
/// Will panic if the execution of any ply in `plies` causes an error.
fn evaluate_plies(&self, state: &Self::State, plies: &[<Self::State as State>::Ply]) -> Self::Evaluation {
let mut state = state.clone();
if let Err(error) = state.execute_plies(plies) {
panic!("Error calculating evaluation: {}", error);
}
if plies.len() % 2 == 0 {
self.evaluate(&state)
} else {
-self.evaluate(&state)
}
}
}
/// Implement arithmetic operators (`Add`, `Sub`, `Mul`, `Neg`, `Div`) and `Display` for a tuple
/// struct in terms of the enclosed type.
///
/// # Example
///
/// ```rust
/// #[macro_use]
/// extern crate zero_sum;
/// # use zero_sum::analysis::Evaluation;
/// # use std::fmt;
/// use std::i32;
/// use std::ops::{Add, Div, Mul, Neg, Sub};
///
/// #[derive(Clone, Copy, PartialEq, PartialOrd)]
/// struct Eval(i32);
///
/// prepare_evaluation_tuple!(Eval); // impl Add, Div, Mul, Neg, Sub, and Display
///
/// impl Evaluation for Eval {
/// fn null() -> Eval { Eval(0) }
/// fn shift(self, steps: i32) -> Eval { Eval(self.0 + steps) }
/// fn win() -> Eval { Eval(100000) }
/// fn max() -> Eval { Eval(i32::MAX) }
/// fn is_win(&self) -> bool { self.0.abs() > 99000 }
/// }
/// # fn main() { }
/// ```
#[macro_export]
macro_rules! prepare_evaluation_tuple {
($type_: ident) => {
impl ::std::ops::Add for $type_ {
type Output = $type_;
fn add(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a + b)
}
}
impl ::std::ops::Sub for $type_ {
type Output = $type_;
fn sub(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a - b)
}
}
impl ::std::ops::Mul for $type_ {
type Output = $type_;
fn mul(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a * b)
}
}
impl ::std::ops::Div for $type_ {
type Output = $type_;
fn div(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a / b)
}
}
impl ::std::ops::Neg for $type_ {
type Output = $type_;
fn neg(self) -> $type_ {
let $type_(a) = self;
$type_(-a)
}
}
impl ::std::fmt::Display for $type_ {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
let $type_(a) = *self;
write!(f, "{}", a)
}
}
}
}
| lose | identifier_name |
evaluator.rs | //
// This file is part of zero_sum.
//
// zero_sum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// zero_sum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with zero_sum. If not, see <http://www.gnu.org/licenses/>.
//
// Copyright 2016-2017 Chris Foster
//
use std::fmt::Display;
use std::ops::{Add, Div, Mul, Neg, Sub};
use state::State;
/// An evaluation type.
///
/// This is usually a tuple around a signed numeric type.
///
/// # Example
///
/// There is a [helper macro](../macro.prepare_evaluation_tuple.html) to facilitate the implementation of tuple structs:
///
/// ```rust
/// #[macro_use]
/// extern crate zero_sum;
/// # use zero_sum::analysis::Evaluation;
/// # use std::fmt;
/// use std::i32;
/// use std::ops::{Add, Div, Mul, Neg, Sub};
///
/// #[derive(Clone, Copy, PartialEq, PartialOrd)]
/// struct Eval(i32);
///
/// prepare_evaluation_tuple!(Eval); // impl Add, Div, Mul, Neg, Sub, and Display
///
/// impl Evaluation for Eval {
/// fn null() -> Eval { Eval(0) }
/// fn shift(self, steps: i32) -> Eval { Eval(self.0 + steps) }
/// fn win() -> Eval { Eval(100000) }
/// fn max() -> Eval { Eval(i32::MAX) }
/// fn is_win(&self) -> bool { self.0 > 99000 }
/// }
/// # fn main() { }
/// ```
pub trait Evaluation:
Sized +
Clone +
Copy +
Display +
Add<Output = Self> +
Sub<Output = Self> +
Mul<Output = Self> +
Neg<Output = Self> +
Div<Output = Self> +
PartialEq +
PartialOrd {
/// An empty, or zero evaluation.
fn null() -> Self;
/// Shift the evaluation by the smallest representable amount `steps` times in the positive or negative direction.
fn shift(self, steps: i32) -> Self;
    /// The base value of a win. The evaluator may add to or subtract from it
/// in order to promote it or discourage it in favor of others in the search.
fn win() -> Self;
    /// The base value of a loss. The evaluator may add to or subtract from it
/// in order to promote it or discourage it in favor of others in the search.
fn lose() -> Self { -Self::win() }
/// The maximum value representable. This must be safely negatable.
fn max() -> Self;
/// The minimum value representable.
fn min() -> Self { -Self::max() }
/// Returns `true` if this evaluation contains a win. This is usually a check to
/// see if the value is above a certain threshold.
fn is_win(&self) -> bool;
/// Returns `true` if this evaluation contains a loss.
fn is_lose(&self) -> bool { (-*self).is_win() }
/// Returns `true` if this evaluation is either a win or a loss.
fn is_end(&self) -> bool { self.is_win() || self.is_lose() }
}
/// Evaluates a State.
pub trait Evaluator {
type State: State;
type Evaluation: Evaluation;
/// Returns the evaluation of `state`.
fn evaluate(&self, state: &Self::State) -> Self::Evaluation;
/// Returns the evaluation of `state` after executing `plies`.
///
/// # Panics
/// Will panic if the execution of any ply in `plies` causes an error.
fn evaluate_plies(&self, state: &Self::State, plies: &[<Self::State as State>::Ply]) -> Self::Evaluation {
let mut state = state.clone();
if let Err(error) = state.execute_plies(plies) {
panic!("Error calculating evaluation: {}", error);
}
if plies.len() % 2 == 0 {
self.evaluate(&state)
} else |
}
}
/// Implement arithmetic operators (`Add`, `Sub`, `Mul`, `Neg`, `Div`) and `Display` for a tuple
/// struct in terms of the enclosed type.
///
/// # Example
///
/// ```rust
/// #[macro_use]
/// extern crate zero_sum;
/// # use zero_sum::analysis::Evaluation;
/// # use std::fmt;
/// use std::i32;
/// use std::ops::{Add, Div, Mul, Neg, Sub};
///
/// #[derive(Clone, Copy, PartialEq, PartialOrd)]
/// struct Eval(i32);
///
/// prepare_evaluation_tuple!(Eval); // impl Add, Div, Mul, Neg, Sub, and Display
///
/// impl Evaluation for Eval {
/// fn null() -> Eval { Eval(0) }
/// fn shift(self, steps: i32) -> Eval { Eval(self.0 + steps) }
/// fn win() -> Eval { Eval(100000) }
/// fn max() -> Eval { Eval(i32::MAX) }
/// fn is_win(&self) -> bool { self.0.abs() > 99000 }
/// }
/// # fn main() { }
/// ```
#[macro_export]
macro_rules! prepare_evaluation_tuple {
($type_: ident) => {
impl ::std::ops::Add for $type_ {
type Output = $type_;
fn add(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a + b)
}
}
impl ::std::ops::Sub for $type_ {
type Output = $type_;
fn sub(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a - b)
}
}
impl ::std::ops::Mul for $type_ {
type Output = $type_;
fn mul(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a * b)
}
}
impl ::std::ops::Div for $type_ {
type Output = $type_;
fn div(self, $type_(b): $type_) -> $type_ {
let $type_(a) = self;
$type_(a / b)
}
}
impl ::std::ops::Neg for $type_ {
type Output = $type_;
fn neg(self) -> $type_ {
let $type_(a) = self;
$type_(-a)
}
}
impl ::std::fmt::Display for $type_ {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
let $type_(a) = *self;
write!(f, "{}", a)
}
}
}
}
| {
-self.evaluate(&state)
} | conditional_block |
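A win and a loss are mirror images in this trait: is_lose(x) is defined as is_win(-x), lose() as -win(), and evaluate_plies negates the score after an odd number of plies because the side to move has flipped. A short Python sketch of those identities (the values and helper names are illustrative, not taken from the record):

# Zero-sum identities from the Evaluation/Evaluator traits above.
# WIN and the 99_000 threshold are illustrative values.
WIN = 100_000

def is_win(value):
    return value > 99_000

def is_lose(value):
    return is_win(-value)  # a loss is a negated win

def evaluate_plies(evaluate, apply_ply, state, plies):
    # After an odd number of plies the side to move has flipped,
    # so the zero-sum score is negated (the parity branch above).
    for ply in plies:
        state = apply_ply(state, ply)  # apply_ply is a hypothetical stand-in
    score = evaluate(state)
    return score if len(plies) % 2 == 0 else -score

assert is_win(WIN) and is_lose(-WIN)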
regofsoffline.py | from datetime import datetime as dt
import os
from enum import IntEnum
import logging
from typing import Optional
from netCDF4 import Dataset, num2date
from hyo2.soundspeed.base.geodesy import Geodesy
from hyo2.soundspeed.profile.dicts import Dicts
from hyo2.soundspeed.profile.profile import Profile
from hyo2.soundspeed.profile.profilelist import ProfileList
from hyo2.abc.lib.progress.cli_progress import CliProgress
logger = logging.getLogger(__name__)
class RegOfsOffline:
class Model(IntEnum):
# East Coast
CBOFS = 10 # RG = True # Format is GoMOFS
DBOFS = 11 # RG = True # Format is GoMOFS
GoMOFS = 12 # RG = True # Format is GoMOFS
NYOFS = 13 # RG = False
SJROFS = 14 # RG = False
# Gulf of Mexico
NGOFS = 20 # RG = True # Format is GoMOFS
TBOFS = 21 # RG = True # Format is GoMOFS
# Great Lakes
LEOFS = 30 # RG = True # Format is GoMOFS
LHOFS = 31 # RG = False
LMOFS = 32 # RG = False
LOOFS = 33 # RG = False
LSOFS = 34 # RG = False
# Pacific Coast
CREOFS = 40 # RG = True # Format is GoMOFS
SFBOFS = 41 # RG = True # Format is GoMOFS
# noinspection DuplicatedCode
regofs_model_descs = \
{
Model.CBOFS: "Chesapeake Bay Operational Forecast System",
Model.DBOFS: "Delaware Bay Operational Forecast System",
Model.GoMOFS: "Gulf of Maine Operational Forecast System",
Model.NYOFS: "Port of New York and New Jersey Operational Forecast System",
Model.SJROFS: "St. John's River Operational Forecast System",
Model.NGOFS: "Northern Gulf of Mexico Operational Forecast System",
Model.TBOFS: "Tampa Bay Operational Forecast System",
Model.LEOFS: "Lake Erie Operational Forecast System",
Model.LHOFS: "Lake Huron Operational Forecast System",
Model.LMOFS: "Lake Michigan Operational Forecast System",
Model.LOOFS: "Lake Ontario Operational Forecast System",
Model.LSOFS: "Lake Superior Operational Forecast System",
Model.CREOFS: "Columbia River Estuary Operational Forecast System",
Model.SFBOFS: "San Francisco Bay Operational Forecast System"
}
def __init__(self, data_folder: str, prj: 'hyo2.soundspeed.soundspeed import SoundSpeedLibrary') -> None:
self.name = self.__class__.__name__
self.desc = "Abstract atlas" # a human-readable description
self.data_folder = data_folder
self.prj = prj
self.g = Geodesy()
self._has_data_loaded = False # grids are "loaded" ? (netCDF files are opened)
self._file = None
self._day_idx = 0
self._timestamp = None
self._zeta = None
self._siglay = None
self._h = None
self._lats = None
self._lons = None
self._lat = None
self._lon = None
self._loc_idx = None
self._d = None
self._temp = None
self._sal = None
def query(self, nc_path: str, lat: float, lon: float) -> Optional[ProfileList]:
if not os.path.exists(nc_path):
raise RuntimeError('Unable to locate %s' % nc_path)
logger.debug('nc path: %s' % nc_path)
if (lat is None) or (lon is None):
logger.error("invalid location query: (%s, %s)" % (lon, lat))
return None
logger.debug('query location: %s, %s' % (lat, lon))
progress = CliProgress()
try:
self._file = Dataset(nc_path)
progress.update(20)
except (RuntimeError, IOError) as e:
logger.warning("unable to access data: %s" % e)
self.clear_data()
progress.end()
return None
try:
self.name = self._file.title
time = self._file.variables['time']
self._timestamp = num2date(time[0], units=time.units)
logger.debug("Retrieved time: %s" % self._timestamp.isoformat())
# Now get latitudes, longitudes and depths for x,y,z referencing
self._lats = self._file.variables['lat'][:]
self._lons = self._file.variables['lon'][:]
# logger.debug('lat:(%s)\n%s' % (self._lats.shape, self._lats))
# logger.debug('lon:(%s)\n%s' % (self._lons.shape, self._lons))
self._zeta = self._file.variables['zeta'][0, :]
self._siglay = self._file.variables['siglay'][:]
self._h = self._file.variables['h'][:]
# logger.debug('zeta:(%s)\n%s' % (self._zeta.shape, self._zeta))
# logger.debug('siglay:(%s)\n%s' % (self._siglay.shape, self._siglay[:, 0]))
# logger.debug('h:(%s)\n%s' % (self._h.shape, self._h))
self._temp = self._file.variables['temp'][:]
self._sal = self._file.variables['salinity'][:]
# logger.debug('temp:(%s)\n%s' % (self._temp.shape, self._temp[:, 0]))
# logger.debug('sal:(%s)\n%s' % (self._sal.shape, self._sal[:, 0]))
except Exception as e:
logger.error("troubles in variable lookup for lat/long grid and/or depth: %s" % e)
self.clear_data()
progress.end()
return None
min_dist = 100000.0
min_idx = None
for idx, _ in enumerate(self._lats):
nc_lat = self._lats[idx]
nc_lon = self._lons[idx]
if nc_lon > 180.0:
nc_lon = nc_lon - 360.0
nc_dist = self.g.distance(nc_lon, nc_lat, lon, lat)
# logger.debug('loc: %.6f, %.6f -> %.6f' % (nc_lat, nc_lon, nc_dist))
if nc_dist < min_dist:
min_dist = nc_dist
min_idx = idx
if min_dist >= 10000.0:
logger.error("location too far from model nodes: %.f" % min_dist)
self.clear_data()
progress.end()
return None
self._loc_idx = min_idx
self._lon = self._lons[self._loc_idx]
if self._lon > 180.0:
self._lon = self._lon - 360.0
self._lat = self._lats[self._loc_idx]
logger.debug('closest node: %d [%s, %s] -> %s' % (self._loc_idx, self._lat, self._lon, min_dist))
zeta = self._zeta[self._loc_idx]
h = self._h[self._loc_idx]
siglay = -self._siglay[:, self._loc_idx]
# logger.debug('zeta: %s, h: %s, siglay: %s' % (zeta, h, siglay))
self._d = siglay * (h + zeta)
# logger.debug('d:(%s)\n%s' % (self._h.shape, self._d))
# Make a new SV object to return our query in
ssp = Profile()
ssp.meta.sensor_type = Dicts.sensor_types['Synthetic']
ssp.meta.probe_type = Dicts.probe_types[self.name]
ssp.meta.latitude = self._lat
ssp.meta.longitude = self._lon
ssp.meta.utc_time = dt(year=self._timestamp.year, month=self._timestamp.month,
day=self._timestamp.day, hour=self._timestamp.hour,
minute=self._timestamp.minute, second=self._timestamp.second)
ssp.meta.original_path = "%s_%s" % (self.name, self._timestamp.strftime("%Y%m%d_%H%M%S"))
ssp.init_data(self._d.shape[0])
ssp.data.depth = self._d[:]
ssp.data.temp = self._temp[0, :, self._loc_idx]
ssp.data.sal = self._sal[0, :, self._loc_idx]
ssp.calc_data_speed()
ssp.clone_data_to_proc()
ssp.init_sis()
profiles = ProfileList()
profiles.append_profile(ssp)
progress.end()
return profiles
def clear_data(self) -> None:
"""Delete the data and reset the last loaded day"""
logger.debug("clearing data")
if self._has_data_loaded:
|
self._has_data_loaded = False # grids are "loaded" ? (netCDF files are opened)
self._file = None
self._day_idx = 0
self._timestamp = None
self._zeta = None
self._siglay = None
self._h = None
self._lats = None
self._lons = None
self._lat = None
self._lon = None
self._loc_idx = None
self._d = None
self._temp = None
self._sal = None
def __repr__(self):
msg = "%s" % super().__repr__()
msg += " <has data loaded: %s>\n" % (self._has_data_loaded,)
msg += " <loaded day: %s>\n" % (self._timestamp.strftime(r"%d\%m\%Y"),)
return msg
| if self._file:
self._file.close() | conditional_block |
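The query above scans every node of the unstructured grid, keeps the closest one by geodesic distance, rejects anything farther than 10 km, and then turns sigma layers into depths as d = -siglay * (h + zeta). A sketch of the node selection, with a haversine function standing in for the record's Geodesy.distance():

import math

def haversine_m(lon1, lat1, lon2, lat2):
    # Great-circle distance in meters on a spherical Earth; an illustrative
    # stand-in for the Geodesy.distance() call used in the record.
    r = 6_371_000.0
    p1, p2 = math.radians(lat1), math.radians(lat2)
    a = (math.sin((p2 - p1) / 2) ** 2
         + math.cos(p1) * math.cos(p2) * math.sin(math.radians(lon2 - lon1) / 2) ** 2)
    return 2 * r * math.asin(math.sqrt(a))

def nearest_node(lats, lons, lat, lon, max_dist_m=10_000.0):
    best_idx, best_dist = None, float("inf")
    for idx, (nc_lat, nc_lon) in enumerate(zip(lats, lons)):
        if nc_lon > 180.0:  # normalize 0..360 longitudes to -180..180
            nc_lon -= 360.0
        dist = haversine_m(nc_lon, nc_lat, lon, lat)
        if dist < best_dist:
            best_idx, best_dist = idx, dist
    return None if best_dist >= max_dist_m else best_idx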
regofsoffline.py | from datetime import datetime as dt
import os
from enum import IntEnum
import logging
from typing import Optional
from netCDF4 import Dataset, num2date
from hyo2.soundspeed.base.geodesy import Geodesy
from hyo2.soundspeed.profile.dicts import Dicts
from hyo2.soundspeed.profile.profile import Profile
from hyo2.soundspeed.profile.profilelist import ProfileList
from hyo2.abc.lib.progress.cli_progress import CliProgress
logger = logging.getLogger(__name__)
class RegOfsOffline:
class Model(IntEnum):
# East Coast
CBOFS = 10 # RG = True # Format is GoMOFS
DBOFS = 11 # RG = True # Format is GoMOFS
GoMOFS = 12 # RG = True # Format is GoMOFS
NYOFS = 13 # RG = False
SJROFS = 14 # RG = False
# Gulf of Mexico
NGOFS = 20 # RG = True # Format is GoMOFS
TBOFS = 21 # RG = True # Format is GoMOFS
# Great Lakes
LEOFS = 30 # RG = True # Format is GoMOFS
LHOFS = 31 # RG = False
LMOFS = 32 # RG = False
LOOFS = 33 # RG = False
LSOFS = 34 # RG = False
# Pacific Coast
CREOFS = 40 # RG = True # Format is GoMOFS
SFBOFS = 41 # RG = True # Format is GoMOFS
# noinspection DuplicatedCode
regofs_model_descs = \
{
Model.CBOFS: "Chesapeake Bay Operational Forecast System",
Model.DBOFS: "Delaware Bay Operational Forecast System",
Model.GoMOFS: "Gulf of Maine Operational Forecast System",
Model.NYOFS: "Port of New York and New Jersey Operational Forecast System",
Model.SJROFS: "St. John's River Operational Forecast System",
Model.NGOFS: "Northern Gulf of Mexico Operational Forecast System",
Model.TBOFS: "Tampa Bay Operational Forecast System",
Model.LEOFS: "Lake Erie Operational Forecast System",
Model.LHOFS: "Lake Huron Operational Forecast System",
Model.LMOFS: "Lake Michigan Operational Forecast System",
Model.LOOFS: "Lake Ontario Operational Forecast System",
Model.LSOFS: "Lake Superior Operational Forecast System",
Model.CREOFS: "Columbia River Estuary Operational Forecast System",
Model.SFBOFS: "San Francisco Bay Operational Forecast System" | def __init__(self, data_folder: str, prj: 'hyo2.soundspeed.soundspeed import SoundSpeedLibrary') -> None:
self.name = self.__class__.__name__
self.desc = "Abstract atlas" # a human-readable description
self.data_folder = data_folder
self.prj = prj
self.g = Geodesy()
self._has_data_loaded = False # grids are "loaded" ? (netCDF files are opened)
self._file = None
self._day_idx = 0
self._timestamp = None
self._zeta = None
self._siglay = None
self._h = None
self._lats = None
self._lons = None
self._lat = None
self._lon = None
self._loc_idx = None
self._d = None
self._temp = None
self._sal = None
def query(self, nc_path: str, lat: float, lon: float) -> Optional[ProfileList]:
if not os.path.exists(nc_path):
raise RuntimeError('Unable to locate %s' % nc_path)
logger.debug('nc path: %s' % nc_path)
if (lat is None) or (lon is None):
logger.error("invalid location query: (%s, %s)" % (lon, lat))
return None
logger.debug('query location: %s, %s' % (lat, lon))
progress = CliProgress()
try:
self._file = Dataset(nc_path)
progress.update(20)
except (RuntimeError, IOError) as e:
logger.warning("unable to access data: %s" % e)
self.clear_data()
progress.end()
return None
try:
self.name = self._file.title
time = self._file.variables['time']
self._timestamp = num2date(time[0], units=time.units)
logger.debug("Retrieved time: %s" % self._timestamp.isoformat())
# Now get latitudes, longitudes and depths for x,y,z referencing
self._lats = self._file.variables['lat'][:]
self._lons = self._file.variables['lon'][:]
# logger.debug('lat:(%s)\n%s' % (self._lats.shape, self._lats))
# logger.debug('lon:(%s)\n%s' % (self._lons.shape, self._lons))
self._zeta = self._file.variables['zeta'][0, :]
self._siglay = self._file.variables['siglay'][:]
self._h = self._file.variables['h'][:]
# logger.debug('zeta:(%s)\n%s' % (self._zeta.shape, self._zeta))
# logger.debug('siglay:(%s)\n%s' % (self._siglay.shape, self._siglay[:, 0]))
# logger.debug('h:(%s)\n%s' % (self._h.shape, self._h))
self._temp = self._file.variables['temp'][:]
self._sal = self._file.variables['salinity'][:]
# logger.debug('temp:(%s)\n%s' % (self._temp.shape, self._temp[:, 0]))
# logger.debug('sal:(%s)\n%s' % (self._sal.shape, self._sal[:, 0]))
except Exception as e:
logger.error("troubles in variable lookup for lat/long grid and/or depth: %s" % e)
self.clear_data()
progress.end()
return None
min_dist = 100000.0
min_idx = None
for idx, _ in enumerate(self._lats):
nc_lat = self._lats[idx]
nc_lon = self._lons[idx]
if nc_lon > 180.0:
nc_lon = nc_lon - 360.0
nc_dist = self.g.distance(nc_lon, nc_lat, lon, lat)
# logger.debug('loc: %.6f, %.6f -> %.6f' % (nc_lat, nc_lon, nc_dist))
if nc_dist < min_dist:
min_dist = nc_dist
min_idx = idx
if min_dist >= 10000.0:
logger.error("location too far from model nodes: %.f" % min_dist)
self.clear_data()
progress.end()
return None
self._loc_idx = min_idx
self._lon = self._lons[self._loc_idx]
if self._lon > 180.0:
self._lon = self._lon - 360.0
self._lat = self._lats[self._loc_idx]
logger.debug('closest node: %d [%s, %s] -> %s' % (self._loc_idx, self._lat, self._lon, min_dist))
zeta = self._zeta[self._loc_idx]
h = self._h[self._loc_idx]
siglay = -self._siglay[:, self._loc_idx]
# logger.debug('zeta: %s, h: %s, siglay: %s' % (zeta, h, siglay))
self._d = siglay * (h + zeta)
# logger.debug('d:(%s)\n%s' % (self._h.shape, self._d))
# Make a new SV object to return our query in
ssp = Profile()
ssp.meta.sensor_type = Dicts.sensor_types['Synthetic']
ssp.meta.probe_type = Dicts.probe_types[self.name]
ssp.meta.latitude = self._lat
ssp.meta.longitude = self._lon
ssp.meta.utc_time = dt(year=self._timestamp.year, month=self._timestamp.month,
day=self._timestamp.day, hour=self._timestamp.hour,
minute=self._timestamp.minute, second=self._timestamp.second)
ssp.meta.original_path = "%s_%s" % (self.name, self._timestamp.strftime("%Y%m%d_%H%M%S"))
ssp.init_data(self._d.shape[0])
ssp.data.depth = self._d[:]
ssp.data.temp = self._temp[0, :, self._loc_idx]
ssp.data.sal = self._sal[0, :, self._loc_idx]
ssp.calc_data_speed()
ssp.clone_data_to_proc()
ssp.init_sis()
profiles = ProfileList()
profiles.append_profile(ssp)
progress.end()
return profiles
def clear_data(self) -> None:
"""Delete the data and reset the last loaded day"""
logger.debug("clearing data")
if self._has_data_loaded:
if self._file:
self._file.close()
self._has_data_loaded = False # grids are "loaded" ? (netCDF files are opened)
self._file = None
self._day_idx = 0
self._timestamp = None
self._zeta = None
self._siglay = None
self._h = None
self._lats = None
self._lons = None
self._lat = None
self._lon = None
self._loc_idx = None
self._d = None
self._temp = None
self._sal = None
def __repr__(self):
msg = "%s" % super().__repr__()
msg += " <has data loaded: %s>\n" % (self._has_data_loaded,)
msg += " <loaded day: %s>\n" % (self._timestamp.strftime(r"%d\%m\%Y"),)
return msg | }
| random_line_split |
regofsoffline.py | from datetime import datetime as dt
import os
from enum import IntEnum
import logging
from typing import Optional
from netCDF4 import Dataset, num2date
from hyo2.soundspeed.base.geodesy import Geodesy
from hyo2.soundspeed.profile.dicts import Dicts
from hyo2.soundspeed.profile.profile import Profile
from hyo2.soundspeed.profile.profilelist import ProfileList
from hyo2.abc.lib.progress.cli_progress import CliProgress
logger = logging.getLogger(__name__)
class RegOfsOffline:
class Model(IntEnum):
# East Coast
CBOFS = 10 # RG = True # Format is GoMOFS
DBOFS = 11 # RG = True # Format is GoMOFS
GoMOFS = 12 # RG = True # Format is GoMOFS
NYOFS = 13 # RG = False
SJROFS = 14 # RG = False
# Gulf of Mexico
NGOFS = 20 # RG = True # Format is GoMOFS
TBOFS = 21 # RG = True # Format is GoMOFS
# Great Lakes
LEOFS = 30 # RG = True # Format is GoMOFS
LHOFS = 31 # RG = False
LMOFS = 32 # RG = False
LOOFS = 33 # RG = False
LSOFS = 34 # RG = False
# Pacific Coast
CREOFS = 40 # RG = True # Format is GoMOFS
SFBOFS = 41 # RG = True # Format is GoMOFS
# noinspection DuplicatedCode
regofs_model_descs = \
{
Model.CBOFS: "Chesapeake Bay Operational Forecast System",
Model.DBOFS: "Delaware Bay Operational Forecast System",
Model.GoMOFS: "Gulf of Maine Operational Forecast System",
Model.NYOFS: "Port of New York and New Jersey Operational Forecast System",
Model.SJROFS: "St. John's River Operational Forecast System",
Model.NGOFS: "Northern Gulf of Mexico Operational Forecast System",
Model.TBOFS: "Tampa Bay Operational Forecast System",
Model.LEOFS: "Lake Erie Operational Forecast System",
Model.LHOFS: "Lake Huron Operational Forecast System",
Model.LMOFS: "Lake Michigan Operational Forecast System",
Model.LOOFS: "Lake Ontario Operational Forecast System",
Model.LSOFS: "Lake Superior Operational Forecast System",
Model.CREOFS: "Columbia River Estuary Operational Forecast System",
Model.SFBOFS: "San Francisco Bay Operational Forecast System"
}
def __init__(self, data_folder: str, prj: 'hyo2.soundspeed.soundspeed import SoundSpeedLibrary') -> None:
|
def query(self, nc_path: str, lat: float, lon: float) -> Optional[ProfileList]:
if not os.path.exists(nc_path):
raise RuntimeError('Unable to locate %s' % nc_path)
logger.debug('nc path: %s' % nc_path)
if (lat is None) or (lon is None):
logger.error("invalid location query: (%s, %s)" % (lon, lat))
return None
logger.debug('query location: %s, %s' % (lat, lon))
progress = CliProgress()
try:
self._file = Dataset(nc_path)
progress.update(20)
except (RuntimeError, IOError) as e:
logger.warning("unable to access data: %s" % e)
self.clear_data()
progress.end()
return None
try:
self.name = self._file.title
time = self._file.variables['time']
self._timestamp = num2date(time[0], units=time.units)
logger.debug("Retrieved time: %s" % self._timestamp.isoformat())
# Now get latitudes, longitudes and depths for x,y,z referencing
self._lats = self._file.variables['lat'][:]
self._lons = self._file.variables['lon'][:]
# logger.debug('lat:(%s)\n%s' % (self._lats.shape, self._lats))
# logger.debug('lon:(%s)\n%s' % (self._lons.shape, self._lons))
self._zeta = self._file.variables['zeta'][0, :]
self._siglay = self._file.variables['siglay'][:]
self._h = self._file.variables['h'][:]
# logger.debug('zeta:(%s)\n%s' % (self._zeta.shape, self._zeta))
# logger.debug('siglay:(%s)\n%s' % (self._siglay.shape, self._siglay[:, 0]))
# logger.debug('h:(%s)\n%s' % (self._h.shape, self._h))
self._temp = self._file.variables['temp'][:]
self._sal = self._file.variables['salinity'][:]
# logger.debug('temp:(%s)\n%s' % (self._temp.shape, self._temp[:, 0]))
# logger.debug('sal:(%s)\n%s' % (self._sal.shape, self._sal[:, 0]))
except Exception as e:
logger.error("troubles in variable lookup for lat/long grid and/or depth: %s" % e)
self.clear_data()
progress.end()
return None
min_dist = 100000.0
min_idx = None
for idx, _ in enumerate(self._lats):
nc_lat = self._lats[idx]
nc_lon = self._lons[idx]
if nc_lon > 180.0:
nc_lon = nc_lon - 360.0
nc_dist = self.g.distance(nc_lon, nc_lat, lon, lat)
# logger.debug('loc: %.6f, %.6f -> %.6f' % (nc_lat, nc_lon, nc_dist))
if nc_dist < min_dist:
min_dist = nc_dist
min_idx = idx
if min_dist >= 10000.0:
logger.error("location too far from model nodes: %.f" % min_dist)
self.clear_data()
progress.end()
return None
self._loc_idx = min_idx
self._lon = self._lons[self._loc_idx]
if self._lon > 180.0:
self._lon = self._lon - 360.0
self._lat = self._lats[self._loc_idx]
logger.debug('closest node: %d [%s, %s] -> %s' % (self._loc_idx, self._lat, self._lon, min_dist))
zeta = self._zeta[self._loc_idx]
h = self._h[self._loc_idx]
siglay = -self._siglay[:, self._loc_idx]
# logger.debug('zeta: %s, h: %s, siglay: %s' % (zeta, h, siglay))
self._d = siglay * (h + zeta)
# logger.debug('d:(%s)\n%s' % (self._h.shape, self._d))
# Make a new SV object to return our query in
ssp = Profile()
ssp.meta.sensor_type = Dicts.sensor_types['Synthetic']
ssp.meta.probe_type = Dicts.probe_types[self.name]
ssp.meta.latitude = self._lat
ssp.meta.longitude = self._lon
ssp.meta.utc_time = dt(year=self._timestamp.year, month=self._timestamp.month,
day=self._timestamp.day, hour=self._timestamp.hour,
minute=self._timestamp.minute, second=self._timestamp.second)
ssp.meta.original_path = "%s_%s" % (self.name, self._timestamp.strftime("%Y%m%d_%H%M%S"))
ssp.init_data(self._d.shape[0])
ssp.data.depth = self._d[:]
ssp.data.temp = self._temp[0, :, self._loc_idx]
ssp.data.sal = self._sal[0, :, self._loc_idx]
ssp.calc_data_speed()
ssp.clone_data_to_proc()
ssp.init_sis()
profiles = ProfileList()
profiles.append_profile(ssp)
progress.end()
return profiles
def clear_data(self) -> None:
"""Delete the data and reset the last loaded day"""
logger.debug("clearing data")
if self._has_data_loaded:
if self._file:
self._file.close()
self._has_data_loaded = False # grids are "loaded" ? (netCDF files are opened)
self._file = None
self._day_idx = 0
self._timestamp = None
self._zeta = None
self._siglay = None
self._h = None
self._lats = None
self._lons = None
self._lat = None
self._lon = None
self._loc_idx = None
self._d = None
self._temp = None
self._sal = None
def __repr__(self):
msg = "%s" % super().__repr__()
msg += " <has data loaded: %s>\n" % (self._has_data_loaded,)
msg += " <loaded day: %s>\n" % (self._timestamp.strftime(r"%d\%m\%Y"),)
return msg
| self.name = self.__class__.__name__
self.desc = "Abstract atlas" # a human-readable description
self.data_folder = data_folder
self.prj = prj
self.g = Geodesy()
self._has_data_loaded = False # grids are "loaded" ? (netCDF files are opened)
self._file = None
self._day_idx = 0
self._timestamp = None
self._zeta = None
self._siglay = None
self._h = None
self._lats = None
self._lons = None
self._lat = None
self._lon = None
self._loc_idx = None
self._d = None
self._temp = None
self._sal = None | identifier_body |
regofsoffline.py | from datetime import datetime as dt
import os
from enum import IntEnum
import logging
from typing import Optional
from netCDF4 import Dataset, num2date
from hyo2.soundspeed.base.geodesy import Geodesy
from hyo2.soundspeed.profile.dicts import Dicts
from hyo2.soundspeed.profile.profile import Profile
from hyo2.soundspeed.profile.profilelist import ProfileList
from hyo2.abc.lib.progress.cli_progress import CliProgress
logger = logging.getLogger(__name__)
class RegOfsOffline:
class Model(IntEnum):
# East Coast
CBOFS = 10 # RG = True # Format is GoMOFS
DBOFS = 11 # RG = True # Format is GoMOFS
GoMOFS = 12 # RG = True # Format is GoMOFS
NYOFS = 13 # RG = False
SJROFS = 14 # RG = False
# Gulf of Mexico
NGOFS = 20 # RG = True # Format is GoMOFS
TBOFS = 21 # RG = True # Format is GoMOFS
# Great Lakes
LEOFS = 30 # RG = True # Format is GoMOFS
LHOFS = 31 # RG = False
LMOFS = 32 # RG = False
LOOFS = 33 # RG = False
LSOFS = 34 # RG = False
# Pacific Coast
CREOFS = 40 # RG = True # Format is GoMOFS
SFBOFS = 41 # RG = True # Format is GoMOFS
# noinspection DuplicatedCode
regofs_model_descs = \
{
Model.CBOFS: "Chesapeake Bay Operational Forecast System",
Model.DBOFS: "Delaware Bay Operational Forecast System",
Model.GoMOFS: "Gulf of Maine Operational Forecast System",
Model.NYOFS: "Port of New York and New Jersey Operational Forecast System",
Model.SJROFS: "St. John's River Operational Forecast System",
Model.NGOFS: "Northern Gulf of Mexico Operational Forecast System",
Model.TBOFS: "Tampa Bay Operational Forecast System",
Model.LEOFS: "Lake Erie Operational Forecast System",
Model.LHOFS: "Lake Huron Operational Forecast System",
Model.LMOFS: "Lake Michigan Operational Forecast System",
Model.LOOFS: "Lake Ontario Operational Forecast System",
Model.LSOFS: "Lake Superior Operational Forecast System",
Model.CREOFS: "Columbia River Estuary Operational Forecast System",
Model.SFBOFS: "San Francisco Bay Operational Forecast System"
}
def | (self, data_folder: str, prj: 'hyo2.soundspeed.soundspeed import SoundSpeedLibrary') -> None:
self.name = self.__class__.__name__
self.desc = "Abstract atlas" # a human-readable description
self.data_folder = data_folder
self.prj = prj
self.g = Geodesy()
self._has_data_loaded = False # grids are "loaded" ? (netCDF files are opened)
self._file = None
self._day_idx = 0
self._timestamp = None
self._zeta = None
self._siglay = None
self._h = None
self._lats = None
self._lons = None
self._lat = None
self._lon = None
self._loc_idx = None
self._d = None
self._temp = None
self._sal = None
def query(self, nc_path: str, lat: float, lon: float) -> Optional[ProfileList]:
if not os.path.exists(nc_path):
raise RuntimeError('Unable to locate %s' % nc_path)
logger.debug('nc path: %s' % nc_path)
if (lat is None) or (lon is None):
logger.error("invalid location query: (%s, %s)" % (lon, lat))
return None
logger.debug('query location: %s, %s' % (lat, lon))
progress = CliProgress()
try:
self._file = Dataset(nc_path)
progress.update(20)
except (RuntimeError, IOError) as e:
logger.warning("unable to access data: %s" % e)
self.clear_data()
progress.end()
return None
try:
self.name = self._file.title
time = self._file.variables['time']
self._timestamp = num2date(time[0], units=time.units)
logger.debug("Retrieved time: %s" % self._timestamp.isoformat())
# Now get latitudes, longitudes and depths for x,y,z referencing
self._lats = self._file.variables['lat'][:]
self._lons = self._file.variables['lon'][:]
# logger.debug('lat:(%s)\n%s' % (self._lats.shape, self._lats))
# logger.debug('lon:(%s)\n%s' % (self._lons.shape, self._lons))
self._zeta = self._file.variables['zeta'][0, :]
self._siglay = self._file.variables['siglay'][:]
self._h = self._file.variables['h'][:]
# logger.debug('zeta:(%s)\n%s' % (self._zeta.shape, self._zeta))
# logger.debug('siglay:(%s)\n%s' % (self._siglay.shape, self._siglay[:, 0]))
# logger.debug('h:(%s)\n%s' % (self._h.shape, self._h))
self._temp = self._file.variables['temp'][:]
self._sal = self._file.variables['salinity'][:]
# logger.debug('temp:(%s)\n%s' % (self._temp.shape, self._temp[:, 0]))
# logger.debug('sal:(%s)\n%s' % (self._sal.shape, self._sal[:, 0]))
except Exception as e:
logger.error("troubles in variable lookup for lat/long grid and/or depth: %s" % e)
self.clear_data()
progress.end()
return None
min_dist = 100000.0
min_idx = None
for idx, _ in enumerate(self._lats):
nc_lat = self._lats[idx]
nc_lon = self._lons[idx]
if nc_lon > 180.0:
nc_lon = nc_lon - 360.0
nc_dist = self.g.distance(nc_lon, nc_lat, lon, lat)
# logger.debug('loc: %.6f, %.6f -> %.6f' % (nc_lat, nc_lon, nc_dist))
if nc_dist < min_dist:
min_dist = nc_dist
min_idx = idx
if min_dist >= 10000.0:
logger.error("location too far from model nodes: %.f" % min_dist)
self.clear_data()
progress.end()
return None
self._loc_idx = min_idx
self._lon = self._lons[self._loc_idx]
if self._lon > 180.0:
self._lon = self._lon - 360.0
self._lat = self._lats[self._loc_idx]
logger.debug('closest node: %d [%s, %s] -> %s' % (self._loc_idx, self._lat, self._lon, min_dist))
zeta = self._zeta[self._loc_idx]
h = self._h[self._loc_idx]
siglay = -self._siglay[:, self._loc_idx]
# logger.debug('zeta: %s, h: %s, siglay: %s' % (zeta, h, siglay))
self._d = siglay * (h + zeta)
# logger.debug('d:(%s)\n%s' % (self._h.shape, self._d))
# Make a new SV object to return our query in
ssp = Profile()
ssp.meta.sensor_type = Dicts.sensor_types['Synthetic']
ssp.meta.probe_type = Dicts.probe_types[self.name]
ssp.meta.latitude = self._lat
ssp.meta.longitude = self._lon
ssp.meta.utc_time = dt(year=self._timestamp.year, month=self._timestamp.month,
day=self._timestamp.day, hour=self._timestamp.hour,
minute=self._timestamp.minute, second=self._timestamp.second)
ssp.meta.original_path = "%s_%s" % (self.name, self._timestamp.strftime("%Y%m%d_%H%M%S"))
ssp.init_data(self._d.shape[0])
ssp.data.depth = self._d[:]
ssp.data.temp = self._temp[0, :, self._loc_idx]
ssp.data.sal = self._sal[0, :, self._loc_idx]
ssp.calc_data_speed()
ssp.clone_data_to_proc()
ssp.init_sis()
profiles = ProfileList()
profiles.append_profile(ssp)
progress.end()
return profiles
def clear_data(self) -> None:
"""Delete the data and reset the last loaded day"""
logger.debug("clearing data")
if self._has_data_loaded:
if self._file:
self._file.close()
self._has_data_loaded = False # grids are "loaded" ? (netCDF files are opened)
self._file = None
self._day_idx = 0
self._timestamp = None
self._zeta = None
self._siglay = None
self._h = None
self._lats = None
self._lons = None
self._lat = None
self._lon = None
self._loc_idx = None
self._d = None
self._temp = None
self._sal = None
def __repr__(self):
msg = "%s" % super().__repr__()
msg += " <has data loaded: %s>\n" % (self._has_data_loaded,)
msg += " <loaded day: %s>\n" % (self._timestamp.strftime(r"%d\%m\%Y"),)
return msg
| __init__ | identifier_name |
Routes.js | import React from 'react';
import { connect } from 'react-redux';
import {Router, Route, IndexRedirect, browserHistory} from 'react-router';
import { updatePlayersArray, updateLastNumberRolled, updateNextPlayerIndexTurn, setFirstPlayerTurn, startGame, buy, receivingMoney, receiveMoney } from '../reducers/gameReducer';
import {purchaseEstablishment, allPlayers} from '../basestuff';
import Login from './Login';
import WhoAmI from './WhoAmI';
import Board from './Board';
const App = connect(
({ auth }) => ({ user: auth })
)(
({ user, children }) =>
<div id="parent">
<nav>
{user ? <WhoAmI /> : <Login />}
</nav>
{children}
</div>
);
const Routes = ({initialListen}) => {
return (
<Router history={browserHistory}>
<Route path="/" component={App} onEnter={initialListen}>
<IndexRedirect to="/board" />
<Route path="/board" component={Board} />
</Route>
</Router>
);
};
const mapDispatch = dispatch => ({
initialListen: function(){
socket.on('addPlayer', (players)=> {
dispatch(updatePlayersArray(players))
});
socket.on('playerRoll', (dice)=> {
dispatch(updateLastNumberRolled(dice.roll))
});
socket.on('endTurn', (indices) => {
dispatch(updateNextPlayerIndexTurn(indices.nextPlayerIndex, indices.lastPlayerIndex))
});
socket.on('startingPlayer', (player) => {
alert(`The starting player will be Player ${player.index + 1}`)
dispatch(setFirstPlayerTurn(player.index))
dispatch(startGame())
});
socket.on('playerBuy', ({game, playerId, establishmentId}) => {
let newState = purchaseEstablishment(game, playerId, establishmentId);
dispatch(buy(newState))
});
socket.on('playerReceiveMoney', ({playerAmountsToChange}) => {
playerAmountsToChange.forEach(changeObject => {
dispatch(receiveMoney(changeObject.playerIndex, changeObject.amount))
});
}); |
}
});
export default connect(null, mapDispatch)(Routes); | random_line_split |
|
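initialListen registers one socket handler per event name, each translating a payload into a single store dispatch. The wiring pattern, sketched in Python with stand-in socket and dispatch objects (the event names are from the record; the action tuples are illustrative):

def make_listeners(dispatch):
    # One handler per socket event, mirroring the socket.on(...) calls above.
    return {
        "addPlayer": lambda players: dispatch(("UPDATE_PLAYERS", players)),
        "playerRoll": lambda dice: dispatch(("LAST_NUMBER_ROLLED", dice["roll"])),
        "endTurn": lambda idx: dispatch(("NEXT_PLAYER", idx["nextPlayerIndex"])),
    }

def initial_listen(socket_on, dispatch):
    for event, handler in make_listeners(dispatch).items():
        socket_on(event, handler)  # socket_on is a hypothetical registration hook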
main.rs | extern crate clap;
extern crate yaml_rust;
extern crate lxd;
use std::env;
use std::fs::File;
use std::io::Read;
use clap::{App, Arg, ArgMatches, SubCommand};
use yaml_rust::YamlLoader;
use lxd::{Container,LxdServer};
fn create_dividing_line(widths: &Vec<usize>) -> String {
let mut dividing_line = String::new();
dividing_line.push_str("+");
for width in widths {
dividing_line.push_str(&format!("{:-^1$}", "", width + 2));
dividing_line.push_str("+");
}
dividing_line.push_str("\n");
dividing_line
}
fn create_header_line(headers: &Vec<&str>, widths: &Vec<usize>) -> String {
let mut header_line = String::new();
header_line.push_str("|");
for (n, header) in headers.iter().enumerate() {
header_line.push_str(&format!("{:^1$}", &header, widths[n] + 2));
header_line.push_str("|");
}
header_line.push_str("\n");
header_line
}
fn create_content_line(item: &Vec<String>, widths: &Vec<usize>) -> String {
let mut content_line = String::new();
content_line.push_str("|");
for (n, column_content) in item.iter().enumerate() {
content_line.push_str(" ");
content_line.push_str(&format!("{:1$}", &column_content, widths[n] + 1));
content_line.push_str("|");
}
content_line.push_str("\n");
content_line
}
fn format_output(headers: &Vec<&str>, items: &Vec<Vec<String>>) -> String {
let mut widths = Vec::new();
for header in headers {
widths.push(header.len());
}
for item in items {
for (n, column) in item.iter().enumerate() {
if column.len() > widths[n] {
widths[n] = column.len();
}
}
}
let dividing_line = create_dividing_line(&widths);
let mut output_string = String::new();
output_string.push_str(&dividing_line);
output_string.push_str(&create_header_line(headers, &widths));
output_string.push_str(&dividing_line);
for item in items {
output_string.push_str(&create_content_line(item, &widths));
}
output_string.push_str(&dividing_line);
output_string
}
fn prepare_container_line(c: &Container) -> Vec<String> |
fn list(matches: &ArgMatches) {
let home_dir = env::var("HOME").unwrap();
let mut config_file = File::open(home_dir.clone() + "/.config/lxc/config.yml").unwrap();
let mut file_contents = String::new();
config_file.read_to_string(&mut file_contents).unwrap();
let lxd_config = YamlLoader::load_from_str(&file_contents).unwrap();
let default_remote = lxd_config[0]["default-remote"].as_str().unwrap();
let remote = matches.value_of("resource").unwrap_or(default_remote);
let lxd_server = match lxd_config[0]["remotes"][remote]["addr"].as_str() {
Some(remote_addr) => remote_addr,
None => panic!("No remote named {} configured", remote)
};
let server = LxdServer::new(
lxd_server,
&(home_dir.clone() + "/.config/lxc/client.crt"),
&(home_dir.clone() + "/.config/lxc/client.key")
);
let headers = vec!["NAME", "STATE", "IPV4", "IPV6", "EPHEMERAL", "SNAPSHOTS"];
let container_items = server.list_containers().iter().map(prepare_container_line).collect();
print!("{}", format_output(&headers, &container_items));
}
fn main() {
let matches = App::new("lxd")
.subcommand(SubCommand::with_name("list")
.arg(Arg::with_name("resource")
.help("the resource to use")
.required(true)
.index(1)))
.get_matches();
match matches.subcommand_name() {
Some("list") => list(matches.subcommand_matches("list").unwrap()),
_ => println!("{}", matches.usage()),
}
}
| {
let mut ipv4_address = String::new();
let mut ipv6_address = String::new();
for ip in &c.status.ips {
if ip.protocol == "IPV4" && ip.address != "127.0.0.1" {
ipv4_address = ip.address.clone();
}
if ip.protocol == "IPV6" && ip.address != "::1" {
ipv6_address = ip.address.clone();
}
}
let ephemeral = if c.ephemeral { "YES" } else { "NO" };
vec![c.name.clone(), c.status.status.clone().to_uppercase(), ipv4_address.to_string(), ipv6_address.to_string(), ephemeral.to_string(), c.snapshot_urls.len().to_string()]
} | identifier_body |
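format_output computes each column width as the maximum of the header and every cell in that column, then assembles the +---+ border, a centered header row, and left-aligned content rows from those widths. The same algorithm in Python:

def format_output(headers, items):
    widths = [len(h) for h in headers]
    for row in items:
        for n, cell in enumerate(row):
            widths[n] = max(widths[n], len(cell))
    border = "+" + "+".join("-" * (w + 2) for w in widths) + "+\n"
    out = border
    out += "|" + "|".join(h.center(w + 2) for h, w in zip(headers, widths)) + "|\n"
    out += border
    for row in items:
        # one leading space, then the cell left-padded to width + 1
        out += "|" + "|".join(" " + c.ljust(w + 1) for c, w in zip(row, widths)) + "|\n"
    return out + border

print(format_output(["NAME", "STATE"], [["web1", "RUNNING"], ["db", "STOPPED"]]))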
main.rs | extern crate clap;
extern crate yaml_rust;
extern crate lxd;
use std::env;
use std::fs::File;
use std::io::Read;
use clap::{App, Arg, ArgMatches, SubCommand};
use yaml_rust::YamlLoader;
use lxd::{Container,LxdServer};
fn create_dividing_line(widths: &Vec<usize>) -> String {
let mut dividing_line = String::new();
dividing_line.push_str("+");
for width in widths {
dividing_line.push_str(&format!("{:-^1$}", "", width + 2));
dividing_line.push_str("+");
}
dividing_line.push_str("\n");
dividing_line
}
fn create_header_line(headers: &Vec<&str>, widths: &Vec<usize>) -> String {
let mut header_line = String::new();
header_line.push_str("|");
for (n, header) in headers.iter().enumerate() {
header_line.push_str(&format!("{:^1$}", &header, widths[n] + 2));
header_line.push_str("|");
}
header_line.push_str("\n");
header_line
}
fn | (item: &Vec<String>, widths: &Vec<usize>) -> String {
let mut content_line = String::new();
content_line.push_str("|");
for (n, column_content) in item.iter().enumerate() {
content_line.push_str(" ");
content_line.push_str(&format!("{:1$}", &column_content, widths[n] + 1));
content_line.push_str("|");
}
content_line.push_str("\n");
content_line
}
fn format_output(headers: &Vec<&str>, items: &Vec<Vec<String>>) -> String {
let mut widths = Vec::new();
for header in headers {
widths.push(header.len());
}
for item in items {
for (n, column) in item.iter().enumerate() {
if column.len() > widths[n] {
widths[n] = column.len();
}
}
}
let dividing_line = create_dividing_line(&widths);
let mut output_string = String::new();
output_string.push_str(&dividing_line);
output_string.push_str(&create_header_line(headers, &widths));
output_string.push_str(&dividing_line);
for item in items {
output_string.push_str(&create_content_line(item, &widths));
}
output_string.push_str(&dividing_line);
output_string
}
fn prepare_container_line(c: &Container) -> Vec<String> {
let mut ipv4_address = String::new();
let mut ipv6_address = String::new();
for ip in &c.status.ips {
if ip.protocol == "IPV4" && ip.address != "127.0.0.1" {
ipv4_address = ip.address.clone();
}
if ip.protocol == "IPV6" && ip.address != "::1" {
ipv6_address = ip.address.clone();
}
}
let ephemeral = if c.ephemeral { "YES" } else { "NO" };
vec![c.name.clone(), c.status.status.clone().to_uppercase(), ipv4_address.to_string(), ipv6_address.to_string(), ephemeral.to_string(), c.snapshot_urls.len().to_string()]
}
fn list(matches: &ArgMatches) {
let home_dir = env::var("HOME").unwrap();
let mut config_file = File::open(home_dir.clone() + "/.config/lxc/config.yml").unwrap();
let mut file_contents = String::new();
config_file.read_to_string(&mut file_contents).unwrap();
let lxd_config = YamlLoader::load_from_str(&file_contents).unwrap();
let default_remote = lxd_config[0]["default-remote"].as_str().unwrap();
let remote = matches.value_of("resource").unwrap_or(default_remote);
let lxd_server = match lxd_config[0]["remotes"][remote]["addr"].as_str() {
Some(remote_addr) => remote_addr,
None => panic!("No remote named {} configured", remote)
};
let server = LxdServer::new(
lxd_server,
&(home_dir.clone() + "/.config/lxc/client.crt"),
&(home_dir.clone() + "/.config/lxc/client.key")
);
let headers = vec!["NAME", "STATE", "IPV4", "IPV6", "EPHEMERAL", "SNAPSHOTS"];
let container_items = server.list_containers().iter().map(prepare_container_line).collect();
print!("{}", format_output(&headers, &container_items));
}
fn main() {
let matches = App::new("lxd")
.subcommand(SubCommand::with_name("list")
.arg(Arg::with_name("resource")
.help("the resource to use")
.required(true)
.index(1)))
.get_matches();
match matches.subcommand_name() {
Some("list") => list(matches.subcommand_matches("list").unwrap()),
_ => println!("{}", matches.usage()),
}
}
| create_content_line | identifier_name |
main.rs | extern crate clap;
extern crate yaml_rust;
extern crate lxd;
use std::env;
use std::fs::File;
use std::io::Read;
use clap::{App, Arg, ArgMatches, SubCommand};
use yaml_rust::YamlLoader;
use lxd::{Container,LxdServer};
fn create_dividing_line(widths: &Vec<usize>) -> String {
let mut dividing_line = String::new();
dividing_line.push_str("+");
for width in widths {
dividing_line.push_str(&format!("{:-^1$}", "", width + 2));
dividing_line.push_str("+");
}
dividing_line.push_str("\n");
dividing_line
}
fn create_header_line(headers: &Vec<&str>, widths: &Vec<usize>) -> String {
let mut header_line = String::new();
header_line.push_str("|");
for (n, header) in headers.iter().enumerate() {
header_line.push_str(&format!("{:^1$}", &header, widths[n] + 2));
header_line.push_str("|");
}
header_line.push_str("\n");
header_line
}
fn create_content_line(item: &Vec<String>, widths: &Vec<usize>) -> String {
let mut content_line = String::new();
content_line.push_str("|");
for (n, column_content) in item.iter().enumerate() {
content_line.push_str(" ");
content_line.push_str(&format!("{:1$}", &column_content, widths[n] + 1));
content_line.push_str("|");
}
content_line.push_str("\n");
content_line
}
fn format_output(headers: &Vec<&str>, items: &Vec<Vec<String>>) -> String {
let mut widths = Vec::new();
for header in headers {
widths.push(header.len());
}
for item in items {
for (n, column) in item.iter().enumerate() {
if column.len() > widths[n] {
widths[n] = column.len();
}
}
}
let dividing_line = create_dividing_line(&widths);
let mut output_string = String::new();
output_string.push_str(&dividing_line);
output_string.push_str(&create_header_line(headers, &widths));
output_string.push_str(&dividing_line);
for item in items {
output_string.push_str(&create_content_line(item, &widths));
}
output_string.push_str(&dividing_line);
output_string
}
fn prepare_container_line(c: &Container) -> Vec<String> {
let mut ipv4_address = String::new();
let mut ipv6_address = String::new();
for ip in &c.status.ips {
if ip.protocol == "IPV4" && ip.address != "127.0.0.1" {
ipv4_address = ip.address.clone();
}
if ip.protocol == "IPV6" && ip.address != "::1" {
ipv6_address = ip.address.clone();
}
}
let ephemeral = if c.ephemeral { "YES" } else { "NO" };
vec![c.name.clone(), c.status.status.clone().to_uppercase(), ipv4_address.to_string(), ipv6_address.to_string(), ephemeral.to_string(), c.snapshot_urls.len().to_string()]
}
fn list(matches: &ArgMatches) {
let home_dir = env::var("HOME").unwrap();
let mut config_file = File::open(home_dir.clone() + "/.config/lxc/config.yml").unwrap();
let mut file_contents = String::new();
config_file.read_to_string(&mut file_contents).unwrap();
let lxd_config = YamlLoader::load_from_str(&file_contents).unwrap();
let default_remote = lxd_config[0]["default-remote"].as_str().unwrap();
let remote = matches.value_of("resource").unwrap_or(default_remote);
let lxd_server = match lxd_config[0]["remotes"][remote]["addr"].as_str() {
Some(remote_addr) => remote_addr,
None => panic!("No remote named {} configured", remote)
};
let server = LxdServer::new(
lxd_server,
&(home_dir.clone() + "/.config/lxc/client.crt"),
&(home_dir.clone() + "/.config/lxc/client.key")
);
let headers = vec!["NAME", "STATE", "IPV4", "IPV6", "EPHEMERAL", "SNAPSHOTS"];
let container_items = server.list_containers().iter().map(prepare_container_line).collect();
print!("{}", format_output(&headers, &container_items));
}
fn main() {
let matches = App::new("lxd")
.subcommand(SubCommand::with_name("list")
.arg(Arg::with_name("resource")
.help("the resource to use")
.required(true) |
match matches.subcommand_name() {
Some("list") => list(matches.subcommand_matches("list").unwrap()),
_ => println!("{}", matches.usage()),
}
} | .index(1)))
.get_matches(); | random_line_split |
main.rs | extern crate clap;
extern crate yaml_rust;
extern crate lxd;
use std::env;
use std::fs::File;
use std::io::Read;
use clap::{App, Arg, ArgMatches, SubCommand};
use yaml_rust::YamlLoader;
use lxd::{Container,LxdServer};
fn create_dividing_line(widths: &Vec<usize>) -> String {
let mut dividing_line = String::new();
dividing_line.push_str("+");
for width in widths {
dividing_line.push_str(&format!("{:-^1$}", "", width + 2));
dividing_line.push_str("+");
}
dividing_line.push_str("\n");
dividing_line
}
fn create_header_line(headers: &Vec<&str>, widths: &Vec<usize>) -> String {
let mut header_line = String::new();
header_line.push_str("|");
for (n, header) in headers.iter().enumerate() {
header_line.push_str(&format!("{:^1$}", &header, widths[n] + 2));
header_line.push_str("|");
}
header_line.push_str("\n");
header_line
}
fn create_content_line(item: &Vec<String>, widths: &Vec<usize>) -> String {
let mut content_line = String::new();
content_line.push_str("|");
for (n, column_content) in item.iter().enumerate() {
content_line.push_str(" ");
content_line.push_str(&format!("{:1$}", &column_content, widths[n] + 1));
content_line.push_str("|");
}
content_line.push_str("\n");
content_line
}
fn format_output(headers: &Vec<&str>, items: &Vec<Vec<String>>) -> String {
let mut widths = Vec::new();
for header in headers {
widths.push(header.len());
}
for item in items {
for (n, column) in item.iter().enumerate() {
if column.len() > widths[n] {
widths[n] = column.len();
}
}
}
let dividing_line = create_dividing_line(&widths);
let mut output_string = String::new();
output_string.push_str(&dividing_line);
output_string.push_str(&create_header_line(headers, &widths));
output_string.push_str(&dividing_line);
for item in items {
output_string.push_str(&create_content_line(item, &widths));
}
output_string.push_str(&dividing_line);
output_string
}
fn prepare_container_line(c: &Container) -> Vec<String> {
let mut ipv4_address = String::new();
let mut ipv6_address = String::new();
for ip in &c.status.ips {
if ip.protocol == "IPV4" && ip.address != "127.0.0.1" |
if ip.protocol == "IPV6" && ip.address != "::1" {
ipv6_address = ip.address.clone();
}
}
let ephemeral = if c.ephemeral { "YES" } else { "NO" };
vec![c.name.clone(), c.status.status.clone().to_uppercase(), ipv4_address.to_string(), ipv6_address.to_string(), ephemeral.to_string(), c.snapshot_urls.len().to_string()]
}
fn list(matches: &ArgMatches) {
let home_dir = env::var("HOME").unwrap();
let mut config_file = File::open(home_dir.clone() + "/.config/lxc/config.yml").unwrap();
let mut file_contents = String::new();
config_file.read_to_string(&mut file_contents).unwrap();
let lxd_config = YamlLoader::load_from_str(&file_contents).unwrap();
let default_remote = lxd_config[0]["default-remote"].as_str().unwrap();
let remote = matches.value_of("resource").unwrap_or(default_remote);
let lxd_server = match lxd_config[0]["remotes"][remote]["addr"].as_str() {
Some(remote_addr) => remote_addr,
None => panic!("No remote named {} configured", remote)
};
let server = LxdServer::new(
lxd_server,
&(home_dir.clone() + "/.config/lxc/client.crt"),
&(home_dir.clone() + "/.config/lxc/client.key")
);
let headers = vec!["NAME", "STATE", "IPV4", "IPV6", "EPHEMERAL", "SNAPSHOTS"];
let container_items = server.list_containers().iter().map(prepare_container_line).collect();
print!("{}", format_output(&headers, &container_items));
}
fn main() {
let matches = App::new("lxd")
.subcommand(SubCommand::with_name("list")
.arg(Arg::with_name("resource")
.help("the resource to use")
.required(true)
.index(1)))
.get_matches();
match matches.subcommand_name() {
Some("list") => list(matches.subcommand_matches("list").unwrap()),
_ => println!("{}", matches.usage()),
}
}
| {
ipv4_address = ip.address.clone();
} | conditional_block |
red.py | #!/usr/bin/python
# Example using an RGB character LCD wired directly to Raspberry Pi or BeagleBone Black.
import time
import Adafruit_CharLCD as LCD
# Raspberry Pi configuration:
lcd_rs = 27 # Change this to pin 21 on older revision Raspberry Pi's
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_red = 4
lcd_green = 17
lcd_blue = 7 # Pin 7 is CE1
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_RGBCharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_red, lcd_green, lcd_blue)
# Show some basic colors.
lcd.set_color(1.0, 0.0, 0.0)
lcd.clear()
| lcd.message('Joyeux')
time.sleep(3.0)
lcd.set_color(0.0, 1.0, 0.0)
lcd.clear()
lcd.message('Noel')
time.sleep(3.0)
lcd.set_color(0.0, 0.0, 1.0)
lcd.clear()
lcd.message('Je vais')
time.sleep(3.0)
lcd.set_color(1.0, 1.0, 0.0)
lcd.clear()
lcd.message('te faire')
time.sleep(3.0)
lcd.set_color(0.0, 1.0, 1.0)
lcd.clear()
lcd.message('des trucs')
time.sleep(3.0)
lcd.set_color(1.0, 0.0, 1.0)
lcd.clear()
lcd.message('de fou')
time.sleep(3.0)
lcd.set_color(1.0, 1.0, 1.0)
lcd.clear()
lcd.message('MOUAHHH')
time.sleep(3.0) | random_line_split |
|
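The color demo repeats the same set_color/clear/message/sleep sequence once per color; driven by data it collapses to a loop. A sketch against the lcd object initialized earlier in the record (strings kept verbatim):

import time

STEPS = [
    (1.0, 0.0, 0.0, 'Joyeux'),
    (0.0, 1.0, 0.0, 'Noel'),
    (0.0, 0.0, 1.0, 'Je vais'),
    (1.0, 1.0, 0.0, 'te faire'),
    (0.0, 1.0, 1.0, 'des trucs'),
    (1.0, 0.0, 1.0, 'de fou'),
    (1.0, 1.0, 1.0, 'MOUAHHH'),
]

for r, g, b, text in STEPS:
    lcd.set_color(r, g, b)  # lcd as built by LCD.Adafruit_RGBCharLCD(...) above
    lcd.clear()
    lcd.message(text)
    time.sleep(3.0)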
aasincos.js |
function aasin(v) {
var ONE_TOL = 1.00000000000001;
var av = fabs(v);
if (av >= 1) {
if (av > ONE_TOL) pj_ctx_set_errno(-19);
return v < 0 ? -M_HALFPI : M_HALFPI;
}
return asin(v);
}
function | (v) {
var ONE_TOL = 1.00000000000001;
var av = fabs(v);
if (av >= 1) {
if (av > ONE_TOL) pj_ctx_set_errno(-19);
return (v < 0 ? M_PI : 0);
}
return acos(v);
}
function asqrt(v) { return ((v <= 0) ? 0 : sqrt(v)); }
function aatan2(n, d) {
var ATOL = 1e-50;
return ((fabs(n) < ATOL && fabs(d) < ATOL) ? 0 : atan2(n,d));
}
| aacos | identifier_name |
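Both helpers clamp their argument: inputs just past +/-1 (floating-point noise) return the endpoint value, and only excursions beyond ONE_TOL are reported as errors before the endpoint is returned. The same guard in Python, with a warning standing in for pj_ctx_set_errno(-19):

import math
import warnings

ONE_TOL = 1.00000000000001  # same tolerance as the record

def aasin(v):
    av = abs(v)
    if av >= 1.0:
        if av > ONE_TOL:
            warnings.warn("aasin: argument outside tolerance")  # errno stand-in
        return math.copysign(math.pi / 2, v)  # +/- M_HALFPI
    return math.asin(v)

def aacos(v):
    av = abs(v)
    if av >= 1.0:
        if av > ONE_TOL:
            warnings.warn("aacos: argument outside tolerance")
        return math.pi if v < 0 else 0.0
    return math.acos(v)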
aasincos.js | function aasin(v) {
var ONE_TOL = 1.00000000000001;
var av = fabs(v);
if (av >= 1) {
if (av > ONE_TOL) pj_ctx_set_errno(-19);
return v < 0 ? -M_HALFPI : M_HALFPI;
}
return asin(v);
}
function aacos(v) {
var ONE_TOL = 1.00000000000001;
var av = fabs(v);
if (av >= 1) {
if (av > ONE_TOL) pj_ctx_set_errno(-19);
return (v < 0 ? M_PI : 0);
}
return acos(v); | function aatan2(n, d) {
var ATOL = 1e-50;
return ((fabs(n) < ATOL && fabs(d) < ATOL) ? 0 : atan2(n,d));
} | }
function asqrt(v) { return ((v <= 0) ? 0 : sqrt(v)); }
| random_line_split |
aasincos.js |
function aasin(v) {
var ONE_TOL = 1.00000000000001;
var av = fabs(v);
if (av >= 1) {
if (av > ONE_TOL) pj_ctx_set_errno(-19);
return v < 0 ? -M_HALFPI : M_HALFPI;
}
return asin(v);
}
function aacos(v) {
var ONE_TOL = 1.00000000000001;
var av = fabs(v);
if (av >= 1) |
return acos(v);
}
function asqrt(v) { return ((v <= 0) ? 0 : sqrt(v)); }
function aatan2(n, d) {
var ATOL = 1e-50;
return ((fabs(n) < ATOL && fabs(d) < ATOL) ? 0 : atan2(n,d));
}
| {
if (av > ONE_TOL) pj_ctx_set_errno(-19);
return (v < 0 ? M_PI : 0);
} | conditional_block |
aasincos.js |
function aasin(v) {
var ONE_TOL = 1.00000000000001;
var av = fabs(v);
if (av >= 1) {
if (av > ONE_TOL) pj_ctx_set_errno(-19);
return v < 0 ? -M_HALFPI : M_HALFPI;
}
return asin(v);
}
function aacos(v) {
var ONE_TOL = 1.00000000000001;
var av = fabs(v);
if (av >= 1) {
if (av > ONE_TOL) pj_ctx_set_errno(-19);
return (v < 0 ? M_PI : 0);
}
return acos(v);
}
function asqrt(v) |
function aatan2(n, d) {
var ATOL = 1e-50;
return ((fabs(n) < ATOL && fabs(d) < ATOL) ? 0 : atan2(n,d));
}
| { return ((v <= 0) ? 0 : sqrt(v)); } | identifier_body |
brain_subprocess.py | # Copyright (c) 2016 Claudiu Popa <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import sys
import textwrap
import six
import astroid
PY33 = sys.version_info >= (3, 3)
PY36 = sys.version_info >= (3, 6)
def _subprocess_transform():
if six.PY3:
|
else:
communicate = ('string', 'string')
communicate_signature = 'def communicate(self, input=None)'
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
pass
"""
if PY33:
wait_signature = 'def wait(self, timeout=None)'
else:
wait_signature = 'def wait(self)'
if six.PY3:
ctx_manager = '''
def __enter__(self): return self
def __exit__(self, *args): pass
'''
else:
ctx_manager = ''
code = textwrap.dedent('''
class Popen(object):
returncode = pid = 0
stdin = stdout = stderr = file()
%(communicate_signature)s:
return %(communicate)r
%(wait_signature)s:
return self.returncode
def poll(self):
return self.returncode
def send_signal(self, signal):
pass
def terminate(self):
pass
def kill(self):
pass
%(ctx_manager)s
''' % {'communicate': communicate,
'communicate_signature': communicate_signature,
'wait_signature': wait_signature,
'ctx_manager': ctx_manager})
init_lines = textwrap.dedent(init).splitlines()
indented_init = '\n'.join([' ' * 4 + line for line in init_lines])
code += indented_init
return astroid.parse(code)
astroid.register_module_extender(astroid.MANAGER, 'subprocess', _subprocess_transform)
| communicate = (bytes('string', 'ascii'), bytes('string', 'ascii'))
communicate_signature = 'def communicate(self, input=None, timeout=None)'
if PY36:
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=(), *,
encoding=None, errors=None):
pass
"""
else:
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=()):
pass
""" | conditional_block |
brain_subprocess.py | # Copyright (c) 2016 Claudiu Popa <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import sys
import textwrap
import six
import astroid
PY33 = sys.version_info >= (3, 3)
PY36 = sys.version_info >= (3, 6)
def _subprocess_transform():
|
astroid.register_module_extender(astroid.MANAGER, 'subprocess', _subprocess_transform)
| if six.PY3:
communicate = (bytes('string', 'ascii'), bytes('string', 'ascii'))
communicate_signature = 'def communicate(self, input=None, timeout=None)'
if PY36:
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=(), *,
encoding=None, errors=None):
pass
"""
else:
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=()):
pass
"""
else:
communicate = ('string', 'string')
communicate_signature = 'def communicate(self, input=None)'
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
pass
"""
if PY33:
wait_signature = 'def wait(self, timeout=None)'
else:
wait_signature = 'def wait(self)'
if six.PY3:
ctx_manager = '''
def __enter__(self): return self
def __exit__(self, *args): pass
'''
else:
ctx_manager = ''
code = textwrap.dedent('''
class Popen(object):
returncode = pid = 0
stdin = stdout = stderr = file()
%(communicate_signature)s:
return %(communicate)r
%(wait_signature)s:
return self.returncode
def poll(self):
return self.returncode
def send_signal(self, signal):
pass
def terminate(self):
pass
def kill(self):
pass
%(ctx_manager)s
''' % {'communicate': communicate,
'communicate_signature': communicate_signature,
'wait_signature': wait_signature,
'ctx_manager': ctx_manager})
init_lines = textwrap.dedent(init).splitlines()
indented_init = '\n'.join([' ' * 4 + line for line in init_lines])
code += indented_init
return astroid.parse(code) | identifier_body |
brain_subprocess.py | # Copyright (c) 2016 Claudiu Popa <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import sys
import textwrap
import six
import astroid
PY33 = sys.version_info >= (3, 3)
PY36 = sys.version_info >= (3, 6)
def _subprocess_transform():
if six.PY3:
communicate = (bytes('string', 'ascii'), bytes('string', 'ascii'))
communicate_signature = 'def communicate(self, input=None, timeout=None)'
if PY36:
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=(), *,
encoding=None, errors=None):
pass
"""
else:
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=()):
pass
"""
else:
communicate = ('string', 'string')
communicate_signature = 'def communicate(self, input=None)'
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
pass
"""
if PY33: | def __enter__(self): return self
def __exit__(self, *args): pass
'''
else:
ctx_manager = ''
code = textwrap.dedent('''
class Popen(object):
returncode = pid = 0
stdin = stdout = stderr = file()
%(communicate_signature)s:
return %(communicate)r
%(wait_signature)s:
return self.returncode
def poll(self):
return self.returncode
def send_signal(self, signal):
pass
def terminate(self):
pass
def kill(self):
pass
%(ctx_manager)s
''' % {'communicate': communicate,
'communicate_signature': communicate_signature,
'wait_signature': wait_signature,
'ctx_manager': ctx_manager})
init_lines = textwrap.dedent(init).splitlines()
indented_init = '\n'.join([' ' * 4 + line for line in init_lines])
code += indented_init
return astroid.parse(code)
astroid.register_module_extender(astroid.MANAGER, 'subprocess', _subprocess_transform) | wait_signature = 'def wait(self, timeout=None)'
else:
wait_signature = 'def wait(self)'
if six.PY3:
ctx_manager = ''' | random_line_split |
brain_subprocess.py | # Copyright (c) 2016 Claudiu Popa <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import sys
import textwrap
import six
import astroid
PY33 = sys.version_info >= (3, 3)
PY36 = sys.version_info >= (3, 6)
def | ():
if six.PY3:
communicate = (bytes('string', 'ascii'), bytes('string', 'ascii'))
communicate_signature = 'def communicate(self, input=None, timeout=None)'
if PY36:
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=(), *,
encoding=None, errors=None):
pass
"""
else:
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=()):
pass
"""
else:
communicate = ('string', 'string')
communicate_signature = 'def communicate(self, input=None)'
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
pass
"""
if PY33:
wait_signature = 'def wait(self, timeout=None)'
else:
wait_signature = 'def wait(self)'
if six.PY3:
ctx_manager = '''
def __enter__(self): return self
def __exit__(self, *args): pass
'''
else:
ctx_manager = ''
code = textwrap.dedent('''
class Popen(object):
returncode = pid = 0
stdin = stdout = stderr = file()
%(communicate_signature)s:
return %(communicate)r
%(wait_signature)s:
return self.returncode
def poll(self):
return self.returncode
def send_signal(self, signal):
pass
def terminate(self):
pass
def kill(self):
pass
%(ctx_manager)s
''' % {'communicate': communicate,
'communicate_signature': communicate_signature,
'wait_signature': wait_signature,
'ctx_manager': ctx_manager})
init_lines = textwrap.dedent(init).splitlines()
indented_init = '\n'.join([' ' * 4 + line for line in init_lines])
code += indented_init
return astroid.parse(code)
astroid.register_module_extender(astroid.MANAGER, 'subprocess', _subprocess_transform)
| _subprocess_transform | identifier_name |
string.rs | /*
* Copyright (c) 2016 Boucher, Antoni <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/// Convert a snake case string to camel case.
pub fn snake_to_camel(string: &str) -> String {
let mut chars = string.chars();
let string =
match chars.next() {
None => String::new(),
Some(first) => first.to_uppercase().collect::<String>() + chars.as_str(),
};
let mut camel = String::new();
let mut underscore = false;
for character in string.chars() {
if character == '_' {
underscore = true;
}
else {
if underscore {
camel.push_str(&character.to_uppercase().collect::<String>());
}
else {
camel.push(character);
}
underscore = false;
}
}
camel
}
/// Transform a camel case command name to its dashed version.
/// WinOpen is transformed to win-open.
pub fn to_dash_name(name: &str) -> String {
let mut result = String::new();
for (index, character) in name.chars().enumerate() {
let string: String = character.to_lowercase().collect();
if character.is_uppercase() && index > 0 { | }
result.push_str(&string);
}
result
} | result.push('-'); | random_line_split |
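The two conversions in this row are easy to sanity-check against a reference implementation in another language; a rough Python equivalent of the same transforms (illustrative only, not part of the crate):

def snake_to_camel(s: str) -> str:
    # 'win_open' -> 'WinOpen'; empty segments from doubled underscores are dropped.
    return ''.join(p[:1].upper() + p[1:] for p in s.split('_') if p)

def to_dash_name(name: str) -> str:
    # 'WinOpen' -> 'win-open', mirroring the Rust loop above.
    out = []
    for i, ch in enumerate(name):
        if ch.isupper() and i > 0:
            out.append('-')
        out.append(ch.lower())
    return ''.join(out)

assert snake_to_camel('win_open') == 'WinOpen'
assert to_dash_name('WinOpen') == 'win-open'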
string.rs | /*
* Copyright (c) 2016 Boucher, Antoni <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/// Convert a snake case string to camel case.
pub fn snake_to_camel(string: &str) -> String |
/// Transform a camel case command name to its dashed version.
/// WinOpen is transformed to win-open.
pub fn to_dash_name(name: &str) -> String {
let mut result = String::new();
for (index, character) in name.chars().enumerate() {
let string: String = character.to_lowercase().collect();
if character.is_uppercase() && index > 0 {
result.push('-');
}
result.push_str(&string);
}
result
}
| {
let mut chars = string.chars();
let string =
match chars.next() {
None => String::new(),
Some(first) => first.to_uppercase().collect::<String>() + chars.as_str(),
};
let mut camel = String::new();
let mut underscore = false;
for character in string.chars() {
if character == '_' {
underscore = true;
}
else {
if underscore {
camel.push_str(&character.to_uppercase().collect::<String>());
}
else {
camel.push(character);
}
underscore = false;
}
}
camel
} | identifier_body |
string.rs | /*
* Copyright (c) 2016 Boucher, Antoni <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/// Convert a snake case string to camel case.
pub fn snake_to_camel(string: &str) -> String {
let mut chars = string.chars();
let string =
match chars.next() {
None => String::new(),
Some(first) => first.to_uppercase().collect::<String>() + chars.as_str(),
};
let mut camel = String::new();
let mut underscore = false;
for character in string.chars() {
if character == '_' {
underscore = true;
}
else {
if underscore {
camel.push_str(&character.to_uppercase().collect::<String>());
}
else {
camel.push(character);
}
underscore = false;
}
}
camel
}
/// Transform a camel case command name to its dashed version.
/// WinOpen is transformed to win-open.
pub fn | (name: &str) -> String {
let mut result = String::new();
for (index, character) in name.chars().enumerate() {
let string: String = character.to_lowercase().collect();
if character.is_uppercase() && index > 0 {
result.push('-');
}
result.push_str(&string);
}
result
}
| to_dash_name | identifier_name |
selectors.ts | import { createSelector } from 'reselect';
import { tKeys } from 'services/i18n';
import { getSelectValuesToLabelsMap } from 'shared/helpers';
import { ISelectOption } from 'shared/types/form';
import { IUsersSearchFilters } from 'shared/types/githubSearch';
import { IUsersSearchFormProps } from './UsersSearchForm';
type OptionType = ISelectOption<IUsersSearchFilters['searchBy']>;
type LabelsType = Record<IUsersSearchFilters['searchFor'], string>;
const { userSearch: intl } = tKeys.features;
export const selectFiltersLabels = createSelector(
(props: IUsersSearchFormProps) => props.t,
(t): Record<keyof IUsersSearchFilters, string> => ({
searchBy: t(intl.searchBy),
searchFor: t(intl.searchFor),
perPage: t(intl.resultsPerPage),
reposLanguage: t(intl.repositoriesLanguage),
minRepos: t(intl.minRepos),
maxRepos: t(intl.maxRepos),
}),
); |
export const selectOptions = createSelector(
(props: IUsersSearchFormProps) => props.t,
(t): OptionType[] => ([
{ value: 'username-email', label: t(intl.usernameAndEmail) },
{ value: 'login', label: t(intl.username) },
{ value: 'email', label: t(intl.email) },
{ value: 'fullname', label: t(intl.fullName) },
]),
);
export const selectLabels = createSelector(
(props: IUsersSearchFormProps) => props.t,
(t): LabelsType => ({
both: t(intl.usersAndOrganizations),
org: t(intl.organizations),
user: t(intl.users),
}),
);
type ValueFormatter<T> = (x: T) => any;
interface IUserSearchFiltersFormattersMap {
searchBy: ValueFormatter<IUsersSearchFilters['searchBy']>;
searchFor: ValueFormatter<IUsersSearchFilters['searchFor']>;
[key: string]: unknown;
}
export const selectFiltersValuesFormatters = createSelector(
selectOptions,
selectLabels,
(options, labels): IUserSearchFiltersFormattersMap => ({
searchBy: searchByValue => getSelectValuesToLabelsMap(options)[searchByValue].toLowerCase(),
searchFor: searchForValue => labels[searchForValue].toLowerCase(),
}),
); | random_line_split |
|
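The selectors row leans on reselect's createSelector, which memoizes a derived value so it is recomputed only when its inputs change. A loose Python analogue of that memoized-selector idea (not the reselect API, and the label strings are invented):

from functools import lru_cache

@lru_cache(maxsize=1)
def select_search_for_labels(locale: str) -> dict:
    # Recomputed only when `locale` changes, the way a reselect
    # selector is recomputed only when props.t changes.
    return {'both': 'Users and organizations',
            'org': 'Organizations',
            'user': 'Users'}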
Carousel.js | /**
* @author
*/
imports("Controls.Composite.Carousel");
using("System.Fx.Marquee");
var Carousel = Control.extend({
onChange: function (e) {
var ul = this.find('.x-carousel-header'), t;
if (t = ul.first(e.from))
t.removeClass('x-carousel-header-selected');
if(t = ul.first(e.to))
t.addClass('x-carousel-header-selected');
},
init: function (options) {
var me = this;
me.marquee = new Marquee(me, options.direction, options.loop, options.deferUpdate);
if (options.duration != null)
me.marquee.duration = options.duration;
if (options.delay != null)
me.marquee.delay = options.delay;
me.marquee.on('changing', me.onChange, me);
me.query('.x-carousel-header > li').setWidth(me.getWidth() / me.marquee.length).on(options.event || 'mouseover', function (e) {
| me.marquee.start();
}
}).defineMethods("marquee", "moveTo moveBy start stop"); | me.marquee.moveTo(this.index());
});
me.onChange({to: 0});
| random_line_split |
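The defineMethods call in the Carousel row forwards moveTo/moveBy/start/stop from the control to its inner marquee. The same delegation pattern sketched in Python for contrast (a loose analogue, not the original widget framework):

class Carousel:
    def __init__(self, marquee):
        self.marquee = marquee

    def __getattr__(self, name):
        # Forward the selected method names to the wrapped marquee,
        # like defineMethods("marquee", "moveTo moveBy start stop").
        if name in ('move_to', 'move_by', 'start', 'stop'):
            return getattr(self.marquee, name)
        raise AttributeError(name)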
error_plot.py | import numpy as np
import matplotlib.pyplot as plt
__all__ = ('error_plot',)
def error_plot(network, logx=False, ax=None, show=True):
""" Makes line plot that shows training progress. x-axis
is an epoch number and y-axis is an error.
Parameters
----------
logx : bool
Parameter sets up a logarithmic scale for the x-axis.
Defaults to ``False``.
ax : object or None
Matplotlib axis object. ``None`` value means that the axis is equal
to the current one (the same as ``ax = plt.gca()``). | the plot. Defaults to ``True``.
Returns
-------
object
Matplotlib axis instance.
"""
if ax is None:
ax = plt.gca()
if not network.errors:
network.logs.warning("There is no data to plot")
return ax
train_errors = network.errors.normalized()
validation_errors = network.validation_errors.normalized()
if len(train_errors) != len(validation_errors):
network.logs.warning("Number of train and validation errors are "
"not the same. Ignored validation errors.")
validation_errors = []
if all(np.isnan(validation_errors)):
validation_errors = []
errors_range = np.arange(len(train_errors))
plot_function = ax.semilogx if logx else ax.plot
line_error_in, = plot_function(errors_range, train_errors)
if validation_errors:
line_error_out, = plot_function(errors_range, validation_errors)
ax.legend(
[line_error_in, line_error_out],
['Train', 'Validation']
)
ax.set_title('Training performance')
ax.set_ylim(bottom=0)
ax.set_ylabel('Error')
ax.set_xlabel('Epoch')
if show:
plt.show()
return ax | Defaults to ``None``.
show : bool
If parameter is equal to ``True`` plot will instantly show | random_line_split
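Stripped of the network object, the plotting logic repeated in these error_plot rows reduces to a handful of matplotlib calls; a self-contained sketch with fabricated error values:

import numpy as np
import matplotlib.pyplot as plt

train = [0.9, 0.5, 0.3, 0.22, 0.18]    # made-up training errors
valid = [0.95, 0.6, 0.4, 0.35, 0.33]   # made-up validation errors

ax = plt.gca()
epochs = np.arange(len(train))
line_in, = ax.plot(epochs, train)
line_out, = ax.plot(epochs, valid)
ax.legend([line_in, line_out], ['Train', 'Validation'])
ax.set_title('Training performance')
ax.set_ylim(bottom=0)
ax.set_ylabel('Error')
ax.set_xlabel('Epoch')
plt.show()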
error_plot.py | import numpy as np
import matplotlib.pyplot as plt
__all__ = ('error_plot',)
def error_plot(network, logx=False, ax=None, show=True):
""" Makes line plot that shows training progress. x-axis
is an epoch number and y-axis is an error.
Parameters
----------
logx : bool
Parameter sets up a logarithmic scale for the x-axis.
Defaults to ``False``.
ax : object or None
Matplotlib axis object. ``None`` value means that the axis is equal
to the current one (the same as ``ax = plt.gca()``).
Defaults to ``None``.
show : bool
If parameter is equal to ``True`` plot will instantly show
the plot. Defaults to ``True``.
Returns
-------
object
Matplotlib axis instance.
"""
if ax is None:
ax = plt.gca()
if not network.errors:
|
train_errors = network.errors.normalized()
validation_errors = network.validation_errors.normalized()
if len(train_errors) != len(validation_errors):
network.logs.warning("Number of train and validation errors are "
"not the same. Ignored validation errors.")
validation_errors = []
if all(np.isnan(validation_errors)):
validation_errors = []
errors_range = np.arange(len(train_errors))
plot_function = ax.semilogx if logx else ax.plot
line_error_in, = plot_function(errors_range, train_errors)
if validation_errors:
line_error_out, = plot_function(errors_range, validation_errors)
ax.legend(
[line_error_in, line_error_out],
['Train', 'Validation']
)
ax.set_title('Training performance')
ax.set_ylim(bottom=0)
ax.set_ylabel('Error')
ax.set_xlabel('Epoch')
if show:
plt.show()
return ax
| network.logs.warning("There is no data to plot")
return ax | conditional_block |
error_plot.py | import numpy as np
import matplotlib.pyplot as plt
__all__ = ('error_plot',)
def error_plot(network, logx=False, ax=None, show=True):
| """ Makes line plot that shows training progress. x-axis
is the epoch number and the y-axis is the error.
Parameters
----------
logx : bool
Parameter sets up a logarithmic scale for the x-axis.
Defaults to ``False``.
ax : object or None
Matplotlib axis object. ``None`` value means that the axis is equal
to the current one (the same as ``ax = plt.gca()``).
Defaults to ``None``.
show : bool
If parameter is equal to ``True`` plot will instantly show
the plot. Defaults to ``True``.
Returns
-------
object
Matplotlib axis instance.
"""
if ax is None:
ax = plt.gca()
if not network.errors:
network.logs.warning("There is no data to plot")
return ax
train_errors = network.errors.normalized()
validation_errors = network.validation_errors.normalized()
if len(train_errors) != len(validation_errors):
network.logs.warning("Number of train and validation errors are "
"not the same. Ignored validation errors.")
validation_errors = []
if all(np.isnan(validation_errors)):
validation_errors = []
errors_range = np.arange(len(train_errors))
plot_function = ax.semilogx if logx else ax.plot
line_error_in, = plot_function(errors_range, train_errors)
if validation_errors:
line_error_out, = plot_function(errors_range, validation_errors)
ax.legend(
[line_error_in, line_error_out],
['Train', 'Validation']
)
ax.set_title('Training performance')
ax.set_ylim(bottom=0)
ax.set_ylabel('Error')
ax.set_xlabel('Epoch')
if show:
plt.show()
return ax | identifier_body |
|
error_plot.py | import numpy as np
import matplotlib.pyplot as plt
__all__ = ('error_plot',)
def | (network, logx=False, ax=None, show=True):
""" Makes line plot that shows training progress. x-axis
is an epoch number and y-axis is an error.
Parameters
----------
logx : bool
Parameter sets up a logarithmic scale for the x-axis.
Defaults to ``False``.
ax : object or None
Matplotlib axis object. ``None`` value means that the axis is equal
to the current one (the same as ``ax = plt.gca()``).
Defaults to ``None``.
show : bool
If parameter is equal to ``True`` plot will instantly show
the plot. Defaults to ``True``.
Returns
-------
object
Matplotlib axis instance.
"""
if ax is None:
ax = plt.gca()
if not network.errors:
network.logs.warning("There is no data to plot")
return ax
train_errors = network.errors.normalized()
validation_errors = network.validation_errors.normalized()
if len(train_errors) != len(validation_errors):
network.logs.warning("Number of train and validation errors are "
"not the same. Ignored validation errors.")
validation_errors = []
if all(np.isnan(validation_errors)):
validation_errors = []
errors_range = np.arange(len(train_errors))
plot_function = ax.semilogx if logx else ax.plot
line_error_in, = plot_function(errors_range, train_errors)
if validation_errors:
line_error_out, = plot_function(errors_range, validation_errors)
ax.legend(
[line_error_in, line_error_out],
['Train', 'Validation']
)
ax.set_title('Training performance')
ax.set_ylim(bottom=0)
ax.set_ylabel('Error')
ax.set_xlabel('Epoch')
if show:
plt.show()
return ax
| error_plot | identifier_name |
histogram.rs | /*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ip.rsh"
rs_allocation gSrc;
rs_allocation gDest;
rs_allocation gSums;
rs_allocation gSum;
int gWidth;
int gHeight;
int gStep;
int gSteps;
void RS_KERNEL pass1(int in, uint x, uint y) {
for (int i=0; i < (256); i++) {
rsSetElementAt_int(gSums, 0, i, y);
}
for (int i = 0; i < gStep; i++) {
int py = y*gStep + i;
if (py >= gHeight) return;
for (int px=0; px < gWidth; px++) {
uchar4 c = rsGetElementAt_uchar4(gSrc, px, py);
int lum = (77 * c.r + 150 * c.g + 29 * c.b) >> 8;
int old = rsGetElementAt_int(gSums, lum, y);
rsSetElementAt_int(gSums, old+1, lum, y);
}
}
}
int RS_KERNEL pass2(uint x) {
int sum = 0;
for (int i=0; i < gSteps; i++) {
sum += rsGetElementAt_int(gSums, x, i);
}
return sum;
}
void rescale() {
int maxv = 0;
for (int i=0; i < 256; i++) {
maxv = max(maxv, rsGetElementAt_int(gSum, i));
}
float overMax = (1.f / maxv) * gHeight;
for (int i=0; i < 256; i++) {
int t = rsGetElementAt_int(gSum, i);
t = gHeight - (overMax * rsGetElementAt_int(gSum, i));
t = max(0, t);
rsSetElementAt_int(gSum, t, i);
}
}
static const uchar4 gClear = {0, 0, 0, 0xff};
uchar4 RS_KERNEL clear() {
return gClear;
}
uchar4 RS_KERNEL draw(uint x, uint y) {
int l = rsGetElementAt_int(gSum, x >> 2);
if (y > l) { | return gClear;
} | return 0xff;
} | random_line_split |
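The 77/150/29 weighting in pass1 is an integer approximation of BT.601 luma (the weights sum to 256, so the >> 8 renormalizes). Outside RenderScript the same histogram is a few NumPy lines; a sketch, illustrative only:

import numpy as np

def luminance_histogram(rgba: np.ndarray) -> np.ndarray:
    # rgba: (H, W, 4) uint8 image.
    r = rgba[..., 0].astype(np.int32)
    g = rgba[..., 1].astype(np.int32)
    b = rgba[..., 2].astype(np.int32)
    lum = (77 * r + 150 * g + 29 * b) >> 8
    return np.bincount(lum.ravel(), minlength=256)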
urls.py | """litchi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views | Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^captcha/', include('captcha.urls')),
url(r'^session/', include('apps.session.urls', namespace='session')),
] | 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') | random_line_split |
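For the namespace='session' include in the urls.py row to reverse cleanly, the included URLconf has to declare an app_name. A sketch of what apps/session/urls.py might contain (the view name is an assumption):

# apps/session/urls.py (hypothetical)
from django.conf.urls import url
from . import views

app_name = 'session'

urlpatterns = [
    # reversed as reverse('session:login')
    url(r'^login/$', views.login_view, name='login'),
]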
project-fn-ret-contravariant.rs | #![feature(unboxed_closures)]
#![feature(rustc_attrs)]
// Test for projection cache. We should be able to project distinct
// lifetimes from `foo` as we reinstantiate it multiple times, but not
// if we do it just once. In this variant, the region `'a` is used in
// a contravariant position, which affects the results.
// revisions: ok oneuse transmute krisskross
#![allow(dead_code, unused_variables)]
fn foo<'a>() -> &'a u32 { loop { } }
fn | <T>(t: T, x: T::Output) -> T::Output
where T: FnOnce<()>
{
t()
}
#[cfg(ok)] // two instantiations: OK
fn baz<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) {
let a = bar(foo, x);
let b = bar(foo, y);
(a, b)
}
#[cfg(oneuse)] // one instantiation: OK (surprisingly)
fn baz<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) {
let f /* : fn() -> &'static u32 */ = foo; // <-- inferred type annotated
let a = bar(f, x); // this is considered ok because fn args are contravariant...
let b = bar(f, y); // ...and hence we infer T to distinct values in each call.
(a, b)
}
#[cfg(transmute)] // one instantiation: BAD
fn baz<'a,'b>(x: &'a u32) -> &'static u32 {
bar(foo, x) //[transmute]~ ERROR E0759
}
#[cfg(krisskross)] // two instantiations, mixing and matching: BAD
fn transmute<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) {
let a = bar(foo, y);
let b = bar(foo, x);
(a, b) //[krisskross]~ ERROR lifetime mismatch [E0623]
//[krisskross]~^ ERROR lifetime mismatch [E0623]
}
#[rustc_error]
fn main() { } //[ok,oneuse]~ ERROR fatal error triggered by #[rustc_error]
| bar | identifier_name |
project-fn-ret-contravariant.rs | #![feature(unboxed_closures)]
#![feature(rustc_attrs)]
// Test for projection cache. We should be able to project distinct
// lifetimes from `foo` as we reinstantiate it multiple times, but not
// if we do it just once. In this variant, the region `'a` is used in
// a contravariant position, which affects the results.
// revisions: ok oneuse transmute krisskross
#![allow(dead_code, unused_variables)]
fn foo<'a>() -> &'a u32 { loop { } }
fn bar<T>(t: T, x: T::Output) -> T::Output
where T: FnOnce<()>
{
t()
}
#[cfg(ok)] // two instantiations: OK
fn baz<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) {
let a = bar(foo, x);
let b = bar(foo, y);
(a, b)
}
#[cfg(oneuse)] // one instantiation: OK (surprisingly)
fn baz<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) {
let f /* : fn() -> &'static u32 */ = foo; // <-- inferred type annotated
let a = bar(f, x); // this is considered ok because fn args are contravariant...
let b = bar(f, y); // ...and hence we infer T to distinct values in each call. |
#[cfg(transmute)] // one instantiations: BAD
fn baz<'a,'b>(x: &'a u32) -> &'static u32 {
bar(foo, x) //[transmute]~ ERROR E0759
}
#[cfg(krisskross)] // two instantiations, mixing and matching: BAD
fn transmute<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) {
let a = bar(foo, y);
let b = bar(foo, x);
(a, b) //[krisskross]~ ERROR lifetime mismatch [E0623]
//[krisskross]~^ ERROR lifetime mismatch [E0623]
}
#[rustc_error]
fn main() { } //[ok,oneuse]~ ERROR fatal error triggered by #[rustc_error] | (a, b)
} | random_line_split |
project-fn-ret-contravariant.rs | #![feature(unboxed_closures)]
#![feature(rustc_attrs)]
// Test for projection cache. We should be able to project distinct
// lifetimes from `foo` as we reinstantiate it multiple times, but not
// if we do it just once. In this variant, the region `'a` is used in
// a contravariant position, which affects the results.
// revisions: ok oneuse transmute krisskross
#![allow(dead_code, unused_variables)]
fn foo<'a>() -> &'a u32 { loop { } }
fn bar<T>(t: T, x: T::Output) -> T::Output
where T: FnOnce<()>
{
t()
}
#[cfg(ok)] // two instantiations: OK
fn baz<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) |
#[cfg(oneuse)] // one instantiation: OK (surprisingly)
fn baz<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) {
let f /* : fn() -> &'static u32 */ = foo; // <-- inferred type annotated
let a = bar(f, x); // this is considered ok because fn args are contravariant...
let b = bar(f, y); // ...and hence we infer T to distinct values in each call.
(a, b)
}
#[cfg(transmute)] // one instantiation: BAD
fn baz<'a,'b>(x: &'a u32) -> &'static u32 {
bar(foo, x) //[transmute]~ ERROR E0759
}
#[cfg(krisskross)] // two instantiations, mixing and matching: BAD
fn transmute<'a,'b>(x: &'a u32, y: &'b u32) -> (&'a u32, &'b u32) {
let a = bar(foo, y);
let b = bar(foo, x);
(a, b) //[krisskross]~ ERROR lifetime mismatch [E0623]
//[krisskross]~^ ERROR lifetime mismatch [E0623]
}
#[rustc_error]
fn main() { } //[ok,oneuse]~ ERROR fatal error triggered by #[rustc_error]
| {
let a = bar(foo, x);
let b = bar(foo, y);
(a, b)
} | identifier_body |
datacatalog_entry_factory.py | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from google.cloud import datacatalog
from google.protobuf import timestamp_pb2
from google.datacatalog_connectors.commons.prepare.base_entry_factory import \
BaseEntryFactory
from google.datacatalog_connectors.rdbms.common import constants
class DataCatalogEntryFactory(BaseEntryFactory):
NO_VALUE_SPECIFIED = 'UNDEFINED'
EMPTY_TOKEN = '?'
def __init__(self, project_id, location_id, entry_resource_url_prefix,
entry_group_id, metadata_definition):
self.__project_id = project_id
self.__location_id = location_id
self.__entry_resource_url_prefix = entry_resource_url_prefix
self.__entry_group_id = entry_group_id
self.__metadata_definition = metadata_definition
def make_entries_for_table_container(self, table_container):
"""Create Datacatalog entries from a table container dict.
:param table_container:
:return: entry_id, entry
"""
entry_id = self._format_id(table_container['name'])
entry = datacatalog.Entry()
entry.user_specified_type = self.__metadata_definition[
'table_container_def']['type']
entry.user_specified_system = self.__entry_group_id
entry.display_name = self._format_display_name(table_container['name'])
create_time, update_time = \
DataCatalogEntryFactory.__convert_source_system_timestamp_fields(
table_container.get('create_time'),
table_container.get('update_time'))
if create_time and update_time:
created_timestamp = timestamp_pb2.Timestamp()
created_timestamp.FromSeconds(create_time)
entry.source_system_timestamps.create_time = created_timestamp
updated_timestamp = timestamp_pb2.Timestamp()
updated_timestamp.FromSeconds(update_time)
entry.source_system_timestamps.update_time = updated_timestamp
desc = table_container.get('desc')
if pd.isna(desc):
desc = ''
entry.description = desc
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
entry_id)
entry.linked_resource = '{}/{}'.format(
self.__entry_resource_url_prefix, entry_id)
return entry_id, entry
def make_entry_for_tables(self, table, table_container_name):
"""Create Datacatalog entries from a table dict.
:param table:
:param table_container_name:
:return: entry_id, entry
"""
entry_id = self._format_id('{}__{}'.format(table_container_name,
table['name']))
entry = datacatalog.Entry()
# some RDBMS' store views and tables definitions in the same
# system table, and the name is not user friendly, so we only
# keep it if it's a VIEW type.
table_type = table.get(constants.TABLE_TYPE_KEY)
if table_type and table_type.lower() == \
constants.VIEW_TYPE_VALUE:
table_type = table_type.lower()
else:
table_type = self.__metadata_definition['table_def']['type']
entry.user_specified_type = table_type
entry.user_specified_system = self.__entry_group_id
entry.display_name = self._format_display_name(table['name'])
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
entry_id)
desc = table.get('desc')
if pd.isna(desc):
desc = ''
entry.description = desc
entry.linked_resource = '{}/{}/{}'.format(
self.__entry_resource_url_prefix, table_container_name,
self._format_id(table['name']))
create_time, update_time = \
DataCatalogEntryFactory.__convert_source_system_timestamp_fields(
table.get('create_time'),
table.get('update_time'))
if create_time and update_time:
created_timestamp = timestamp_pb2.Timestamp()
created_timestamp.FromSeconds(create_time)
entry.source_system_timestamps.create_time = created_timestamp
updated_timestamp = timestamp_pb2.Timestamp()
updated_timestamp.FromSeconds(update_time)
entry.source_system_timestamps.update_time = updated_timestamp
columns = []
for column in table['columns']:
desc = column.get('desc')
if pd.isna(desc):
desc = ''
columns.append(
datacatalog.ColumnSchema(
column=self._format_id(column['name']),
description=desc,
type=DataCatalogEntryFactory.__format_entry_column_type(
column['type'])))
entry.schema.columns.extend(columns)
return entry_id, entry
@staticmethod
def | (date_value):
if pd.notnull(date_value):
return int(date_value.timestamp())
@staticmethod
def __convert_source_system_timestamp_fields(raw_create_time,
raw_update_time):
create_time = DataCatalogEntryFactory. \
__convert_date_value_to_epoch(raw_create_time)
if not pd.isnull(raw_update_time):
update_time = DataCatalogEntryFactory. \
__convert_date_value_to_epoch(raw_update_time)
else:
update_time = create_time
return create_time, update_time
@staticmethod
def __format_entry_column_type(source_name):
if isinstance(source_name, bytes):
# We've noticed some MySQL instances use bytes-like objects
# instead of `str` to specify the column types. We are using UTF-8
# to decode such objects when it happens because UTF-8 is the
# default character set for MySQL 8.0 onwards.
#
# We didn't notice similar behavior with other RDBMS but, if so,
# we should handle encoding as a configuration option that each
# RDBMS connector would have to set up. It might be exposed as a
# CLI arg, so users could easily change that. There is also the
# option to scrape that config directly from the DB.
source_name = source_name.decode("utf-8")
formatted_name = source_name.replace('&', '_')
formatted_name = formatted_name.replace(':', '_')
formatted_name = formatted_name.replace('/', '_')
formatted_name = formatted_name.replace(' ', '_')
if formatted_name == DataCatalogEntryFactory.EMPTY_TOKEN:
formatted_name = DataCatalogEntryFactory.NO_VALUE_SPECIFIED
return formatted_name
| __convert_date_value_to_epoch | identifier_name |
datacatalog_entry_factory.py | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from google.cloud import datacatalog
from google.protobuf import timestamp_pb2
from google.datacatalog_connectors.commons.prepare.base_entry_factory import \
BaseEntryFactory
from google.datacatalog_connectors.rdbms.common import constants
class DataCatalogEntryFactory(BaseEntryFactory):
NO_VALUE_SPECIFIED = 'UNDEFINED'
EMPTY_TOKEN = '?'
def __init__(self, project_id, location_id, entry_resource_url_prefix,
entry_group_id, metadata_definition):
self.__project_id = project_id
self.__location_id = location_id
self.__entry_resource_url_prefix = entry_resource_url_prefix
self.__entry_group_id = entry_group_id
self.__metadata_definition = metadata_definition
def make_entries_for_table_container(self, table_container):
"""Create Datacatalog entries from a table container dict.
:param table_container:
:return: entry_id, entry
"""
entry_id = self._format_id(table_container['name'])
entry = datacatalog.Entry()
entry.user_specified_type = self.__metadata_definition[
'table_container_def']['type']
entry.user_specified_system = self.__entry_group_id
entry.display_name = self._format_display_name(table_container['name'])
create_time, update_time = \
DataCatalogEntryFactory.__convert_source_system_timestamp_fields(
table_container.get('create_time'),
table_container.get('update_time'))
if create_time and update_time:
created_timestamp = timestamp_pb2.Timestamp()
created_timestamp.FromSeconds(create_time)
entry.source_system_timestamps.create_time = created_timestamp
updated_timestamp = timestamp_pb2.Timestamp()
updated_timestamp.FromSeconds(update_time)
entry.source_system_timestamps.update_time = updated_timestamp
desc = table_container.get('desc')
if pd.isna(desc):
desc = ''
entry.description = desc
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
entry_id)
entry.linked_resource = '{}/{}'.format(
self.__entry_resource_url_prefix, entry_id)
return entry_id, entry
def make_entry_for_tables(self, table, table_container_name):
"""Create Datacatalog entries from a table dict.
:param table:
:param table_container_name:
:return: entry_id, entry
"""
entry_id = self._format_id('{}__{}'.format(table_container_name,
table['name']))
entry = datacatalog.Entry()
# some RDBMS' store views and tables definitions in the same
# system table, and the name is not user friendly, so we only
# keep it if it's a VIEW type.
table_type = table.get(constants.TABLE_TYPE_KEY)
if table_type and table_type.lower() == \
constants.VIEW_TYPE_VALUE:
|
else:
table_type = self.__metadata_definition['table_def']['type']
entry.user_specified_type = table_type
entry.user_specified_system = self.__entry_group_id
entry.display_name = self._format_display_name(table['name'])
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
entry_id)
desc = table.get('desc')
if pd.isna(desc):
desc = ''
entry.description = desc
entry.linked_resource = '{}/{}/{}'.format(
self.__entry_resource_url_prefix, table_container_name,
self._format_id(table['name']))
create_time, update_time = \
DataCatalogEntryFactory.__convert_source_system_timestamp_fields(
table.get('create_time'),
table.get('update_time'))
if create_time and update_time:
created_timestamp = timestamp_pb2.Timestamp()
created_timestamp.FromSeconds(create_time)
entry.source_system_timestamps.create_time = created_timestamp
updated_timestamp = timestamp_pb2.Timestamp()
updated_timestamp.FromSeconds(update_time)
entry.source_system_timestamps.update_time = updated_timestamp
columns = []
for column in table['columns']:
desc = column.get('desc')
if pd.isna(desc):
desc = ''
columns.append(
datacatalog.ColumnSchema(
column=self._format_id(column['name']),
description=desc,
type=DataCatalogEntryFactory.__format_entry_column_type(
column['type'])))
entry.schema.columns.extend(columns)
return entry_id, entry
@staticmethod
def __convert_date_value_to_epoch(date_value):
if pd.notnull(date_value):
return int(date_value.timestamp())
@staticmethod
def __convert_source_system_timestamp_fields(raw_create_time,
raw_update_time):
create_time = DataCatalogEntryFactory. \
__convert_date_value_to_epoch(raw_create_time)
if not pd.isnull(raw_update_time):
update_time = DataCatalogEntryFactory. \
__convert_date_value_to_epoch(raw_update_time)
else:
update_time = create_time
return create_time, update_time
@staticmethod
def __format_entry_column_type(source_name):
if isinstance(source_name, bytes):
# We've noticed some MySQL instances use bytes-like objects
# instead of `str` to specify the column types. We are using UTF-8
# to decode such objects when it happens because UTF-8 is the
# default character set for MySQL 8.0 onwards.
#
# We didn't notice similar behavior with other RDBMS but, if so,
# we should handle encoding as a configuration option that each
# RDBMS connector would have to set up. It might be exposed as a
# CLI arg, so users could easily change that. There is also the
# option to scrape that config directly from the DB.
source_name = source_name.decode("utf-8")
formatted_name = source_name.replace('&', '_')
formatted_name = formatted_name.replace(':', '_')
formatted_name = formatted_name.replace('/', '_')
formatted_name = formatted_name.replace(' ', '_')
if formatted_name == DataCatalogEntryFactory.EMPTY_TOKEN:
formatted_name = DataCatalogEntryFactory.NO_VALUE_SPECIFIED
return formatted_name
| table_type = table_type.lower() | conditional_block |
datacatalog_entry_factory.py | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from google.cloud import datacatalog
from google.protobuf import timestamp_pb2
from google.datacatalog_connectors.commons.prepare.base_entry_factory import \
BaseEntryFactory
from google.datacatalog_connectors.rdbms.common import constants
class DataCatalogEntryFactory(BaseEntryFactory):
NO_VALUE_SPECIFIED = 'UNDEFINED'
EMPTY_TOKEN = '?'
def __init__(self, project_id, location_id, entry_resource_url_prefix,
entry_group_id, metadata_definition):
self.__project_id = project_id
self.__location_id = location_id
self.__entry_resource_url_prefix = entry_resource_url_prefix
self.__entry_group_id = entry_group_id
self.__metadata_definition = metadata_definition
def make_entries_for_table_container(self, table_container):
"""Create Datacatalog entries from a table container dict.
:param table_container:
:return: entry_id, entry
"""
entry_id = self._format_id(table_container['name'])
entry = datacatalog.Entry()
entry.user_specified_type = self.__metadata_definition[
'table_container_def']['type']
entry.user_specified_system = self.__entry_group_id
entry.display_name = self._format_display_name(table_container['name'])
create_time, update_time = \
DataCatalogEntryFactory.__convert_source_system_timestamp_fields(
table_container.get('create_time'),
table_container.get('update_time'))
if create_time and update_time:
created_timestamp = timestamp_pb2.Timestamp()
created_timestamp.FromSeconds(create_time)
entry.source_system_timestamps.create_time = created_timestamp
updated_timestamp = timestamp_pb2.Timestamp()
updated_timestamp.FromSeconds(update_time)
entry.source_system_timestamps.update_time = updated_timestamp
desc = table_container.get('desc')
if pd.isna(desc):
desc = ''
entry.description = desc
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
entry_id)
entry.linked_resource = '{}/{}'.format(
self.__entry_resource_url_prefix, entry_id)
return entry_id, entry
def make_entry_for_tables(self, table, table_container_name):
"""Create Datacatalog entries from a table dict.
:param table:
:param table_container_name:
:return: entry_id, entry
"""
entry_id = self._format_id('{}__{}'.format(table_container_name,
table['name']))
entry = datacatalog.Entry()
# some RDBMS' store views and tables definitions in the same
# system table, and the name is not user friendly, so we only
# keep it if it's a VIEW type.
table_type = table.get(constants.TABLE_TYPE_KEY)
if table_type and table_type.lower() == \
constants.VIEW_TYPE_VALUE:
table_type = table_type.lower()
else:
table_type = self.__metadata_definition['table_def']['type']
entry.user_specified_type = table_type
entry.user_specified_system = self.__entry_group_id
entry.display_name = self._format_display_name(table['name'])
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
entry_id)
desc = table.get('desc')
if pd.isna(desc):
desc = ''
entry.description = desc
entry.linked_resource = '{}/{}/{}'.format(
self.__entry_resource_url_prefix, table_container_name,
self._format_id(table['name']))
create_time, update_time = \
DataCatalogEntryFactory.__convert_source_system_timestamp_fields(
table.get('create_time'),
table.get('update_time'))
if create_time and update_time:
created_timestamp = timestamp_pb2.Timestamp()
created_timestamp.FromSeconds(create_time)
entry.source_system_timestamps.create_time = created_timestamp
updated_timestamp = timestamp_pb2.Timestamp()
updated_timestamp.FromSeconds(update_time)
entry.source_system_timestamps.update_time = updated_timestamp
columns = []
for column in table['columns']:
desc = column.get('desc')
if pd.isna(desc):
desc = ''
columns.append(
datacatalog.ColumnSchema(
column=self._format_id(column['name']),
description=desc,
type=DataCatalogEntryFactory.__format_entry_column_type(
column['type'])))
entry.schema.columns.extend(columns)
return entry_id, entry
@staticmethod
def __convert_date_value_to_epoch(date_value):
if pd.notnull(date_value):
return int(date_value.timestamp())
@staticmethod
def __convert_source_system_timestamp_fields(raw_create_time, | if not pd.isnull(raw_update_time):
update_time = DataCatalogEntryFactory. \
__convert_date_value_to_epoch(raw_update_time)
else:
update_time = create_time
return create_time, update_time
@staticmethod
def __format_entry_column_type(source_name):
if isinstance(source_name, bytes):
# We've noticed some MySQL instances use bytes-like objects
# instead of `str` to specify the column types. We are using UTF-8
# to decode such objects when it happens because UTF-8 is the
# default character set for MySQL 8.0 onwards.
#
# We didn't notice similar behavior with other RDBMS but, if so,
# we should handle encoding as a configuration option that each
# RDBMS connector would have to set up. It might be exposed as a
# CLI arg, so users could easily change that. There is also the
# option to scrape that config directly from the DB.
source_name = source_name.decode("utf-8")
formatted_name = source_name.replace('&', '_')
formatted_name = formatted_name.replace(':', '_')
formatted_name = formatted_name.replace('/', '_')
formatted_name = formatted_name.replace(' ', '_')
if formatted_name == DataCatalogEntryFactory.EMPTY_TOKEN:
formatted_name = DataCatalogEntryFactory.NO_VALUE_SPECIFIED
return formatted_name | raw_update_time):
create_time = DataCatalogEntryFactory. \
__convert_date_value_to_epoch(raw_create_time) | random_line_split |
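A recurring detail in these factory rows is the pandas-to-protobuf timestamp hop: reduce the value to epoch seconds, then load it into a Timestamp message. That round trip in isolation (the date is made up):

import pandas as pd
from google.protobuf import timestamp_pb2

create_time = pd.Timestamp('2020-01-15 10:30:00')
epoch_seconds = int(create_time.timestamp())

ts = timestamp_pb2.Timestamp()
ts.FromSeconds(epoch_seconds)
print(ts.ToDatetime())  # back to a datetime for inspection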
datacatalog_entry_factory.py | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from google.cloud import datacatalog
from google.protobuf import timestamp_pb2
from google.datacatalog_connectors.commons.prepare.base_entry_factory import \
BaseEntryFactory
from google.datacatalog_connectors.rdbms.common import constants
class DataCatalogEntryFactory(BaseEntryFactory):
NO_VALUE_SPECIFIED = 'UNDEFINED'
EMPTY_TOKEN = '?'
def __init__(self, project_id, location_id, entry_resource_url_prefix,
entry_group_id, metadata_definition):
|
def make_entries_for_table_container(self, table_container):
"""Create Datacatalog entries from a table container dict.
:param table_container:
:return: entry_id, entry
"""
entry_id = self._format_id(table_container['name'])
entry = datacatalog.Entry()
entry.user_specified_type = self.__metadata_definition[
'table_container_def']['type']
entry.user_specified_system = self.__entry_group_id
entry.display_name = self._format_display_name(table_container['name'])
create_time, update_time = \
DataCatalogEntryFactory.__convert_source_system_timestamp_fields(
table_container.get('create_time'),
table_container.get('update_time'))
if create_time and update_time:
created_timestamp = timestamp_pb2.Timestamp()
created_timestamp.FromSeconds(create_time)
entry.source_system_timestamps.create_time = created_timestamp
updated_timestamp = timestamp_pb2.Timestamp()
updated_timestamp.FromSeconds(update_time)
entry.source_system_timestamps.update_time = updated_timestamp
desc = table_container.get('desc')
if pd.isna(desc):
desc = ''
entry.description = desc
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
entry_id)
entry.linked_resource = '{}/{}'.format(
self.__entry_resource_url_prefix, entry_id)
return entry_id, entry
def make_entry_for_tables(self, table, table_container_name):
"""Create Datacatalog entries from a table dict.
:param table:
:param table_container_name:
:return: entry_id, entry
"""
entry_id = self._format_id('{}__{}'.format(table_container_name,
table['name']))
entry = datacatalog.Entry()
# some RDBMS' store views and tables definitions in the same
# system table, and the name is not user friendly, so we only
# keep it if it's a VIEW type.
table_type = table.get(constants.TABLE_TYPE_KEY)
if table_type and table_type.lower() == \
constants.VIEW_TYPE_VALUE:
table_type = table_type.lower()
else:
table_type = self.__metadata_definition['table_def']['type']
entry.user_specified_type = table_type
entry.user_specified_system = self.__entry_group_id
entry.display_name = self._format_display_name(table['name'])
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
entry_id)
desc = table.get('desc')
if pd.isna(desc):
desc = ''
entry.description = desc
entry.linked_resource = '{}/{}/{}'.format(
self.__entry_resource_url_prefix, table_container_name,
self._format_id(table['name']))
create_time, update_time = \
DataCatalogEntryFactory.__convert_source_system_timestamp_fields(
table.get('create_time'),
table.get('update_time'))
if create_time and update_time:
created_timestamp = timestamp_pb2.Timestamp()
created_timestamp.FromSeconds(create_time)
entry.source_system_timestamps.create_time = created_timestamp
updated_timestamp = timestamp_pb2.Timestamp()
updated_timestamp.FromSeconds(update_time)
entry.source_system_timestamps.update_time = updated_timestamp
columns = []
for column in table['columns']:
desc = column.get('desc')
if pd.isna(desc):
desc = ''
columns.append(
datacatalog.ColumnSchema(
column=self._format_id(column['name']),
description=desc,
type=DataCatalogEntryFactory.__format_entry_column_type(
column['type'])))
entry.schema.columns.extend(columns)
return entry_id, entry
@staticmethod
def __convert_date_value_to_epoch(date_value):
if pd.notnull(date_value):
return int(date_value.timestamp())
@staticmethod
def __convert_source_system_timestamp_fields(raw_create_time,
raw_update_time):
create_time = DataCatalogEntryFactory. \
__convert_date_value_to_epoch(raw_create_time)
if not pd.isnull(raw_update_time):
update_time = DataCatalogEntryFactory. \
__convert_date_value_to_epoch(raw_update_time)
else:
update_time = create_time
return create_time, update_time
@staticmethod
def __format_entry_column_type(source_name):
if isinstance(source_name, bytes):
# We've noticed some MySQL instances use bytes-like objects
# instead of `str` to specify the column types. We are using UTF-8
# to decode such objects when it happens because UTF-8 is the
# default character set for MySQL 8.0 onwards.
#
# We didn't notice similar behavior with other RDBMS but, if so,
# we should handle encoding as a configuration option that each
# RDBMS connector would have to set up. It might be exposed as a
# CLI arg, so users could easily change that. There is also the
# option to scrape that config directly from the DB.
source_name = source_name.decode("utf-8")
formatted_name = source_name.replace('&', '_')
formatted_name = formatted_name.replace(':', '_')
formatted_name = formatted_name.replace('/', '_')
formatted_name = formatted_name.replace(' ', '_')
if formatted_name == DataCatalogEntryFactory.EMPTY_TOKEN:
formatted_name = DataCatalogEntryFactory.NO_VALUE_SPECIFIED
return formatted_name
| self.__project_id = project_id
self.__location_id = location_id
self.__entry_resource_url_prefix = entry_resource_url_prefix
self.__entry_group_id = entry_group_id
self.__metadata_definition = metadata_definition | identifier_body |
html.py | """
Configuration parameters:
path.internal.ansi2html
"""
import sys
import os
import re
from subprocess import Popen, PIPE
MYDIR = os.path.abspath(os.path.join(__file__, '..', '..'))
sys.path.append("%s/lib/" % MYDIR)
# pylint: disable=wrong-import-position
from config import CONFIG
from globals import error
from buttons import TWITTER_BUTTON, GITHUB_BUTTON, GITHUB_BUTTON_FOOTER
import frontend.ansi
# temporarily keeping it here, but actually we have the same data
# in the adapter module
GITHUB_REPOSITORY = {
"late.nz" : 'chubin/late.nz',
"cheat.sheets" : 'chubin/cheat.sheets',
"cheat.sheets dir" : 'chubin/cheat.sheets',
"tldr" : 'tldr-pages/tldr',
"cheat" : 'chrisallenlane/cheat',
"learnxiny" : 'adambard/learnxinyminutes-docs',
"internal" : '',
"search" : '',
"unknown" : '',
}
def visualize(answer_data, request_options):
query = answer_data['query']
answers = answer_data['answers']
topics_list = answer_data['topics_list']
editable = (len(answers) == 1 and answers[0]['topic_type'] == 'cheat.sheets')
repository_button = ''
if len(answers) == 1:
repository_button = _github_button(answers[0]['topic_type'])
result, found = frontend.ansi.visualize(answer_data, request_options)
return _render_html(query, result, editable, repository_button, topics_list, request_options), found
def _github_button(topic_type):
full_name = GITHUB_REPOSITORY.get(topic_type, '')
if not full_name:
return ''
short_name = full_name.split('/', 1)[1] # pylint: disable=unused-variable
button = (
"<!-- Place this tag where you want the button to render. -->"
'<a aria-label="Star %(full_name)s on GitHub"'
' data-count-aria-label="# stargazers on GitHub"'
' data-count-api="/repos/%(full_name)s#stargazers_count"'
' data-count-href="/%(full_name)s/stargazers"'
' data-icon="octicon-star"'
' href="https://github.com/%(full_name)s"'
' class="github-button">%(short_name)s</a>'
) % locals()
return button
def | (query, result, editable, repository_button, topics_list, request_options):
def _html_wrapper(data):
"""
Convert ANSI text `data` to HTML
"""
cmd = ["bash", CONFIG['path.internal.ansi2html'], "--palette=solarized", "--bg=dark"]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except FileNotFoundError:
print("ERROR: %s" % cmd)
raise
data = data.encode('utf-8')
stdout, stderr = proc.communicate(data)
if proc.returncode != 0:
error((stdout + stderr).decode('utf-8'))
return stdout.decode('utf-8')
result = result + "\n$"
result = _html_wrapper(result)
title = "<title>cheat.sh/%s</title>" % query
submit_button = ('<input type="submit" style="position: absolute;'
' left: -9999px; width: 1px; height: 1px;" tabindex="-1" />')
topic_list = ('<datalist id="topics">%s</datalist>'
% ("\n".join("<option value='%s'></option>" % x for x in topics_list)))
curl_line = "<span class='pre'>$ curl cheat.sh/</span>"
if query == ':firstpage':
query = ""
form_html = ('<form action="/" method="GET">'
'%s%s'
'<input'
' type="text" value="%s" name="topic"'
' list="topics" autofocus autocomplete="off"/>'
'%s'
'</form>') \
% (submit_button, curl_line, query, topic_list)
edit_button = ''
if editable:
# It's possible that topic directory starts with omitted underscore
if '/' in query:
query = '_' + query
edit_page_link = 'https://github.com/chubin/cheat.sheets/edit/master/sheets/' + query
edit_button = (
'<pre style="position:absolute;padding-left:40em;overflow:visible;height:0;">'
'[<a href="%s" style="color:cyan">edit</a>]'
'</pre>') % edit_page_link
result = re.sub("<pre>", edit_button + form_html + "<pre>", result)
result = re.sub("<head>", "<head>" + title, result)
if not request_options.get('quiet'):
result = result.replace('</body>',
TWITTER_BUTTON \
+ GITHUB_BUTTON \
+ repository_button \
+ GITHUB_BUTTON_FOOTER \
+ '</body>')
return result
| _render_html | identifier_name |
html.py | """
Configuration parameters:
path.internal.ansi2html
"""
import sys
import os
import re
from subprocess import Popen, PIPE
MYDIR = os.path.abspath(os.path.join(__file__, '..', '..'))
sys.path.append("%s/lib/" % MYDIR)
# pylint: disable=wrong-import-position
from config import CONFIG
from globals import error
from buttons import TWITTER_BUTTON, GITHUB_BUTTON, GITHUB_BUTTON_FOOTER
import frontend.ansi
# temporarily keeping it here, but actually we have the same data
# in the adapter module
GITHUB_REPOSITORY = {
"late.nz" : 'chubin/late.nz',
"cheat.sheets" : 'chubin/cheat.sheets',
"cheat.sheets dir" : 'chubin/cheat.sheets',
"tldr" : 'tldr-pages/tldr',
"cheat" : 'chrisallenlane/cheat',
"learnxiny" : 'adambard/learnxinyminutes-docs',
"internal" : '',
"search" : '',
"unknown" : '',
}
def visualize(answer_data, request_options):
query = answer_data['query']
answers = answer_data['answers']
topics_list = answer_data['topics_list']
editable = (len(answers) == 1 and answers[0]['topic_type'] == 'cheat.sheets')
repository_button = ''
if len(answers) == 1:
repository_button = _github_button(answers[0]['topic_type'])
result, found = frontend.ansi.visualize(answer_data, request_options)
return _render_html(query, result, editable, repository_button, topics_list, request_options), found
def _github_button(topic_type):
full_name = GITHUB_REPOSITORY.get(topic_type, '')
if not full_name:
return ''
short_name = full_name.split('/', 1)[1] # pylint: disable=unused-variable
button = (
"<!-- Place this tag where you want the button to render. -->"
'<a aria-label="Star %(full_name)s on GitHub"'
' data-count-aria-label="# stargazers on GitHub"'
' data-count-api="/repos/%(full_name)s#stargazers_count"'
' data-count-href="/%(full_name)s/stargazers"'
' data-icon="octicon-star"'
' href="https://github.com/%(full_name)s"'
' class="github-button">%(short_name)s</a>'
) % locals()
return button
def _render_html(query, result, editable, repository_button, topics_list, request_options):
def _html_wrapper(data):
|
result = result + "\n$"
result = _html_wrapper(result)
title = "<title>cheat.sh/%s</title>" % query
submit_button = ('<input type="submit" style="position: absolute;'
' left: -9999px; width: 1px; height: 1px;" tabindex="-1" />')
topic_list = ('<datalist id="topics">%s</datalist>'
% ("\n".join("<option value='%s'></option>" % x for x in topics_list)))
curl_line = "<span class='pre'>$ curl cheat.sh/</span>"
if query == ':firstpage':
query = ""
form_html = ('<form action="/" method="GET">'
'%s%s'
'<input'
' type="text" value="%s" name="topic"'
' list="topics" autofocus autocomplete="off"/>'
'%s'
'</form>') \
% (submit_button, curl_line, query, topic_list)
edit_button = ''
if editable:
# It's possible that topic directory starts with omitted underscore
if '/' in query:
query = '_' + query
edit_page_link = 'https://github.com/chubin/cheat.sheets/edit/master/sheets/' + query
edit_button = (
'<pre style="position:absolute;padding-left:40em;overflow:visible;height:0;">'
'[<a href="%s" style="color:cyan">edit</a>]'
'</pre>') % edit_page_link
result = re.sub("<pre>", edit_button + form_html + "<pre>", result)
result = re.sub("<head>", "<head>" + title, result)
if not request_options.get('quiet'):
result = result.replace('</body>',
TWITTER_BUTTON \
+ GITHUB_BUTTON \
+ repository_button \
+ GITHUB_BUTTON_FOOTER \
+ '</body>')
return result
| """
Convert ANSI text `data` to HTML
"""
cmd = ["bash", CONFIG['path.internal.ansi2html'], "--palette=solarized", "--bg=dark"]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except FileNotFoundError:
print("ERROR: %s" % cmd)
raise
data = data.encode('utf-8')
stdout, stderr = proc.communicate(data)
if proc.returncode != 0:
error((stdout + stderr).decode('utf-8'))
return stdout.decode('utf-8') | identifier_body |
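The _html_wrapper body above is a plain subprocess filter around an ansi2html script; the same Popen/communicate pattern reduced to essentials (the converter path is an assumption):

from subprocess import Popen, PIPE

def ansi_to_html(ansi_text: str) -> str:
    cmd = ["bash", "/path/to/ansi2html.sh", "--palette=solarized", "--bg=dark"]
    proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, stderr = proc.communicate(ansi_text.encode('utf-8'))
    if proc.returncode != 0:
        raise RuntimeError(stderr.decode('utf-8'))
    return stdout.decode('utf-8')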
html.py | """ | Configuration parameters:
path.internal.ansi2html
"""
import sys
import os
import re
from subprocess import Popen, PIPE
MYDIR = os.path.abspath(os.path.join(__file__, '..', '..'))
sys.path.append("%s/lib/" % MYDIR)
# pylint: disable=wrong-import-position
from config import CONFIG
from globals import error
from buttons import TWITTER_BUTTON, GITHUB_BUTTON, GITHUB_BUTTON_FOOTER
import frontend.ansi
# Temporarily kept here; the same data also lives in the adapter module.
GITHUB_REPOSITORY = {
"late.nz" : 'chubin/late.nz',
"cheat.sheets" : 'chubin/cheat.sheets',
"cheat.sheets dir" : 'chubin/cheat.sheets',
"tldr" : 'tldr-pages/tldr',
"cheat" : 'chrisallenlane/cheat',
"learnxiny" : 'adambard/learnxinyminutes-docs',
"internal" : '',
"search" : '',
"unknown" : '',
}
def visualize(answer_data, request_options):
query = answer_data['query']
answers = answer_data['answers']
topics_list = answer_data['topics_list']
editable = (len(answers) == 1 and answers[0]['topic_type'] == 'cheat.sheets')
repository_button = ''
if len(answers) == 1:
repository_button = _github_button(answers[0]['topic_type'])
result, found = frontend.ansi.visualize(answer_data, request_options)
return _render_html(query, result, editable, repository_button, topics_list, request_options), found
def _github_button(topic_type):
full_name = GITHUB_REPOSITORY.get(topic_type, '')
if not full_name:
return ''
short_name = full_name.split('/', 1)[1] # pylint: disable=unused-variable
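# Both names are consumed by the "% locals()" substitution in the template below, which pylint cannot detect.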
button = (
"<!-- Place this tag where you want the button to render. -->"
'<a aria-label="Star %(full_name)s on GitHub"'
' data-count-aria-label="# stargazers on GitHub"'
' data-count-api="/repos/%(full_name)s#stargazers_count"'
' data-count-href="/%(full_name)s/stargazers"'
' data-icon="octicon-star"'
' href="https://github.com/%(full_name)s"'
' class="github-button">%(short_name)s</a>'
) % locals()
return button
def _render_html(query, result, editable, repository_button, topics_list, request_options):
def _html_wrapper(data):
"""
Convert ANSI text `data` to HTML
"""
cmd = ["bash", CONFIG['path.internal.ansi2html'], "--palette=solarized", "--bg=dark"]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except FileNotFoundError:
print("ERROR: %s" % cmd)
raise
data = data.encode('utf-8')
stdout, stderr = proc.communicate(data)
if proc.returncode != 0:
error((stdout + stderr).decode('utf-8'))
return stdout.decode('utf-8')
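# Append a trailing "$" so the rendered page ends on a shell-prompt line.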
result = result + "\n$"
result = _html_wrapper(result)
title = "<title>cheat.sh/%s</title>" % query
submit_button = ('<input type="submit" style="position: absolute;'
' left: -9999px; width: 1px; height: 1px;" tabindex="-1" />')
topic_list = ('<datalist id="topics">%s</datalist>'
% ("\n".join("<option value='%s'></option>" % x for x in topics_list)))
curl_line = "<span class='pre'>$ curl cheat.sh/</span>"
if query == ':firstpage':
query = ""
form_html = ('<form action="/" method="GET">'
'%s%s'
'<input'
' type="text" value="%s" name="topic"'
' list="topics" autofocus autocomplete="off"/>'
'%s'
'</form>') \
% (submit_button, curl_line, query, topic_list)
edit_button = ''
if editable:
# The topic's directory name may start with an underscore that is omitted from the query
if '/' in query:
query = '_' + query
edit_page_link = 'https://github.com/chubin/cheat.sheets/edit/master/sheets/' + query
edit_button = (
'<pre style="position:absolute;padding-left:40em;overflow:visible;height:0;">'
'[<a href="%s" style="color:cyan">edit</a>]'
'</pre>') % edit_page_link
result = re.sub("<pre>", edit_button + form_html + "<pre>", result)
result = re.sub("<head>", "<head>" + title, result)
if not request_options.get('quiet'):
result = result.replace('</body>',
TWITTER_BUTTON \
+ GITHUB_BUTTON \
+ repository_button \
+ GITHUB_BUTTON_FOOTER \
+ '</body>')
return result
| random_line_split
|
html.py |
"""
Configuration parameters:
path.internal.ansi2html
"""
import sys
import os
import re
from subprocess import Popen, PIPE
MYDIR = os.path.abspath(os.path.join(__file__, '..', '..'))
sys.path.append("%s/lib/" % MYDIR)
# pylint: disable=wrong-import-position
from config import CONFIG
from globals import error
from buttons import TWITTER_BUTTON, GITHUB_BUTTON, GITHUB_BUTTON_FOOTER
import frontend.ansi
# Temporarily kept here; the same data also lives in the adapter module.
GITHUB_REPOSITORY = {
"late.nz" : 'chubin/late.nz',
"cheat.sheets" : 'chubin/cheat.sheets',
"cheat.sheets dir" : 'chubin/cheat.sheets',
"tldr" : 'tldr-pages/tldr',
"cheat" : 'chrisallenlane/cheat',
"learnxiny" : 'adambard/learnxinyminutes-docs',
"internal" : '',
"search" : '',
"unknown" : '',
}
def visualize(answer_data, request_options):
query = answer_data['query']
answers = answer_data['answers']
topics_list = answer_data['topics_list']
editable = (len(answers) == 1 and answers[0]['topic_type'] == 'cheat.sheets')
repository_button = ''
if len(answers) == 1:
repository_button = _github_button(answers[0]['topic_type'])
result, found = frontend.ansi.visualize(answer_data, request_options)
return _render_html(query, result, editable, repository_button, topics_list, request_options), found
def _github_button(topic_type):
full_name = GITHUB_REPOSITORY.get(topic_type, '')
if not full_name:
return ''
short_name = full_name.split('/', 1)[1] # pylint: disable=unused-variable
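# Both names are consumed by the "% locals()" substitution in the template below, which pylint cannot detect.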
button = (
"<!-- Place this tag where you want the button to render. -->"
'<a aria-label="Star %(full_name)s on GitHub"'
' data-count-aria-label="# stargazers on GitHub"'
' data-count-api="/repos/%(full_name)s#stargazers_count"'
' data-count-href="/%(full_name)s/stargazers"'
' data-icon="octicon-star"'
' href="https://github.com/%(full_name)s"'
' class="github-button">%(short_name)s</a>'
) % locals()
return button
def _render_html(query, result, editable, repository_button, topics_list, request_options):
def _html_wrapper(data):
"""
Convert ANSI text `data` to HTML
"""
cmd = ["bash", CONFIG['path.internal.ansi2html'], "--palette=solarized", "--bg=dark"]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except FileNotFoundError:
print("ERROR: %s" % cmd)
raise
data = data.encode('utf-8')
stdout, stderr = proc.communicate(data)
if proc.returncode != 0:
error((stdout + stderr).decode('utf-8'))
return stdout.decode('utf-8')
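# Append a trailing "$" so the rendered page ends on a shell-prompt line.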
result = result + "\n$"
result = _html_wrapper(result)
title = "<title>cheat.sh/%s</title>" % query
submit_button = ('<input type="submit" style="position: absolute;'
' left: -9999px; width: 1px; height: 1px;" tabindex="-1" />')
topic_list = ('<datalist id="topics">%s</datalist>'
% ("\n".join("<option value='%s'></option>" % x for x in topics_list)))
curl_line = "<span class='pre'>$ curl cheat.sh/</span>"
if query == ':firstpage':
query = ""
form_html = ('<form action="/" method="GET">'
'%s%s'
'<input'
' type="text" value="%s" name="topic"'
' list="topics" autofocus autocomplete="off"/>'
'%s'
'</form>') \
% (submit_button, curl_line, query, topic_list)
edit_button = ''
if editable:
# The topic's directory name may start with an underscore that is omitted from the query
if '/' in query:
query = '_' + query
edit_page_link = 'https://github.com/chubin/cheat.sheets/edit/master/sheets/' + query
edit_button = (
'<pre style="position:absolute;padding-left:40em;overflow:visible;height:0;">'
'[<a href="%s" style="color:cyan">edit</a>]'
'</pre>') % edit_page_link
result = re.sub("<pre>", edit_button + form_html + "<pre>", result)
result = re.sub("<head>", "<head>" + title, result)
if not request_options.get('quiet'):
|
return result
|
result = result.replace('</body>',
TWITTER_BUTTON \
+ GITHUB_BUTTON \
+ repository_button \
+ GITHUB_BUTTON_FOOTER \
+ '</body>')
| conditional_block
client.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import uuid
import socket
import time
__appname__ = "pymessage"
__author__ = "Marco Sirabella, Owen Davies"
__copyright__ = ""
__credits__ = "Marco Sirabella, Owen Davies"
__license__ = "new BSD 3-Clause"
__version__ = "0.0.3"
__maintainers__ = "Marco Sirabella, Owen Davies"
__email__ = "[email protected], [email protected]"
__status__ = "Prototype"
__module__ = ""
address = ('localhost', 5350)
lguid = '0'
def connect():
|
connect()
|
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
sock.send((hex(uuid.getnode()) + '\n').encode() + bytes(False)) # NOTE: bytes(False) == b'' since False == 0, so this appends nothing
print("sent")
sock.send(lguid.encode())
print('sent latest guid: {}'.format(lguid))
# contents = "latest guid +5: {}".format(lguid + '5')
msg = True
fullmsg = ''
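# Read until the server closes the connection: recv() returns b'' at EOF, which decodes to '' and ends the loop.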
while msg:
msg = sock.recv(16).decode() # read in 16-byte chunks; recv() may return fewer bytes than requested
#print('mes rec: {}'.format(msg))
fullmsg += msg
print('received message: {}'.format(fullmsg))
sock.close()
| identifier_body
client.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import uuid
import socket
import time
__appname__ = "pymessage"
__author__ = "Marco Sirabella, Owen Davies"
__copyright__ = ""
__credits__ = "Marco Sirabella, Owen Davies"
__license__ = "new BSD 3-Clause"
__version__ = "0.0.3"
__maintainers__ = "Marco Sirabella, Owen Davies"
__email__ = "[email protected], [email protected]"
__status__ = "Prototype"
__module__ = ""
address = ('localhost', 5350)
lguid = '0'
def connect():
|
print('sent latest guid: {}'.format(lguid))
# contents = "latest guid +5: {}".format(lguid + '5')
msg = True
fullmsg = ''
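# Read until the server closes the connection: recv() returns b'' at EOF, which decodes to '' and ends the loop.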
while msg:
msg = sock.recv(16).decode() # read in 16-byte chunks; recv() may return fewer bytes than requested
#print('mes rec: {}'.format(msg))
fullmsg += msg
print('received message: {}'.format(fullmsg))
sock.close()
connect()
|
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
sock.send((hex(uuid.getnode()) + '\n').encode() + bytes(False)) # NOTE: bytes(False) == b'' since False == 0, so this appends nothing
print("sent")
sock.send(lguid.encode())
| random_line_split
client.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import uuid
import socket
import time
__appname__ = "pymessage"
__author__ = "Marco Sirabella, Owen Davies"
__copyright__ = ""
__credits__ = "Marco Sirabella, Owen Davies"
__license__ = "new BSD 3-Clause"
__version__ = "0.0.3"
__maintainers__ = "Marco Sirabella, Owen Davies"
__email__ = "[email protected], [email protected]"
__status__ = "Prototype"
__module__ = ""
address = ('localhost', 5350)
lguid = '0'
def | ():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
sock.send((hex(uuid.getnode()) + '\n').encode() + bytes(False)) # NOTE: bytes(False) == b'' since False == 0, so this appends nothing
print("sent")
sock.send(lguid.encode())
print('sent latest guid: {}'.format(lguid))
# contents = "latest guid +5: {}".format(lguid + '5')
msg = True
fullmsg = ''
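# Read until the server closes the connection: recv() returns b'' at EOF, which decodes to '' and ends the loop.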
while msg:
msg = sock.recv(16).decode() # read in 16-byte chunks; recv() may return fewer bytes than requested
#print('mes rec: {}'.format(msg))
fullmsg += msg
print('received message: {}'.format(fullmsg))
sock.close()
connect()
| connect | identifier_name |
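Each row above ends with a fim_type label (identifier_body, conditional_block, random_line_split, identifier_name) naming how the source file was cut into prefix, suffix, and middle cells. As a minimal sketch — assuming only the column layout visible above; the sample row is hypothetical, not a record from this dump — rejoining the cells restores the file:

def reassemble(row):
    # prefix + middle + suffix restores the original source text,
    # whatever fim_type was used to cut it.
    return row["prefix"] + row["middle"] + row["suffix"]

# Hypothetical identifier_name row modeled on the client.py entries above.
row = {
    "file_name": "client.py",
    "prefix": "def ",
    "middle": "connect",
    "suffix": "():\n    pass\n",
    "fim_type": "identifier_name",
}
assert reassemble(row) == "def connect():\n    pass\n"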